Merge branch 'develop' into feature/TD-1925_new

Hongze Cheng 2021-01-06 05:16:29 +00:00
commit e82a6c84d4
101 changed files with 4552 additions and 2874 deletions

View File

@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.12.0")
SET(TD_VER_NUMBER "2.0.13.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@@ -4,6 +4,8 @@
TDengine uses a relational data model, so databases and tables must be created. For a given application scenario, you therefore need to plan the design of the database as well as the design of super tables (STables) and regular tables. This section does not cover detailed syntax rules; it only introduces the concepts.
For data modeling, please also refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1945.html">video tutorial</a>.
## Creating a Database
Different types of data collection points often have different data characteristics, including collection frequency, retention period, number of replicas, data block size, whether updates are allowed, and so on. To let TDengine work at maximum efficiency in every scenario, it is recommended to create tables with different data characteristics in separate databases, because each database can be configured with its own storage strategy. When creating a database, besides the standard SQL options, an application can also specify parameters such as retention period, number of replicas, number of memory blocks, time precision, the maximum and minimum number of records per file block, whether to compress, the number of days covered by one data file, and more. For example:
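A minimal sketch of creating such a database through JDBC, assuming the RESTful taos-jdbcdriver connector (URL scheme `jdbc:TAOS-RS`, port 6041 as defaulted by RestfulDriver in this commit) and illustrative option values for a database named `power`:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateDatabaseSketch {
    public static void main(String[] args) throws Exception {
        // assumed driver class and URL scheme for the RESTful connector
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // illustrative storage options: 365-day retention, 10 days per data file,
            // 4 memory blocks, 1 replica, updates allowed
            stmt.executeUpdate("CREATE DATABASE IF NOT EXISTS power"
                    + " KEEP 365 DAYS 10 BLOCKS 4 REPLICA 1 UPDATE 1");
            stmt.executeUpdate("USE power");
        }
    }
}
```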
@@ -60,4 +62,3 @@ TDengine supports the multi-column model: as long as the metrics are collected by the same data collection point at the same time
TDengine recommends using the multi-column model whenever possible, because insertion and storage are more efficient. However, in some scenarios the set of metrics collected by a data collection point changes frequently; with the multi-column model this requires frequent changes to the super table's schema definition and makes the application more complex, and in such cases the single-column model is simpler.
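A minimal sketch of both models, assuming the usual smart-meter scenario; the super table names, columns, and tags are illustrative, using the TDengine 2.0 `CREATE TABLE ... TAGS (...)` form for super tables:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ModelingSketch {
    public static void main(String[] args) throws Exception {
        // assumed RESTful connection to the database created above
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
        String url = "jdbc:TAOS-RS://localhost:6041/power?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // multi-column model: one super table holds all metrics sampled together
            stmt.executeUpdate("CREATE TABLE IF NOT EXISTS meters"
                    + " (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)"
                    + " TAGS (location BINARY(64), groupid INT)");
            // single-column model: one super table per metric; schema changes stay local
            stmt.executeUpdate("CREATE TABLE IF NOT EXISTS current_only"
                    + " (ts TIMESTAMP, current FLOAT)"
                    + " TAGS (location BINARY(64), groupid INT)");
        }
    }
}
```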
For data modeling, please also refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1945.html">video tutorial</a>.

View File

@@ -6,6 +6,8 @@
TDengine's cluster management is extremely simple: apart from adding and removing nodes, which require manual intervention, everything else is fully automatic, which minimizes the operations workload. This chapter describes cluster management operations in detail.
For cluster setup, please also refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1961.html">video tutorial</a>.
## Preparation
**Step 0**: Plan the FQDN of every physical node in the cluster, add each planned FQDN to the corresponding node's /etc/hostname, and edit /etc/hosts on every physical node so that it contains the IP-to-FQDN mapping of all cluster nodes. [If DNS is deployed, contact your network administrator to configure it on the DNS server.]
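A minimal sketch for verifying that a planned FQDN resolves on a node, assuming `h1.taosdata.com` as a placeholder hostname:

```java
import java.net.InetAddress;

public class FqdnCheck {
    public static void main(String[] args) throws Exception {
        // resolves via /etc/hosts (or DNS, if deployed) and prints the mapped address
        InetAddress addr = InetAddress.getByName("h1.taosdata.com");
        System.out.println(addr.getHostName() + " -> " + addr.getHostAddress());
    }
}
```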
@@ -227,4 +229,3 @@ SHOW MNODES;
TDengine provides an executable program, tarbitrator, which can run on any Linux server. Go to [download packages](https://www.taosdata.com/cn/all-downloads/) and, in the TDengine Arbitrator Linux section, choose a suitable version to download and install. The program has almost no system resource requirements; all it needs is a network connection. Its command-line parameter `-p` specifies the port it serves on, 6042 by default. When configuring each taosd instance, set the parameter arbitrator in the configuration file taos.cfg to the arbitrator's End Point. If this parameter is configured, the system automatically connects to the configured arbitrator whenever the number of replicas is even; if the number of replicas is odd, the system will not establish the connection even if an arbitrator is configured.
For cluster setup, please also refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1961.html">video tutorial</a>.

View File

@@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.0.12.0'
version: '2.0.13.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.12.0
- usr/lib/libtaos.so.2.0.13.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so

View File

@@ -75,11 +75,11 @@ static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SC
static int32_t convertFunctionId(int32_t optr, int16_t* functionId);
static uint8_t convertOptr(SStrToken *pToken);
static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery);
static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery, bool intervalQuery);
static bool validateIpAddress(const char* ip, size_t size);
static bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery);
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool intervalQuery);
static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd);
@@ -1475,7 +1475,7 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) {
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery) {
int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery, bool intervalQuery) {
assert(pSelection != NULL && pCmd != NULL);
const char* msg2 = "functions can not be mixed up";
@@ -1531,7 +1531,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
addPrimaryTsColIntoResult(pQueryInfo);
}
if (!functionCompatibleCheck(pQueryInfo, joinQuery)) {
if (!functionCompatibleCheck(pQueryInfo, joinQuery, intervalQuery)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@@ -2810,7 +2810,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
return false;
}
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool intervalQuery) {
int32_t startIdx = 0;
size_t numOfExpr = tscSqlExprNumOfExprs(pQueryInfo);
@@ -2826,6 +2826,10 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
int32_t factor = functionCompatList[tscSqlExprGet(pQueryInfo, startIdx)->functionId];
if (tscSqlExprGet(pQueryInfo, 0)->functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery)) {
return false;
}
// diff function cannot be executed with other function
// arithmetic function can be executed with other arithmetic functions
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
@@ -2850,7 +2854,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
}
}
if (functionId == TSDB_FUNC_LAST_ROW && joinQuery) {
if (functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery)) {
return false;
}
}
@@ -6320,7 +6324,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
}
bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable, false) != TSDB_CODE_SUCCESS) {
if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable, false, false) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -6565,7 +6569,9 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
int32_t joinQuery = (pQuerySql->from != NULL && taosArrayGetSize(pQuerySql->from) > 2);
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) {
int32_t intervalQuery = !(pQuerySql->interval.type == 0 || pQuerySql->interval.n == 0);
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery, intervalQuery) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}

View File

@@ -458,12 +458,13 @@ void tscFreeRegisteredSqlObj(void *pSql) {
assert(RID_VALID(p->self));
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfObj, 1);
int32_t total = atomic_sub_fetch_32(&tscNumOfObj, 1);
tscDebug("%p free SqlObj, total in tscObj:%d, total:%d", pSql, num, total);
tscFreeSqlObj(p);
taosReleaseRef(tscRefId, pTscObj->rid);
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfObj, 1);
int32_t total = atomic_sub_fetch_32(&tscNumOfObj, 1);
tscDebug("%p free SqlObj, total in tscObj:%d, total:%d", pSql, num, total);
}
void tscFreeSqlObj(SSqlObj* pSql) {

View File

@@ -56,6 +56,12 @@
<scope>test</scope>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.47</version>
</dependency>
<!-- for restful -->
<dependency>
<groupId>org.apache.httpcomponents</groupId>
@@ -73,7 +79,14 @@
<version>1.2.58</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.49</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>

View File

@@ -0,0 +1,808 @@
/***************************************************************************
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************/
package com.taosdata.jdbc;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
public abstract class AbstractDatabaseMetaData implements DatabaseMetaData {
private final static String PRODUCT_NAME = "TDengine";
private final static String PRODUCT_VESION = "2.0.x.x";
private final static String DRIVER_NAME = "taos-jdbcdriver";
private final static String DRIVER_VERSION = "2.0.x";
private final static int DRIVER_MAJAR_VERSION = 2;
private final static int DRIVER_MINOR_VERSION = 0;
public boolean allProceduresAreCallable() throws SQLException {
return false;
}
public boolean allTablesAreSelectable() throws SQLException {
return false;
}
public abstract String getURL() throws SQLException;
public abstract String getUserName() throws SQLException;
public boolean isReadOnly() throws SQLException {
return false;
}
public boolean nullsAreSortedHigh() throws SQLException {
return false;
}
public boolean nullsAreSortedLow() throws SQLException {
return !nullsAreSortedHigh();
}
public boolean nullsAreSortedAtStart() throws SQLException {
return true;
}
public boolean nullsAreSortedAtEnd() throws SQLException {
return !nullsAreSortedAtStart();
}
public String getDatabaseProductName() throws SQLException {
return PRODUCT_NAME;
}
public String getDatabaseProductVersion() throws SQLException {
return PRODUCT_VESION;
}
public String getDriverName() throws SQLException {
return DRIVER_NAME;
}
public String getDriverVersion() throws SQLException {
return DRIVER_VERSION;
}
public int getDriverMajorVersion() {
return DRIVER_MAJAR_VERSION;
}
public int getDriverMinorVersion() {
return DRIVER_MINOR_VERSION;
}
public boolean usesLocalFiles() throws SQLException {
return false;
}
public boolean usesLocalFilePerTable() throws SQLException {
return false;
}
public boolean supportsMixedCaseIdentifiers() throws SQLException {
return false;
}
public boolean storesUpperCaseIdentifiers() throws SQLException {
return false;
}
public boolean storesLowerCaseIdentifiers() throws SQLException {
return false;
}
public boolean storesMixedCaseIdentifiers() throws SQLException {
return false;
}
public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
return false;
}
public boolean storesUpperCaseQuotedIdentifiers() throws SQLException {
return false;
}
public boolean storesLowerCaseQuotedIdentifiers() throws SQLException {
return false;
}
public boolean storesMixedCaseQuotedIdentifiers() throws SQLException {
return false;
}
public String getIdentifierQuoteString() throws SQLException {
return " ";
}
public String getSQLKeywords() throws SQLException {
return null;
}
public String getNumericFunctions() throws SQLException {
return null;
}
public String getStringFunctions() throws SQLException {
return null;
}
public String getSystemFunctions() throws SQLException {
return null;
}
public String getTimeDateFunctions() throws SQLException {
return null;
}
public String getSearchStringEscape() throws SQLException {
return null;
}
public String getExtraNameCharacters() throws SQLException {
return null;
}
public boolean supportsAlterTableWithAddColumn() throws SQLException {
return true;
}
public boolean supportsAlterTableWithDropColumn() throws SQLException {
return true;
}
public boolean supportsColumnAliasing() throws SQLException {
return true;
}
public boolean nullPlusNonNullIsNull() throws SQLException {
return false;
}
public boolean supportsConvert() throws SQLException {
return false;
}
public boolean supportsConvert(int fromType, int toType) throws SQLException {
return false;
}
public boolean supportsTableCorrelationNames() throws SQLException {
return false;
}
public boolean supportsDifferentTableCorrelationNames() throws SQLException {
return false;
}
public boolean supportsExpressionsInOrderBy() throws SQLException {
return false;
}
public boolean supportsOrderByUnrelated() throws SQLException {
return false;
}
public boolean supportsGroupBy() throws SQLException {
return false;
}
public boolean supportsGroupByUnrelated() throws SQLException {
return false;
}
public boolean supportsGroupByBeyondSelect() throws SQLException {
return false;
}
public boolean supportsLikeEscapeClause() throws SQLException {
return false;
}
public boolean supportsMultipleResultSets() throws SQLException {
return false;
}
public boolean supportsMultipleTransactions() throws SQLException {
return false;
}
public boolean supportsNonNullableColumns() throws SQLException {
return false;
}
public boolean supportsMinimumSQLGrammar() throws SQLException {
return false;
}
public boolean supportsCoreSQLGrammar() throws SQLException {
return false;
}
public boolean supportsExtendedSQLGrammar() throws SQLException {
return false;
}
public boolean supportsANSI92EntryLevelSQL() throws SQLException {
return false;
}
public boolean supportsANSI92IntermediateSQL() throws SQLException {
return false;
}
public boolean supportsANSI92FullSQL() throws SQLException {
return false;
}
public boolean supportsIntegrityEnhancementFacility() throws SQLException {
return false;
}
public boolean supportsOuterJoins() throws SQLException {
return false;
}
public boolean supportsFullOuterJoins() throws SQLException {
return false;
}
public boolean supportsLimitedOuterJoins() throws SQLException {
return false;
}
public String getSchemaTerm() throws SQLException {
return null;
}
public String getProcedureTerm() throws SQLException {
return null;
}
public String getCatalogTerm() throws SQLException {
return "database";
}
public boolean isCatalogAtStart() throws SQLException {
return true;
}
public String getCatalogSeparator() throws SQLException {
return ".";
}
public boolean supportsSchemasInDataManipulation() throws SQLException {
return false;
}
public boolean supportsSchemasInProcedureCalls() throws SQLException {
return false;
}
public boolean supportsSchemasInTableDefinitions() throws SQLException {
return false;
}
public boolean supportsSchemasInIndexDefinitions() throws SQLException {
return false;
}
public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException {
return false;
}
public boolean supportsCatalogsInDataManipulation() throws SQLException {
return true;
}
public boolean supportsCatalogsInProcedureCalls() throws SQLException {
return false;
}
public boolean supportsCatalogsInTableDefinitions() throws SQLException {
return false;
}
public boolean supportsCatalogsInIndexDefinitions() throws SQLException {
return false;
}
public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException {
return false;
}
public boolean supportsPositionedDelete() throws SQLException {
return false;
}
public boolean supportsPositionedUpdate() throws SQLException {
return false;
}
public boolean supportsSelectForUpdate() throws SQLException {
return false;
}
public boolean supportsStoredProcedures() throws SQLException {
return false;
}
public boolean supportsSubqueriesInComparisons() throws SQLException {
return false;
}
public boolean supportsSubqueriesInExists() throws SQLException {
return false;
}
public boolean supportsSubqueriesInIns() throws SQLException {
return false;
}
public boolean supportsSubqueriesInQuantifieds() throws SQLException {
return false;
}
public boolean supportsCorrelatedSubqueries() throws SQLException {
return false;
}
public boolean supportsUnion() throws SQLException {
return false;
}
public boolean supportsUnionAll() throws SQLException {
return false;
}
public boolean supportsOpenCursorsAcrossCommit() throws SQLException {
return false;
}
public boolean supportsOpenCursorsAcrossRollback() throws SQLException {
return false;
}
public boolean supportsOpenStatementsAcrossCommit() throws SQLException {
return false;
}
public boolean supportsOpenStatementsAcrossRollback() throws SQLException {
return false;
}
public int getMaxBinaryLiteralLength() throws SQLException {
return 0;
}
public int getMaxCharLiteralLength() throws SQLException {
return 0;
}
public int getMaxColumnNameLength() throws SQLException {
return 0;
}
public int getMaxColumnsInGroupBy() throws SQLException {
return 0;
}
public int getMaxColumnsInIndex() throws SQLException {
return 0;
}
public int getMaxColumnsInOrderBy() throws SQLException {
return 0;
}
public int getMaxColumnsInSelect() throws SQLException {
return 0;
}
public int getMaxColumnsInTable() throws SQLException {
return 0;
}
public int getMaxConnections() throws SQLException {
return 0;
}
public int getMaxCursorNameLength() throws SQLException {
return 0;
}
public int getMaxIndexLength() throws SQLException {
return 0;
}
public int getMaxSchemaNameLength() throws SQLException {
return 0;
}
public int getMaxProcedureNameLength() throws SQLException {
return 0;
}
public int getMaxCatalogNameLength() throws SQLException {
return 0;
}
public int getMaxRowSize() throws SQLException {
return 0;
}
public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
return false;
}
public int getMaxStatementLength() throws SQLException {
return 0;
}
public int getMaxStatements() throws SQLException {
return 0;
}
public int getMaxTableNameLength() throws SQLException {
return 0;
}
public int getMaxTablesInSelect() throws SQLException {
return 0;
}
public int getMaxUserNameLength() throws SQLException {
return 0;
}
public int getDefaultTransactionIsolation() throws SQLException {
return 0;
}
public boolean supportsTransactions() throws SQLException {
return false;
}
public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
return false;
}
public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException {
return false;
}
public boolean supportsDataManipulationTransactionsOnly() throws SQLException {
return false;
}
public boolean dataDefinitionCausesTransactionCommit() throws SQLException {
return false;
}
public boolean dataDefinitionIgnoredInTransactions() throws SQLException {
return false;
}
public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern)
throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern,
String columnNamePattern) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public abstract ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types)
throws SQLException;
public ResultSet getSchemas() throws SQLException {
return getEmptyResultSet();
}
public abstract ResultSet getCatalogs() throws SQLException;
public ResultSet getTableTypes() throws SQLException {
DatabaseMetaDataResultSet resultSet = new DatabaseMetaDataResultSet();
// set up ColumnMetaDataList
List<ColumnMetaData> columnMetaDataList = new ArrayList<ColumnMetaData>(1);
ColumnMetaData colMetaData = new ColumnMetaData();
colMetaData.setColIndex(0);
colMetaData.setColName("TABLE_TYPE");
colMetaData.setColSize(10);
colMetaData.setColType(TSDBConstants.TSDB_DATA_TYPE_BINARY);
columnMetaDataList.add(colMetaData);
// set up rowDataList
List<TSDBResultSetRowData> rowDataList = new ArrayList<TSDBResultSetRowData>(2);
TSDBResultSetRowData rowData = new TSDBResultSetRowData();
rowData.setString(0, "TABLE");
rowDataList.add(rowData);
rowData = new TSDBResultSetRowData();
rowData.setString(0, "STABLE");
rowDataList.add(rowData);
resultSet.setColumnMetaDataList(columnMetaDataList);
resultSet.setRowDataList(rowDataList);
return resultSet;
}
public abstract ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException;
protected int getNullable(int index, String typeName) {
if (index == 0 && "TIMESTAMP".equals(typeName))
return DatabaseMetaData.columnNoNulls;
return DatabaseMetaData.columnNullable;
}
protected int getColumnSize(String typeName, int length) {
switch (typeName) {
case "TIMESTAMP":
return 23;
default:
return 0;
}
}
protected int getDecimalDigits(String typeName) {
switch (typeName) {
case "FLOAT":
return 5;
case "DOUBLE":
return 9;
default:
return 0;
}
}
protected int getDataType(String typeName) {
switch (typeName) {
case "TIMESTAMP":
return Types.TIMESTAMP;
case "INT":
return Types.INTEGER;
case "BIGINT":
return Types.BIGINT;
case "FLOAT":
return Types.FLOAT;
case "DOUBLE":
return Types.DOUBLE;
case "BINARY":
return Types.BINARY;
case "SMALLINT":
return Types.SMALLINT;
case "TINYINT":
return Types.TINYINT;
case "BOOL":
return Types.BOOLEAN;
case "NCHAR":
return Types.NCHAR;
default:
return Types.NULL;
}
}
public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable,
String foreignCatalog, String foreignSchema, String foreignTable) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getTypeInfo() throws SQLException {
return getEmptyResultSet();
}
public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate)
throws SQLException {
return getEmptyResultSet();
}
public boolean supportsResultSetType(int type) throws SQLException {
return false;
}
public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException {
return false;
}
public boolean ownUpdatesAreVisible(int type) throws SQLException {
return false;
}
public boolean ownDeletesAreVisible(int type) throws SQLException {
return false;
}
public boolean ownInsertsAreVisible(int type) throws SQLException {
return false;
}
public boolean othersUpdatesAreVisible(int type) throws SQLException {
return false;
}
public boolean othersDeletesAreVisible(int type) throws SQLException {
return false;
}
public boolean othersInsertsAreVisible(int type) throws SQLException {
return false;
}
public boolean updatesAreDetected(int type) throws SQLException {
return false;
}
public boolean deletesAreDetected(int type) throws SQLException {
return false;
}
public boolean insertsAreDetected(int type) throws SQLException {
return false;
}
public boolean supportsBatchUpdates() throws SQLException {
return false;
}
public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types)
throws SQLException {
return getEmptyResultSet();
}
public Connection getConnection() throws SQLException {
return null;
}
public boolean supportsSavepoints() throws SQLException {
return false;
}
public boolean supportsNamedParameters() throws SQLException {
return false;
}
public boolean supportsMultipleOpenResults() throws SQLException {
return false;
}
public boolean supportsGetGeneratedKeys() throws SQLException {
return false;
}
public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern,
String attributeNamePattern) throws SQLException {
return getEmptyResultSet();
}
public boolean supportsResultSetHoldability(int holdability) throws SQLException {
return false;
}
public int getResultSetHoldability() throws SQLException {
return 0;
}
public int getDatabaseMajorVersion() throws SQLException {
return 0;
}
public int getDatabaseMinorVersion() throws SQLException {
return 0;
}
public int getJDBCMajorVersion() throws SQLException {
return 0;
}
public int getJDBCMinorVersion() throws SQLException {
return 0;
}
public int getSQLStateType() throws SQLException {
return 0;
}
public boolean locatorsUpdateCopy() throws SQLException {
return false;
}
public boolean supportsStatementPooling() throws SQLException {
return false;
}
public RowIdLifetime getRowIdLifetime() throws SQLException {
return null;
}
public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
return null;
}
public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
return false;
}
public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
return false;
}
public ResultSet getClientInfoProperties() throws SQLException {
return getEmptyResultSet();
}
public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern)
throws SQLException {
return getEmptyResultSet();
}
public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern,
String columnNamePattern) throws SQLException {
return getEmptyResultSet();
}
public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern,
String columnNamePattern) throws SQLException {
return getEmptyResultSet();
}
public boolean generatedKeyAlwaysReturned() throws SQLException {
return false;
}
private ResultSet getEmptyResultSet() {
return new EmptyResultSet();
}
}

View File

@@ -19,68 +19,71 @@ import java.util.Map;
public abstract class TSDBConstants {
public static final String DEFAULT_PORT = "6200";
public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!";
public static final String INVALID_VARIABLES = "invalid variables";
public static Map<Integer, String> DATATYPE_MAP = null;
public static final String DEFAULT_PORT = "6200";
public static final String UNSUPPORT_METHOD_EXCEPTIONZ_MSG = "this operation is NOT supported currently!";
public static final String INVALID_VARIABLES = "invalid variables";
public static Map<Integer, String> DATATYPE_MAP = null;
public static final long JNI_NULL_POINTER = 0L;
public static final long JNI_NULL_POINTER = 0L;
public static final int JNI_SUCCESS = 0;
public static final int JNI_TDENGINE_ERROR = -1;
public static final int JNI_CONNECTION_NULL = -2;
public static final int JNI_RESULT_SET_NULL = -3;
public static final int JNI_NUM_OF_FIELDS_0 = -4;
public static final int JNI_SQL_NULL = -5;
public static final int JNI_FETCH_END = -6;
public static final int TSDB_DATA_TYPE_NULL = 0;
public static final int TSDB_DATA_TYPE_BOOL = 1;
public static final int TSDB_DATA_TYPE_TINYINT = 2;
public static final int TSDB_DATA_TYPE_SMALLINT = 3;
public static final int TSDB_DATA_TYPE_INT = 4;
public static final int TSDB_DATA_TYPE_BIGINT = 5;
public static final int TSDB_DATA_TYPE_FLOAT = 6;
public static final int TSDB_DATA_TYPE_DOUBLE = 7;
public static final int TSDB_DATA_TYPE_BINARY = 8;
public static final int TSDB_DATA_TYPE_TIMESTAMP = 9;
public static final int TSDB_DATA_TYPE_NCHAR = 10;
public static String WrapErrMsg(String msg) {
return "TDengine Error: " + msg;
}
public static final int JNI_SUCCESS = 0;
public static final int JNI_TDENGINE_ERROR = -1;
public static final int JNI_CONNECTION_NULL = -2;
public static final int JNI_RESULT_SET_NULL = -3;
public static final int JNI_NUM_OF_FIELDS_0 = -4;
public static final int JNI_SQL_NULL = -5;
public static final int JNI_FETCH_END = -6;
public static String FixErrMsg(int code) {
switch (code) {
case JNI_TDENGINE_ERROR:
return WrapErrMsg("internal error of database!");
case JNI_CONNECTION_NULL:
return WrapErrMsg("invalid tdengine connection!");
case JNI_RESULT_SET_NULL:
return WrapErrMsg("invalid resultset pointer!");
case JNI_NUM_OF_FIELDS_0:
return WrapErrMsg("invalid num of fields!");
case JNI_SQL_NULL:
return WrapErrMsg("can't execute empty sql!");
case JNI_FETCH_END:
return WrapErrMsg("fetch to the end of resultset");
default:
break;
}
return WrapErrMsg("unkown error!");
}
public static final int TSDB_DATA_TYPE_NULL = 0;
public static final int TSDB_DATA_TYPE_BOOL = 1;
public static final int TSDB_DATA_TYPE_TINYINT = 2;
public static final int TSDB_DATA_TYPE_SMALLINT = 3;
public static final int TSDB_DATA_TYPE_INT = 4;
public static final int TSDB_DATA_TYPE_BIGINT = 5;
public static final int TSDB_DATA_TYPE_FLOAT = 6;
public static final int TSDB_DATA_TYPE_DOUBLE = 7;
public static final int TSDB_DATA_TYPE_BINARY = 8;
public static final int TSDB_DATA_TYPE_TIMESTAMP = 9;
public static final int TSDB_DATA_TYPE_NCHAR = 10;
static {
DATATYPE_MAP = new HashMap<Integer, String>();
DATATYPE_MAP.put(1, "BOOL");
DATATYPE_MAP.put(2, "TINYINT");
DATATYPE_MAP.put(3, "SMALLINT");
DATATYPE_MAP.put(4, "INT");
DATATYPE_MAP.put(5, "BIGINT");
DATATYPE_MAP.put(6, "FLOAT");
DATATYPE_MAP.put(7, "DOUBLE");
DATATYPE_MAP.put(8, "BINARY");
DATATYPE_MAP.put(9, "TIMESTAMP");
DATATYPE_MAP.put(10, "NCHAR");
}
// nchar field's max length
public static final int maxFieldSize = 16 * 1024;
public static String WrapErrMsg(String msg) {
return "TDengine Error: " + msg;
}
public static String FixErrMsg(int code) {
switch (code) {
case JNI_TDENGINE_ERROR:
return WrapErrMsg("internal error of database!");
case JNI_CONNECTION_NULL:
return WrapErrMsg("invalid tdengine connection!");
case JNI_RESULT_SET_NULL:
return WrapErrMsg("invalid resultset pointer!");
case JNI_NUM_OF_FIELDS_0:
return WrapErrMsg("invalid num of fields!");
case JNI_SQL_NULL:
return WrapErrMsg("can't execute empty sql!");
case JNI_FETCH_END:
return WrapErrMsg("fetch to the end of resultset");
default:
break;
}
return WrapErrMsg("unkown error!");
}
static {
DATATYPE_MAP = new HashMap<Integer, String>();
DATATYPE_MAP.put(1, "BOOL");
DATATYPE_MAP.put(2, "TINYINT");
DATATYPE_MAP.put(3, "SMALLINT");
DATATYPE_MAP.put(4, "INT");
DATATYPE_MAP.put(5, "BIGINT");
DATATYPE_MAP.put(6, "FLOAT");
DATATYPE_MAP.put(7, "DOUBLE");
DATATYPE_MAP.put(8, "BINARY");
DATATYPE_MAP.put(9, "TIMESTAMP");
DATATYPE_MAP.put(10, "NCHAR");
}
}

View File

@@ -1,19 +1,29 @@
package com.taosdata.jdbc.rs;
import com.taosdata.jdbc.TSDBConstants;
import com.taosdata.jdbc.TSDBDriver;
import java.sql.*;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
public class RestfulConnection implements Connection {
private static final String CONNECTION_IS_CLOSED = "connection is closed.";
private static final String AUTO_COMMIT_IS_TRUE = "auto commit is true";
private final String host;
private final int port;
private final Properties props;
private final String database;
private volatile String database;
private final String url;
/******************************************************/
private boolean isClosed;
private DatabaseMetaData metadata;
private Map<String, Class<?>> typeMap;
private Properties clientInfoProps = new Properties();
public RestfulConnection(String host, String port, Properties props, String database, String url) {
this.host = host;
@@ -21,280 +31,424 @@ public class RestfulConnection implements Connection {
this.props = props;
this.database = database;
this.url = url;
this.metadata = new RestfulDatabaseMetaData(url, props.getProperty(TSDBDriver.PROPERTY_KEY_USER), this);
}
@Override
public Statement createStatement() throws SQLException {
if (isClosed())
throw new SQLException(TSDBConstants.WrapErrMsg("restful TDengine connection is closed."));
throw new SQLException(CONNECTION_IS_CLOSED);
return new RestfulStatement(this, database);
}
@Override
public PreparedStatement prepareStatement(String sql) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
//TODO: prepareStatement
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public CallableStatement prepareCall(String sql) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public String nativeSQL(String sql) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
//nothing did
return sql;
}
@Override
public void setAutoCommit(boolean autoCommit) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (!autoCommit)
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean getAutoCommit() throws SQLException {
return false;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return true;
}
@Override
public void commit() throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (getAutoCommit())
throw new SQLException(AUTO_COMMIT_IS_TRUE);
//nothing to do
}
@Override
public void rollback() throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (getAutoCommit())
throw new SQLException(AUTO_COMMIT_IS_TRUE);
//nothing to do
}
@Override
public void close() throws SQLException {
if (isClosed)
return;
//TODO: release all resources
isClosed = true;
}
@Override
public boolean isClosed() throws SQLException {
return false;
return isClosed;
}
@Override
public DatabaseMetaData getMetaData() throws SQLException {
//TODO: RestfulDatabaseMetaData is not implemented
return new RestfulDatabaseMetaData();
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return this.metadata;
}
@Override
public void setReadOnly(boolean readOnly) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
// nothing to do
}
@Override
public boolean isReadOnly() throws SQLException {
return false;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return true;
}
@Override
public void setCatalog(String catalog) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
synchronized (RestfulConnection.class) {
this.database = catalog;
}
}
@Override
public String getCatalog() throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return this.database;
}
@Override
public void setTransactionIsolation(int level) throws SQLException {
//transaction is not supported
throw new SQLFeatureNotSupportedException("transactions are not supported");
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
switch (level) {
case Connection.TRANSACTION_NONE:
break;
case Connection.TRANSACTION_READ_UNCOMMITTED:
case Connection.TRANSACTION_READ_COMMITTED:
case Connection.TRANSACTION_REPEATABLE_READ:
case Connection.TRANSACTION_SERIALIZABLE:
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
default:
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
}
}
/**
*
*/
@Override
public int getTransactionIsolation() throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
//Connection.TRANSACTION_NONE specifies that transactions are not supported.
return Connection.TRANSACTION_NONE;
}
@Override
public SQLWarning getWarnings() throws SQLException {
//TODO: getWarnings not implemented
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return null;
}
@Override
public void clearWarnings() throws SQLException {
throw new SQLFeatureNotSupportedException("clearWarnings not supported.");
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
//nothing to do
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY) {
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
if (resultSetConcurrency != ResultSet.CONCUR_READ_ONLY)
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return createStatement();
}
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY)
throw new SQLFeatureNotSupportedException(TSDBConstants.INVALID_VARIABLES);
return this.prepareStatement(sql);
}
@Override
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (resultSetType != ResultSet.TYPE_FORWARD_ONLY || resultSetConcurrency != ResultSet.CONCUR_READ_ONLY)
throw new SQLFeatureNotSupportedException(TSDBConstants.INVALID_VARIABLES);
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Map<String, Class<?>> getTypeMap() throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
synchronized (RestfulConnection.class) {
if (this.typeMap == null) {
this.typeMap = new HashMap<>();
}
return this.typeMap;
}
}
@Override
public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
synchronized (RestfulConnection.class) {
this.typeMap = map;
}
}
@Override
public void setHoldability(int holdability) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (holdability != ResultSet.HOLD_CURSORS_OVER_COMMIT)
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int getHoldability() throws SQLException {
return 0;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return ResultSet.HOLD_CURSORS_OVER_COMMIT;
}
@Override
public Savepoint setSavepoint() throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (getAutoCommit())
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
//nothing to do
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Savepoint setSavepoint(String name) throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (getAutoCommit())
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
//nothing to do
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public void rollback(Savepoint savepoint) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
if (getAutoCommit())
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
//nothing to do
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public void releaseSavepoint(Savepoint savepoint) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT)
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return createStatement(resultSetType, resultSetConcurrency);
}
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
if (resultSetHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT)
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return prepareStatement(sql, resultSetType, resultSetConcurrency);
}
@Override
public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
return null;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
return null;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
return null;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
return null;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Clob createClob() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Blob createBlob() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public NClob createNClob() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public SQLXML createSQLXML() throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean isValid(int timeout) throws SQLException {
return false;
if (timeout < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// TODO:
/* The driver shall submit a query on the connection or use some other mechanism that positively verifies
the connection is still valid when this method is called.*/
return !isClosed();
}
@Override
public void setClientInfo(String name, String value) throws SQLClientInfoException {
if (isClosed)
throw new SQLClientInfoException();
clientInfoProps.setProperty(name, value);
}
@Override
public void setClientInfo(Properties properties) throws SQLClientInfoException {
if (isClosed)
throw new SQLClientInfoException();
for (Enumeration<Object> enumer = properties.keys(); enumer.hasMoreElements(); ) {
String name = (String) enumer.nextElement();
clientInfoProps.put(name, properties.getProperty(name));
}
}
@Override
public String getClientInfo(String name) throws SQLException {
return null;
if (isClosed)
throw new SQLClientInfoException();
return clientInfoProps.getProperty(name);
}
@Override
public Properties getClientInfo() throws SQLException {
return null;
if (isClosed)
throw new SQLClientInfoException();
return clientInfoProps;
}
@Override
public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
//TODO: not supported
throw new SQLFeatureNotSupportedException();
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public void setSchema(String schema) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
synchronized (RestfulConnection.class) {
this.database = schema;
}
}
@Override
public String getSchema() throws SQLException {
return null;
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return this.database;
}
@Override
public void abort(Executor executor) throws SQLException {
if (executor == null) {
throw new SQLException("Executor can not be null");
}
executor.execute(() -> {
try {
close();
} catch (SQLException e) {
e.printStackTrace();
}
});
}
@Override
public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int getNetworkTimeout() throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
return 0;
}

View File

@@ -33,7 +33,7 @@ public class RestfulDriver extends AbstractTaosDriver {
return null;
Properties props = parseURL(url, info);
String host = props.getProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
String host = props.getProperty(TSDBDriver.PROPERTY_KEY_HOST);
String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041");
String database = props.containsKey(TSDBDriver.PROPERTY_KEY_DBNAME) ? props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME) : null;

View File

@@ -2,13 +2,15 @@ package com.taosdata.jdbc.rs;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.List;
import java.util.ArrayList;
public class RestfulResultSetMetaData implements ResultSetMetaData {
private List<String> fields;
private final String database;
private ArrayList<RestfulResultSet.Field> fields;
public RestfulResultSetMetaData(List<String> fields) {
public RestfulResultSetMetaData(String database, ArrayList<RestfulResultSet.Field> fields) {
this.database = database;
this.fields = fields;
}
@@ -24,6 +26,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public boolean isCaseSensitive(int column) throws SQLException {
//TODO
return false;
}
@@ -39,7 +42,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public int isNullable(int column) throws SQLException {
return 0;
return ResultSetMetaData.columnNullable;
}
@Override
@@ -54,7 +57,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public String getColumnLabel(int column) throws SQLException {
return fields.get(column - 1);
return fields.get(column - 1).name;
}
@Override
@@ -64,7 +67,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public String getSchemaName(int column) throws SQLException {
return null;
return this.database;
}
@Override
@@ -84,7 +87,7 @@ public class RestfulResultSetMetaData implements ResultSetMetaData {
@Override
public String getCatalogName(int column) throws SQLException {
return null;
return this.database;
}
@Override

View File

@@ -7,20 +7,60 @@ import com.taosdata.jdbc.rs.util.HttpClientPoolUtil;
import com.taosdata.jdbc.utils.SqlSyntaxValidator;
import java.sql.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
public class RestfulStatement implements Statement {
private static final String STATEMENT_CLOSED = "Statement already closed.";
private boolean closed;
private String database;
private final RestfulConnection conn;
public RestfulStatement(RestfulConnection c, String database) {
this.conn = c;
private volatile RestfulResultSet resultSet;
private volatile int affectedRows;
private volatile boolean closeOnCompletion;
public RestfulStatement(RestfulConnection conn, String database) {
this.conn = conn;
this.database = database;
}
private String[] parseTableIdentifier(String sql) {
sql = sql.trim().toLowerCase();
String[] ret = null;
if (sql.contains("where"))
sql = sql.substring(0, sql.indexOf("where"));
if (sql.contains("interval"))
sql = sql.substring(0, sql.indexOf("interval"));
if (sql.contains("fill"))
sql = sql.substring(0, sql.indexOf("fill"));
if (sql.contains("sliding"))
sql = sql.substring(0, sql.indexOf("sliding"));
if (sql.contains("group by"))
sql = sql.substring(0, sql.indexOf("group by"));
if (sql.contains("order by"))
sql = sql.substring(0, sql.indexOf("order by"));
if (sql.contains("slimit"))
sql = sql.substring(0, sql.indexOf("slimit"));
if (sql.contains("limit"))
sql = sql.substring(0, sql.indexOf("limit"));
// parse
if (sql.contains("from")) {
sql = sql.substring(sql.indexOf("from") + 4).trim();
return Arrays.asList(sql.split(",")).stream()
.map(tableIdentifier -> {
tableIdentifier = tableIdentifier.trim();
if (tableIdentifier.contains(" "))
tableIdentifier = tableIdentifier.substring(0, tableIdentifier.indexOf(" "));
return tableIdentifier;
}).collect(Collectors.joining(",")).split(",");
}
return ret;
}
@Override
public ResultSet executeQuery(String sql) throws SQLException {
if (isClosed())
@@ -29,43 +69,33 @@ public class RestfulStatement implements Statement {
throw new SQLException("not a select sql for executeQuery: " + sql);
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
// row data
String result = HttpClientPoolUtil.execute(url, sql);
String fields = "";
List<String> words = Arrays.asList(sql.split(" "));
if (words.get(0).equalsIgnoreCase("select")) {
int index = 0;
if (words.contains("from")) {
index = words.indexOf("from");
JSONObject resultJson = JSON.parseObject(result);
if (resultJson.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
}
// parse table name from sql
String[] tableIdentifiers = parseTableIdentifier(sql);
if (tableIdentifiers != null) {
List<JSONObject> fieldJsonList = new ArrayList<>();
for (String tableIdentifier : tableIdentifiers) {
// field meta
String fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + tableIdentifier);
JSONObject fieldJson = JSON.parseObject(fields);
if (fieldJson.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + fieldJson.getString("desc") + "\n" + "error code: " + fieldJson.getString("code")));
}
fieldJsonList.add(fieldJson);
}
if (words.contains("FROM")) {
index = words.indexOf("FROM");
}
fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + words.get(index + 1));
this.resultSet = new RestfulResultSet(database, this, resultJson, fieldJsonList);
} else {
this.resultSet = new RestfulResultSet(database, this, resultJson);
}
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
jsonObject.getString("desc") + "\n" +
"error code: " + jsonObject.getString("code")));
}
String dataStr = jsonObject.getString("data");
if ("use".equalsIgnoreCase(fields.split(" ")[0])) {
return new RestfulResultSet(dataStr, "");
}
JSONObject jsonField = JSON.parseObject(fields);
if (jsonField == null) {
return new RestfulResultSet(dataStr, "");
}
if (jsonField.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
jsonField.getString("desc") + "\n" +
"error code: " + jsonField.getString("code")));
}
String fieldData = jsonField.getString("data");
return new RestfulResultSet(dataStr, fieldData);
this.affectedRows = 0;
return resultSet;
}
@Override
@@ -78,77 +108,103 @@ public class RestfulStatement implements Statement {
if (this.database == null)
throw new SQLException("Database not specified or available");
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
final String url = "http://" + conn.getHost().trim() + ":" + conn.getPort() + "/rest/sql";
// HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
String result = HttpClientPoolUtil.execute(url, sql);
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
jsonObject.getString("desc") + "\n" +
"error code: " + jsonObject.getString("code")));
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + jsonObject.getString("desc") + "\n" + "error code: " + jsonObject.getString("code")));
}
return Integer.parseInt(jsonObject.getString("rows"));
this.resultSet = null;
this.affectedRows = Integer.parseInt(jsonObject.getString("rows"));
return this.affectedRows;
}
@Override
public void close() throws SQLException {
this.closed = true;
synchronized (RestfulStatement.class) {
if (!isClosed())
this.closed = true;
}
}
@Override
public int getMaxFieldSize() throws SQLException {
return 0;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return TSDBConstants.maxFieldSize;
}
@Override
public void setMaxFieldSize(int max) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (max < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// nothing to do
}
@Override
public int getMaxRows() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return 0;
}
@Override
public void setMaxRows(int max) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (max < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// nothing to do
}
@Override
public void setEscapeProcessing(boolean enable) throws SQLException {
if (isClosed())
throw new SQLException(RestfulStatement.STATEMENT_CLOSED);
}
@Override
public int getQueryTimeout() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return 0;
}
@Override
public void setQueryTimeout(int seconds) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (seconds < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
}
@Override
public void cancel() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public SQLWarning getWarnings() throws SQLException {
//TODO: getWarnings not Implemented
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return null;
}
@Override
public void clearWarnings() throws SQLException {
// nothing to do
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
}
@Override
public void setCursorName(String name) throws SQLException {
if (isClosed())
throw new SQLException(RestfulStatement.STATEMENT_CLOSED);
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
@@ -159,133 +215,181 @@ public class RestfulStatement implements Statement {
//if a USE statement was executed, the current Statement's catalog should be set to the new database
if (SqlSyntaxValidator.isUseSql(sql)) {
this.database = sql.trim().replace("use", "").trim();
this.conn.setCatalog(this.database);
}
if (this.database == null)
throw new SQLException("Database not specified or available");
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
// use database
HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
// execute sql
String result = HttpClientPoolUtil.execute(url, sql);
// parse result
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
jsonObject.getString("desc") + "\n" +
"error code: " + jsonObject.getString("code")));
if (SqlSyntaxValidator.isSelectSql(sql)) {
executeQuery(sql);
} else if (SqlSyntaxValidator.isShowSql(sql) || SqlSyntaxValidator.isDescribeSql(sql)) {
final String url = "http://" + conn.getHost().trim() + ":" + conn.getPort() + "/rest/sql";
if (!SqlSyntaxValidator.isShowDatabaseSql(sql)) {
HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
}
String result = HttpClientPoolUtil.execute(url, sql);
JSONObject resultJson = JSON.parseObject(result);
if (resultJson.getString("status").equals("error")) {
throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " + resultJson.getString("desc") + "\n" + "error code: " + resultJson.getString("code")));
}
this.resultSet = new RestfulResultSet(database, this, resultJson);
} else {
executeUpdate(sql);
}
return true;
}
@Override
public ResultSet getResultSet() throws SQLException {
return null;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return resultSet;
}
@Override
public int getUpdateCount() throws SQLException {
return 0;
if (isClosed()) {
throw new SQLException("Invalid method call on a closed statement.");
}
return this.affectedRows;
}
@Override
public boolean getMoreResults() throws SQLException {
return false;
return getMoreResults(CLOSE_CURRENT_RESULT);
}
@Override
public void setFetchDirection(int direction) throws SQLException {
if (direction != ResultSet.FETCH_FORWARD && direction != ResultSet.FETCH_REVERSE && direction != ResultSet.FETCH_UNKNOWN)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
this.resultSet.setFetchDirection(direction);
}
@Override
public int getFetchDirection() throws SQLException {
return 0;
return this.resultSet.getFetchDirection();
}
@Override
public void setFetchSize(int rows) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (rows < 0)
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
//nothing to do
}
@Override
public int getFetchSize() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return 0;
}
@Override
public int getResultSetConcurrency() throws SQLException {
return 0;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return this.resultSet.getConcurrency();
}
@Override
public int getResultSetType() throws SQLException {
return 0;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return this.resultSet.getType();
}
@Override
public void addBatch(String sql) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
//TODO:
}
@Override
public void clearBatch() throws SQLException {
//TODO:
}
@Override
public int[] executeBatch() throws SQLException {
//TODO:
return new int[0];
}
@Override
public Connection getConnection() throws SQLException {
return null;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return this.conn;
}
@Override
public boolean getMoreResults(int current) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
if (resultSet == null)
return false;
// switch (current) {
// case CLOSE_CURRENT_RESULT:
// resultSet.close();
// break;
// case KEEP_CURRENT_RESULT:
// break;
// case CLOSE_ALL_RESULTS:
// resultSet.close();
// break;
// default:
// throw new SQLException(TSDBConstants.INVALID_VARIABLES);
// }
// return next;
return false;
}
@Override
public ResultSet getGeneratedKeys() throws SQLException {
return null;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
return 0;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
return 0;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int executeUpdate(String sql, String[] columnNames) throws SQLException {
return 0;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
return false;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean execute(String sql, int[] columnIndexes) throws SQLException {
return false;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public boolean execute(String sql, String[] columnNames) throws SQLException {
return false;
throw new SQLFeatureNotSupportedException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
@Override
public int getResultSetHoldability() throws SQLException {
return 0;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return this.resultSet.getHoldability();
}
@Override
@ -295,22 +399,30 @@ public class RestfulStatement implements Statement {
@Override
public void setPoolable(boolean poolable) throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
//nothing to do
}
@Override
public boolean isPoolable() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return false;
}
@Override
public void closeOnCompletion() throws SQLException {
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
this.closeOnCompletion = true;
}
@Override
public boolean isCloseOnCompletion() throws SQLException {
return false;
if (isClosed())
throw new SQLException(STATEMENT_CLOSED);
return this.closeOnCompletion;
}
@Override

View File

@ -17,6 +17,8 @@ import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
import java.nio.charset.Charset;
public class HttpClientPoolUtil {
public static PoolingHttpClientConnectionManager cm = null;
@ -94,7 +96,9 @@ public class HttpClientPoolUtil {
initPools();
}
method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
method.setEntity(new StringEntity(data));
method.setHeader("Authorization", "Basic cm9vdDp0YW9zZGF0YQ==");
method.setHeader("Content-Type", "text/plain");
method.setEntity(new StringEntity(data, Charset.forName("UTF-8")));
HttpContext context = HttpClientContext.create();
CloseableHttpResponse httpResponse = httpClient.execute(method, context);
httpEntity = httpResponse.getEntity();
@ -105,26 +109,13 @@ public class HttpClientPoolUtil {
if (method != null) {
method.abort();
}
// e.printStackTrace();
// logger.error("execute post request exception, url:" + uri + ", exception:" + e.toString()
// + ", cost time(ms):" + (System.currentTimeMillis() - startTime));
new Exception("execute post request exception, url:"
+ uri + ", exception:" + e.toString() +
", cost time(ms):" + (System.currentTimeMillis() - startTime))
.printStackTrace();
new Exception("execute post request exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
} finally {
if (httpEntity != null) {
try {
EntityUtils.consumeQuietly(httpEntity);
} catch (Exception e) {
// e.printStackTrace();
// logger.error("close response exception, url:" + uri + ", exception:" + e.toString()
// + ", cost time(ms):" + (System.currentTimeMillis() - startTime));
new Exception(
"close response exception, url:" + uri +
", exception:" + e.toString()
+ ", cost time(ms):" + (System.currentTimeMillis() - startTime))
.printStackTrace();
new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace();
}
}
}

View File

@ -15,14 +15,12 @@
package com.taosdata.jdbc.utils;
import com.taosdata.jdbc.TSDBConnection;
import com.taosdata.jdbc.TSDBJNIConnector;
import java.sql.Connection;
import java.sql.SQLException;
public class SqlSyntaxValidator {
private static final String[] updateSQL = {"insert", "update", "delete", "create", "alter", "drop", "show", "describe", "use"};
private static final String[] updateSQL = {"insert", "update", "delete", "create", "alter", "drop", "show", "describe", "use", "import"};
private static final String[] querySQL = {"select"};
private TSDBConnection tsdbConnection;
@ -31,22 +29,6 @@ public class SqlSyntaxValidator {
this.tsdbConnection = (TSDBConnection) connection;
}
public boolean validateSqlSyntax(String sql) throws SQLException {
boolean res = false;
if (tsdbConnection == null || tsdbConnection.isClosed()) {
throw new SQLException("invalid connection");
} else {
TSDBJNIConnector jniConnector = tsdbConnection.getConnection();
if (jniConnector == null) {
throw new SQLException("jniConnector is null");
} else {
res = jniConnector.validateCreateTableSql(sql);
}
}
return res;
}
public static boolean isValidForExecuteUpdate(String sql) {
for (String prefix : updateSQL) {
if (sql.trim().toLowerCase().startsWith(prefix))
@ -56,18 +38,28 @@ public class SqlSyntaxValidator {
}
public static boolean isUseSql(String sql) {
return sql.trim().toLowerCase().startsWith(updateSQL[8]) || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*");
return sql.trim().toLowerCase().startsWith("use") || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*");
}
public static boolean isUpdateSql(String sql) {
return sql.trim().toLowerCase().startsWith(updateSQL[1]);
public static boolean isShowSql(String sql) {
return sql.trim().toLowerCase().startsWith("show");
}
public static boolean isDescribeSql(String sql) {
return sql.trim().toLowerCase().startsWith("describe");
}
public static boolean isInsertSql(String sql) {
return sql.trim().toLowerCase().startsWith(updateSQL[0]);
return sql.trim().toLowerCase().startsWith("insert") || sql.trim().toLowerCase().startsWith("import");
}
public static boolean isSelectSql(String sql) {
return sql.trim().toLowerCase().startsWith(querySQL[0]);
return sql.trim().toLowerCase().startsWith("select");
}
public static boolean isShowDatabaseSql(String sql) {
return sql.trim().toLowerCase().matches("show\\s*databases");
}
}
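
Not part of this commit — for orientation only, a minimal sketch of how the predicates above classify a statement, mirroring the routing in RestfulStatement.execute() earlier in this diff. The sample class name is hypothetical; SqlSyntaxValidator is assumed to be on the classpath.

import com.taosdata.jdbc.utils.SqlSyntaxValidator;

public class SqlRoutingSketch {
    public static void main(String[] args) {
        String[] samples = {
                "select * from weather",
                "show databases",
                "describe weather",
                "import into t1 values(now, 1)"
        };
        for (String sql : samples) {
            if (SqlSyntaxValidator.isSelectSql(sql)) {
                System.out.println(sql + " -> query path (executeQuery)");
            } else if (SqlSyntaxValidator.isShowSql(sql) || SqlSyntaxValidator.isDescribeSql(sql)) {
                System.out.println(sql + " -> show/describe path (REST call; 'show databases' needs no prior 'use')");
            } else {
                System.out.println(sql + " -> update path (executeUpdate, covers insert/import)");
            }
        }
    }
}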

View File

@ -1,5 +1,6 @@
package com.taosdata.jdbc.rs;
import org.junit.*;
import org.junit.runners.MethodSorters;

View File

@ -0,0 +1,399 @@
package com.taosdata.jdbc.rs;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;
import java.sql.*;
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class SQLTest {
private static final String host = "master";
private static Connection connection;
@Test
public void testCase001() {
String sql = "create database if not exists restful_test";
execute(sql);
}
@Test
public void testCase002() {
String sql = "use restful_test";
execute(sql);
}
@Test
public void testCase003() {
String sql = "show databases";
executeWithResult(sql);
}
@Test
public void testCase004() {
String sql = "show tables";
executeWithResult(sql);
}
@Test
public void testCase005() {
String sql = "show stables";
executeWithResult(sql);
}
@Test
public void testCase006() {
String sql = "show dnodes";
executeWithResult(sql);
}
@Test
public void testCase007() {
String sql = "show vgroups";
executeWithResult(sql);
}
@Test
public void testCase008() {
String sql = "drop table if exists restful_test.weather";
execute(sql);
}
@Test
public void testCase009() {
String sql = "create table if not exists restful_test.weather(ts timestamp, temperature float) tags(location nchar(64))";
execute(sql);
}
@Test
public void testCase010() {
String sql = "create table t1 using restful_test.weather tags('北京')";
execute(sql);
}
@Test
public void testCase011() {
String sql = "insert into restful_test.t1 values(now, 22.22)";
executeUpdate(sql);
}
@Test
public void testCase012() {
String sql = "insert into restful_test.t1 values('2020-01-01 00:00:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase013() {
String sql = "insert into restful_test.t1 values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase014() {
String sql = "insert into restful_test.t2 using weather tags('上海') values('2020-01-01 00:03:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase015() {
String sql = "insert into restful_test.t2 using weather tags('上海') values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase016() {
String sql = "insert into t1 values('2020-01-01 01:0:00.000', 22.22),('2020-01-01 02:00:00.000', 22.22) t2 values('2020-01-01 01:0:00.000', 33.33),('2020-01-01 02:00:00.000', 33.33)";
executeUpdate(sql);
}
@Test
public void testCase017() {
String sql = "Insert into t3 using weather tags('广东') values('2020-01-01 01:0:00.000', 22.22),('2020-01-01 02:00:00.000', 22.22) t4 using weather tags('天津') values('2020-01-01 01:0:00.000', 33.33),('2020-01-01 02:00:00.000', 33.33)";
executeUpdate(sql);
}
@Test
public void testCase018() {
String sql = "select * from restful_test.t1";
executeQuery(sql);
}
@Test
public void testCase019() {
String sql = "select * from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase020() {
String sql = "select ts, temperature from restful_test.t1";
executeQuery(sql);
}
@Test
public void testCase021() {
String sql = "select ts, temperature from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase022() {
String sql = "select temperature, ts from restful_test.t1";
executeQuery(sql);
}
@Test
public void testCase023() {
String sql = "select temperature, ts from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase024() {
String sql = "import into restful_test.t5 using weather tags('石家庄') values('2020-01-01 00:01:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase025() {
String sql = "import into restful_test.t6 using weather tags('沈阳') values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase026() {
String sql = "import into restful_test.t7 using weather tags('长沙') values('2020-01-01 00:01:00.000', 22.22) restful_test.t8 using weather tags('吉林') values('2020-01-01 00:01:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase027() {
String sql = "import into restful_test.t9 using weather tags('武汉') values('2020-01-01 00:01:00.000', 22.22) ,('2020-01-02 00:01:00.000', 22.22) restful_test.t10 using weather tags('哈尔滨') values('2020-01-01 00:01:00.000', 22.22),('2020-01-02 00:01:00.000', 22.22)";
executeUpdate(sql);
}
@Test
public void testCase028() {
String sql = "select location, temperature, ts from restful_test.weather where temperature > 1";
executeQuery(sql);
}
@Test
public void testCase029() {
String sql = "select location, temperature, ts from restful_test.weather where temperature < 1";
executeQuery(sql);
}
@Test
public void testCase030() {
String sql = "select location, temperature, ts from restful_test.weather where ts > now";
executeQuery(sql);
}
@Test
public void testCase031() {
String sql = "select location, temperature, ts from restful_test.weather where ts < now";
executeQuery(sql);
}
@Test
public void testCase032() {
String sql = "select count(*) from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase033() {
String sql = "select first(*) from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase034() {
String sql = "select last(*) from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase035() {
String sql = "select last_row(*) from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase036() {
String sql = "select ts, ts as primary_key from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase037() {
String sql = "select database()";
execute("use restful_test");
executeQuery(sql);
}
@Test
public void testCase038() {
String sql = "select client_version()";
executeQuery(sql);
}
@Test
public void testCase039() {
String sql = "select server_status()";
executeQuery(sql);
}
@Test
public void testCase040() {
String sql = "select server_status() as status";
executeQuery(sql);
}
@Test
public void testCase041() {
String sql = "select tbname, location from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase042() {
String sql = "select count(tbname) from restful_test.weather";
executeQuery(sql);
}
@Test
public void testCase043() {
String sql = "select * from restful_test.weather where ts < now - 1h";
executeQuery(sql);
}
@Test
public void testCase044() {
String sql = "select * from restful_test.weather where ts < now - 1h and location like '%'";
executeQuery(sql);
}
@Test
public void testCase045() {
String sql = "select * from restful_test.weather where ts < now - 1h order by ts";
executeQuery(sql);
}
@Test
public void testCase046() {
String sql = "select last(*) from restful_test.weather where ts < now - 1h group by tbname order by tbname";
executeQuery(sql);
}
@Test
public void testCase047() {
String sql = "select * from restful_test.weather limit 2";
executeQuery(sql);
}
@Test
public void testCase048() {
String sql = "select * from restful_test.weather limit 2 offset 5";
executeQuery(sql);
}
@Test
public void testCase049() {
String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts ";
executeQuery(sql);
}
@Test
public void testCase050() {
String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts and t1.location = t3.location";
executeQuery(sql);
}
@Test
public void testCase051() {
String sql = "select * from restful_test.t1 tt, restful_test.t3 yy where tt.ts = yy.ts";
executeQuery(sql);
}
private void executeUpdate(String sql) {
try (Statement statement = connection.createStatement()) {
long start = System.currentTimeMillis();
int affectedRows = statement.executeUpdate(sql);
long end = System.currentTimeMillis();
System.out.println("[ affected rows : " + affectedRows + " ] time cost: " + (end - start) + " ms, execute statement ====> " + sql);
} catch (SQLException e) {
e.printStackTrace();
}
}
private void executeWithResult(String sql) {
try (Statement statement = connection.createStatement()) {
statement.execute(sql);
ResultSet resultSet = statement.getResultSet();
printResult(resultSet);
} catch (SQLException e) {
e.printStackTrace();
}
}
private void execute(String sql) {
try (Statement statement = connection.createStatement()) {
long start = System.currentTimeMillis();
boolean execute = statement.execute(sql);
long end = System.currentTimeMillis();
printSql(sql, execute, (end - start));
} catch (SQLException e) {
System.out.println("ERROR execute SQL ===> " + sql);
e.printStackTrace();
}
}
private static void printSql(String sql, boolean succeed, long cost) {
System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
}
private void executeQuery(String sql) {
try (Statement statement = connection.createStatement()) {
long start = System.currentTimeMillis();
ResultSet resultSet = statement.executeQuery(sql);
long end = System.currentTimeMillis();
printSql(sql, true, (end - start));
printResult(resultSet);
} catch (SQLException e) {
System.out.println("ERROR execute SQL ===> " + sql);
e.printStackTrace();
}
}
private static void printResult(ResultSet resultSet) throws SQLException {
ResultSetMetaData metaData = resultSet.getMetaData();
while (resultSet.next()) {
StringBuilder sb = new StringBuilder();
for (int i = 1; i <= metaData.getColumnCount(); i++) {
String columnLabel = metaData.getColumnLabel(i);
String value = resultSet.getString(i);
sb.append(columnLabel + ": " + value + "\t");
}
System.out.println(sb.toString());
}
}
@BeforeClass
public static void before() throws ClassNotFoundException, SQLException {
Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
}
@AfterClass
public static void after() throws SQLException {
connection.close();
}
}

View File

@ -188,8 +188,8 @@ void dnodeReprocessMWriteMsg(void *pMsg) {
++pWrite->pBatchMasterMsg->received;
if (pWrite->pBatchMasterMsg->successed + pWrite->pBatchMasterMsg->received
>= pWrite->pBatchMasterMsg->expected) {
dnodeSendRedirectMsg(&pWrite->rpcMsg, true);
dnodeFreeMWriteMsg(pWrite);
dnodeSendRedirectMsg(&pWrite->pBatchMasterMsg->rpcMsg, true);
dnodeFreeMWriteMsg(pWrite->pBatchMasterMsg);
}
mnodeDestroySubMsg(pWrite);

View File

@ -60,7 +60,7 @@ int32_t dnodeInitServer() {
rpcInit.label = "DND-S";
rpcInit.numOfThreads = 1;
rpcInit.cfp = dnodeProcessReqMsgFromDnode;
rpcInit.sessions = TSDB_MAX_VNODES;
rpcInit.sessions = TSDB_MAX_VNODES << 4;
rpcInit.connType = TAOS_CONN_SERVER;
rpcInit.idleTime = tsShellActivityTimer * 1000;
@ -123,7 +123,7 @@ int32_t dnodeInitClient() {
rpcInit.label = "DND-C";
rpcInit.numOfThreads = 1;
rpcInit.cfp = dnodeProcessRspFromDnode;
rpcInit.sessions = TSDB_MAX_VNODES;
rpcInit.sessions = TSDB_MAX_VNODES << 4;
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = "t";

View File

@ -6,12 +6,12 @@
"user": "root",
"password": "taosdata",
"databases": "db01",
"super_table_query":
{"rate":1, "concurrent":1,
"specified_table_query":
{"query_interval":1, "concurrent":1,
"sqls": [{"sql": "select count(*) from stb01", "result": "./query_res0.txt"}]
},
"sub_table_query":
{"stblname": "stb01", "rate":1, "threads":1,
"sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}]
"super_table_query":
{"stblname": "stb01", "query_interval":1, "threads":1,
"sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}]
}
}

View File

@ -1441,11 +1441,12 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, char* sTblName
strncpy(pTblName, (char *)row[0], TSDB_TABLE_NAME_LEN);
//printf("==== sub table name: %s\n", pTblName);
count++;
if (count == childTblCount) {
char *tmp = realloc(childTblName, (size_t)count*1.5*TSDB_TABLE_NAME_LEN);
if (count >= childTblCount - 1) {
char *tmp = realloc(childTblName, (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1);
if (tmp != NULL) {
childTblName = tmp;
memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, (size_t)(count*0.5*TSDB_TABLE_NAME_LEN));
childTblCount = (int)(childTblCount*1.5);
memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN));
} else {
// exit, if allocate more memory failed
printf("realloc fail for save child table name of %s.%s\n", dbName, sTblName);
@ -3960,7 +3961,11 @@ void *superQueryProcess(void *sarg) {
for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) {
int64_t t1 = taosGetTimestampUs();
selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], g_queryInfo.superQueryInfo.result[i]);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
}
selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile);
int64_t t2 = taosGetTimestampUs();
printf("taosc select sql return, Spent %f s\n", (t2 - t1)/1000000.0);
} else {
@ -4019,7 +4024,11 @@ void *subQueryProcess(void *sarg) {
for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) {
memset(sqlstr,0,sizeof(sqlstr));
replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], sqlstr, i);
selectAndGetResult(winfo->taos, sqlstr, g_queryInfo.subQueryInfo.result[i]);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID);
}
selectAndGetResult(winfo->taos, sqlstr, tmpFile);
}
}
et = taosGetTimestampMs();
@ -4193,7 +4202,11 @@ void *subSubscribeProcess(void *sarg) {
sprintf(topic, "taosdemo-subscribe-%d", i);
memset(subSqlstr,0,sizeof(subSqlstr));
replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], subSqlstr, i);
g_queryInfo.subQueryInfo.tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, g_queryInfo.subQueryInfo.result[i]);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID);
}
g_queryInfo.subQueryInfo.tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile);
if (NULL == g_queryInfo.subQueryInfo.tsub[i]) {
return NULL;
}
@ -4211,7 +4224,11 @@ void *subSubscribeProcess(void *sarg) {
TAOS_RES* res = taos_consume(g_queryInfo.subQueryInfo.tsub[i]);
if (res) {
getResult(res, g_queryInfo.subQueryInfo.result[i]);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID);
}
getResult(res, tmpFile);
taos_free_result(res);
}
}
@ -4244,7 +4261,11 @@ void *superSubscribeProcess(void *sarg) {
char topic[32] = {0};
for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) {
sprintf(topic, "taosdemo-subscribe-%d", i);
g_queryInfo.superQueryInfo.tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.superQueryInfo.sql[i], topic, g_queryInfo.superQueryInfo.result[i]);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.subQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
}
g_queryInfo.superQueryInfo.tsub[i] = subscribeImpl(winfo->taos, g_queryInfo.superQueryInfo.sql[i], topic, tmpFile);
if (NULL == g_queryInfo.superQueryInfo.tsub[i]) {
return NULL;
}
@ -4262,7 +4283,11 @@ void *superSubscribeProcess(void *sarg) {
TAOS_RES* res = taos_consume(g_queryInfo.superQueryInfo.tsub[i]);
if (res) {
getResult(res, g_queryInfo.superQueryInfo.result[i]);
char tmpFile[MAX_FILE_NAME_LEN*2] = {0};
if (g_queryInfo.superQueryInfo.result[i][0] != 0) {
sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID);
}
getResult(res, tmpFile);
taos_free_result(res);
}
}

View File

@ -311,6 +311,11 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) {
return TSDB_CODE_MND_INVALID_DB_OPTION;
}
if (pCfg->replications > mnodeGetDnodesNum()) {
mError("no enough dnode to config replica: %d, #dnodes: %d", pCfg->replications, mnodeGetDnodesNum());
return TSDB_CODE_MND_INVALID_DB_OPTION;
}
if (pCfg->quorum < TSDB_MIN_DB_REPLICA_OPTION || pCfg->quorum > TSDB_MAX_DB_REPLICA_OPTION) {
mError("invalid db option quorum:%d valid range: [%d, %d]", pCfg->quorum, TSDB_MIN_DB_REPLICA_OPTION,
TSDB_MAX_DB_REPLICA_OPTION);

View File

@ -827,21 +827,21 @@ static int32_t mnodeProcessBatchCreateTableMsg(SMnodeMsg *pMsg) {
SCreateTableMsg *pCreateTable = (SCreateTableMsg*) ((char*) pCreate + sizeof(SCMCreateTableMsg));
int32_t code = mnodeValidateCreateTableMsg(pCreateTable, pMsg);
if (code == TSDB_CODE_SUCCESS || code == TSDB_CODE_MND_TABLE_ALREADY_EXIST) {
++pMsg->pBatchMasterMsg->successed;
mnodeDestroySubMsg(pMsg);
}
if (code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
mnodeDestroySubMsg(pMsg);
return code;
++pMsg->pBatchMasterMsg->successed;
mnodeDestroySubMsg(pMsg);
} else if (code == TSDB_CODE_MND_ACTION_NEED_REPROCESSED) {
return code;
} else if (code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
++pMsg->pBatchMasterMsg->received;
mnodeDestroySubMsg(pMsg);
}
if (pMsg->pBatchMasterMsg->successed + pMsg->pBatchMasterMsg->received
>= pMsg->pBatchMasterMsg->expected) {
return code;
} else {
return TSDB_CODE_MND_ACTION_IN_PROGRESS;
dnodeSendRpcMWriteRsp(pMsg->pBatchMasterMsg, TSDB_CODE_SUCCESS);
}
return TSDB_CODE_MND_ACTION_IN_PROGRESS;
} else { // batch master replay, reprocess the whole batch
assert(0);
}

View File

@ -986,6 +986,19 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) {
mnodeMsg->pVgroup = NULL;
mnodeDestroyVgroup(pVgroup);
if (mnodeMsg->pBatchMasterMsg) {
++mnodeMsg->pBatchMasterMsg->received;
if (mnodeMsg->pBatchMasterMsg->successed + mnodeMsg->pBatchMasterMsg->received
>= mnodeMsg->pBatchMasterMsg->expected) {
dnodeSendRpcMWriteRsp(mnodeMsg->pBatchMasterMsg, code);
}
mnodeDestroySubMsg(mnodeMsg);
return;
}
dnodeSendRpcMWriteRsp(mnodeMsg, code);
}
} else {
@ -995,6 +1008,19 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {
.pObj = pVgroup
};
sdbDeleteRow(&row);
if (mnodeMsg->pBatchMasterMsg) {
++mnodeMsg->pBatchMasterMsg->received;
if (mnodeMsg->pBatchMasterMsg->successed + mnodeMsg->pBatchMasterMsg->received
>= mnodeMsg->pBatchMasterMsg->expected) {
dnodeSendRpcMWriteRsp(mnodeMsg->pBatchMasterMsg, mnodeMsg->code);
}
mnodeDestroySubMsg(mnodeMsg);
return;
}
dnodeSendRpcMWriteRsp(mnodeMsg, mnodeMsg->code);
}
}
@ -1192,4 +1218,4 @@ void mnodeSetVgidVer(int8_t *cver, uint64_t iver) {
cver[0] = (int8_t)((int32_t)(iver % 1000000) / 10000);
cver[1] = (int8_t)((int32_t)(iver % 100000) / 100);
cver[2] = (int8_t)(iver % 100);
}
}

View File

@ -1567,6 +1567,7 @@ static int rpcCheckAuthentication(SRpcConn *pConn, char *msg, int msgLen) {
// for response, if code is auth failure, it shall bypass the auth process
code = htonl(pHead->code);
if (code == TSDB_CODE_RPC_INVALID_TIME_STAMP || code == TSDB_CODE_RPC_AUTH_FAILURE ||
code == TSDB_CODE_RPC_INVALID_VERSION ||
code == TSDB_CODE_RPC_AUTH_REQUIRED || code == TSDB_CODE_MND_INVALID_USER || code == TSDB_CODE_RPC_NOT_READY) {
pHead->msgLen = (int32_t)htonl((uint32_t)pHead->msgLen);
// tTrace("%s, dont check authentication since code is:0x%x", pConn->info, code);

View File

@ -10,12 +10,6 @@
<packaging>jar</packaging>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.0.0</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>

View File

@ -29,11 +29,7 @@ public class JdbcTaosdemo {
JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
boolean isHelp = Arrays.asList(args).contains("--help");
if (isHelp) {
JdbcTaosdemoConfig.printHelp();
return;
}
if (config.getHost() == null) {
if (isHelp || config.host == null || config.host.isEmpty()) {
JdbcTaosdemoConfig.printHelp();
return;
}
@ -85,7 +81,7 @@ public class JdbcTaosdemo {
taosdemo.selectLastOneYear();
// drop super table
if (config.isDeleteTable())
if (config.dropTable)
taosdemo.dropSuperTable();
taosdemo.close();
}
@ -103,7 +99,7 @@ public class JdbcTaosdemo {
logger.info("[ OK ] Connection established.");
} catch (ClassNotFoundException | SQLException e) {
logger.error(e.getMessage());
throw new RuntimeException("connection failed: " + config.getHost());
throw new RuntimeException("connection failed: " + config.host);
}
}
@ -111,7 +107,7 @@ public class JdbcTaosdemo {
* create database
*/
private void createDatabase() {
String sql = SqlSpeller.createDatabaseSQL(config.getDbName(), config.getKeep(), config.getDays());
String sql = SqlSpeller.createDatabaseSQL(config.database, config.keep, config.days);
execute(sql);
}
@ -119,7 +115,7 @@ public class JdbcTaosdemo {
* drop database
*/
private void dropDatabase() {
String sql = SqlSpeller.dropDatabaseSQL(config.getDbName());
String sql = SqlSpeller.dropDatabaseSQL(config.database);
execute(sql);
}
@ -127,7 +123,7 @@ public class JdbcTaosdemo {
* use database
*/
private void useDatabase() {
String sql = SqlSpeller.useDatabaseSQL(config.getDbName());
String sql = SqlSpeller.useDatabaseSQL(config.database);
execute(sql);
}
@ -135,7 +131,7 @@ public class JdbcTaosdemo {
* create super table
*/
private void createSuperTable() {
String sql = SqlSpeller.createSuperTableSQL(config.getStbName());
String sql = SqlSpeller.createSuperTableSQL(config.superTable);
execute(sql);
}
@ -144,9 +140,9 @@ public class JdbcTaosdemo {
*/
private void createTableMultiThreads() {
try {
final int tableSize = config.getNumberOfTable() / config.getNumberOfThreads();
final int tableSize = (int) (config.numOfTables / config.numOfThreadsForCreate);
List<Thread> threads = new ArrayList<>();
for (int i = 0; i < config.getNumberOfThreads(); i++) {
for (int i = 0; i < config.numOfThreadsForCreate; i++) {
Thread thread = new Thread(new CreateTableTask(config, i * tableSize, tableSize), "Thread-" + i);
threads.add(thread);
thread.start();
@ -169,9 +165,9 @@ public class JdbcTaosdemo {
final long startDatetime = TimeStampUtil.datetimeToLong("2005-01-01 00:00:00.000");
final long finishDatetime = TimeStampUtil.datetimeToLong("2030-01-01 00:00:00.000");
final int tableSize = config.getNumberOfTable() / config.getNumberOfThreads();
final int tableSize = (int) (config.numOfTables / config.numOfThreadsForInsert);
List<Thread> threads = new ArrayList<>();
for (int i = 0; i < config.getNumberOfThreads(); i++) {
for (int i = 0; i < config.numOfThreadsForInsert; i++) {
Thread thread = new Thread(new InsertTableDatetimeTask(config, i * tableSize, tableSize, startDatetime, finishDatetime), "Thread-" + i);
threads.add(thread);
thread.start();
@ -188,10 +184,10 @@ public class JdbcTaosdemo {
private void insertMultiThreads() {
try {
final int tableSize = config.getNumberOfTable() / config.getNumberOfThreads();
final int numberOfRecordsPerTable = config.getNumberOfRecordsPerTable();
final int tableSize = (int) (config.numOfTables / config.numOfThreadsForInsert);
final int numberOfRecordsPerTable = (int) config.numOfRowsPerTable;
List<Thread> threads = new ArrayList<>();
for (int i = 0; i < config.getNumberOfThreads(); i++) {
for (int i = 0; i < config.numOfThreadsForInsert; i++) {
Thread thread = new Thread(new InsertTableTask(config, i * tableSize, tableSize, numberOfRecordsPerTable), "Thread-" + i);
threads.add(thread);
thread.start();
@ -207,86 +203,85 @@ public class JdbcTaosdemo {
}
private void selectFromTableLimit() {
String sql = SqlSpeller.selectFromTableLimitSQL(config.getDbName(), config.getTbPrefix(), 1, 10, 0);
String sql = SqlSpeller.selectFromTableLimitSQL(config.database, config.prefixOfTable, 1, 10, 0);
executeQuery(sql);
}
private void selectCountFromTable() {
String sql = SqlSpeller.selectCountFromTableSQL(config.getDbName(), config.getTbPrefix(), 1);
String sql = SqlSpeller.selectCountFromTableSQL(config.database, config.prefixOfTable, 1);
executeQuery(sql);
}
private void selectAvgMinMaxFromTable() {
String sql = SqlSpeller.selectAvgMinMaxFromTableSQL("current", config.getDbName(), config.getTbPrefix(), 1);
String sql = SqlSpeller.selectAvgMinMaxFromTableSQL("current", config.database, config.prefixOfTable, 1);
executeQuery(sql);
}
private void selectLastFromTable() {
String sql = SqlSpeller.selectLastFromTableSQL(config.getDbName(), config.getTbPrefix(), 1);
String sql = SqlSpeller.selectLastFromTableSQL(config.database, config.prefixOfTable, 1);
executeQuery(sql);
}
private void selectFromSuperTableLimit() {
String sql = SqlSpeller.selectFromSuperTableLimitSQL(config.getDbName(), config.getStbName(), 10, 0);
String sql = SqlSpeller.selectFromSuperTableLimitSQL(config.database, config.superTable, 10, 0);
executeQuery(sql);
}
private void selectCountFromSuperTable() {
String sql = SqlSpeller.selectCountFromSuperTableSQL(config.getDbName(), config.getStbName());
String sql = SqlSpeller.selectCountFromSuperTableSQL(config.database, config.superTable);
executeQuery(sql);
}
private void selectAvgMinMaxFromSuperTable() {
String sql = SqlSpeller.selectAvgMinMaxFromSuperTableSQL("current", config.getDbName(), config.getStbName());
String sql = SqlSpeller.selectAvgMinMaxFromSuperTableSQL("current", config.database, config.superTable);
executeQuery(sql);
}
private void selectAvgMinMaxFromSuperTableWhereTag() {
String sql = SqlSpeller.selectAvgMinMaxFromSuperTableWhere("current", config.getDbName(), config.getStbName());
String sql = SqlSpeller.selectAvgMinMaxFromSuperTableWhere("current", config.database, config.superTable);
executeQuery(sql);
}
private void selectLastFromSuperTableWhere() {
String sql = SqlSpeller.selectLastFromSuperTableWhere("current", config.getDbName(), config.getStbName());
String sql = SqlSpeller.selectLastFromSuperTableWhere("current", config.database, config.superTable);
executeQuery(sql);
}
private void selectGroupBy() {
String sql = SqlSpeller.selectGroupBy("current", config.getDbName(), config.getStbName());
String sql = SqlSpeller.selectGroupBy("current", config.database, config.superTable);
executeQuery(sql);
}
private void selectLike() {
String sql = SqlSpeller.selectLike(config.getDbName(), config.getStbName());
String sql = SqlSpeller.selectLike(config.database, config.superTable);
executeQuery(sql);
}
private void selectLastOneHour() {
String sql = SqlSpeller.selectLastOneHour(config.getDbName(), config.getStbName());
String sql = SqlSpeller.selectLastOneHour(config.database, config.superTable);
executeQuery(sql);
}
private void selectLastOneDay() {
String sql = SqlSpeller.selectLastOneDay(config.getDbName(), config.getStbName());
String sql = SqlSpeller.selectLastOneDay(config.database, config.superTable);
executeQuery(sql);
}
private void selectLastOneWeek() {
String sql = SqlSpeller.selectLastOneWeek(config.getDbName(), config.getStbName());
String sql = SqlSpeller.selectLastOneWeek(config.database, config.superTable);
executeQuery(sql);
}
private void selectLastOneMonth() {
String sql = SqlSpeller.selectLastOneMonth(config.getDbName(), config.getStbName());
String sql = SqlSpeller.selectLastOneMonth(config.database, config.superTable);
executeQuery(sql);
}
private void selectLastOneYear() {
String sql = SqlSpeller.selectLastOneYear(config.getDbName(), config.getStbName());
String sql = SqlSpeller.selectLastOneYear(config.database, config.superTable);
executeQuery(sql);
}
private void close() {
try {
if (connection != null) {
@ -303,7 +298,7 @@ public class JdbcTaosdemo {
* drop super table
*/
private void dropSuperTable() {
String sql = SqlSpeller.dropSuperTableSQL(config.getDbName(), config.getStbName());
String sql = SqlSpeller.dropSuperTableSQL(config.database, config.superTable);
execute(sql);
}

View File

@ -1,54 +1,90 @@
package com.taosdata.example.jdbcTaosdemo.domain;
import com.taosdata.example.jdbcTaosdemo.utils.TimeStampUtil;
public final class JdbcTaosdemoConfig {
// instance
public String host; //host
public int port = 6030; //port
public String user = "root"; //user
public String password = "taosdata"; //password
// database
public String database = "test"; //database
public int keep = 3650; //keep
public int days = 30; //days
public int replica = 1; //replica
//super table
public boolean doCreateTable = true;
public String superTable = "weather"; //super table name
public String prefixOfFields = "col";
public int numOfFields;
public String prefixOfTags = "tag";
public int numOfTags;
public String superTableSQL;
//sub table
public String prefixOfTable = "t";
// insert task
public boolean autoCreateTable = true;
public long numOfTables = 100;
public long numOfRowsPerTable = 100;
public int numOfTablesPerSQL = 10;
public int numOfValuesPerSQL = 10;
public int numOfThreadsForCreate = 1;
public int numOfThreadsForInsert = 1;
public long startTime;
public long timeGap = 1;
public int frequency;
public int order;
public int rate = 10;
public long range = 1000l;
// select task
//The host to connect to TDengine. Must insert one
private String host;
//The TCP/IP port number to use for the connection. Default is 6030.
private int port = 6030;
//The TDengine user name to use when connecting to the server. Default is 'root'
private String user = "root";
//The password to use when connecting to the server. Default is 'taosdata'
private String password = "taosdata";
//Destination database. Default is 'test'
private String dbName = "test";
//keep
private int keep = 36500;
//days
private int days = 120;
//Super table Name. Default is 'meters'
private String stbName = "meters";
//Table name prefix. Default is 'd'
private String tbPrefix = "d";
//The number of tables. Default is 10.
private int numberOfTable = 10;
//The number of records per table. Default is 2
private int numberOfRecordsPerTable = 2;
//The number of records per request. Default is 100
private int numberOfRecordsPerRequest = 100;
//The number of threads. Default is 1.
private int numberOfThreads = 1;
//Delete data. Default is false
private boolean deleteTable = false;
// drop task
public boolean dropTable = false;
public static void printHelp() {
System.out.println("Usage: java -jar JdbcTaosDemo.jar [OPTION...]");
System.out.println("-h host The host to connect to TDengine. you must input one");
System.out.println("-p port The TCP/IP port number to use for the connection. Default is 6030");
System.out.println("-u user The TDengine user name to use when connecting to the server. Default is 'root'");
System.out.println("-P password The password to use when connecting to the server.Default is 'taosdata'");
System.out.println("-d database Destination database. Default is 'test'");
System.out.println("-m tablePrefix Table prefix name. Default is 'd'");
System.out.println("-t num_of_tables The number of tables. Default is 10");
System.out.println("-n num_of_records_per_table The number of records per table. Default is 2");
System.out.println("-r num_of_records_per_req The number of records per request. Default is 100");
System.out.println("-T num_of_threads The number of threads. Default is 1");
System.out.println("-D delete table Delete data methods. Default is false");
System.out.println("--help Give this help list");
// System.out.println("--infinite infinite insert mode");
System.out.println("Usage: java -jar jdbc-taosdemo-2.0.jar [OPTION...]");
// instance
System.out.println("-host The host to connect to TDengine which you must specify");
System.out.println("-port The TCP/IP port number to use for the connection. Default is 6030");
System.out.println("-user The TDengine user name to use when connecting to the server. Default is 'root'");
System.out.println("-password The password to use when connecting to the server.Default is 'taosdata'");
// database
System.out.println("-database Destination database. Default is 'test'");
System.out.println("-keep database keep parameter. Default is 3650");
System.out.println("-days database days parameter. Default is 30");
System.out.println("-replica database replica parameter. Default 1, min: 1, max: 3");
// super table
System.out.println("-doCreateTable do create super table and sub table, true or false, Default true");
System.out.println("-superTable super table name. Default 'weather'");
System.out.println("-prefixOfFields The prefix of field in super table. Default is 'col'");
System.out.println("-numOfFields The number of field in super table. Default is (ts timestamp, temperature float, humidity int).");
System.out.println("-prefixOfTags The prefix of tag in super table. Default is 'tag'");
System.out.println("-numOfTags The number of tag in super table. Default is (location nchar(64), groupId int).");
System.out.println("-superTableSQL specify a sql statement for the super table.\n" +
" Default is 'create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int). \n" +
" if you use this parameter, the numOfFields and numOfTags will be invalid'");
// sub table
System.out.println("-prefixOfTable The prefix of sub tables. Default is 't'");
System.out.println("-numOfTables The number of tables. Default is 1");
System.out.println("-numOfThreadsForCreate The number of thread during create sub table. Default is 1");
// insert task
System.out.println("-autoCreateTable Use auto Create sub tables SQL. Default is false");
System.out.println("-numOfRowsPerTable The number of records per table. Default is 1");
System.out.println("-numOfThreadsForInsert The number of threads during insert row. Default is 1");
System.out.println("-numOfTablesPerSQL The number of table per SQL. Default is 1");
System.out.println("-numOfValuesPerSQL The number of value per SQL. Default is 1");
System.out.println("-startTime start time for insert task, The format is \"yyyy-MM-dd HH:mm:ss.SSS\".");
System.out.println("-timeGap the number of time gap. Default is 1000 ms");
System.out.println("-frequency the number of records per second inserted into one table. default is 0, do not control frequency");
System.out.println("-order Insert mode--0: In order, 1: Out of order. Default is in order");
System.out.println("-rate The proportion of data out of order. effective only if order is 1. min 0, max 100, default is 10");
System.out.println("-range The range of data out of order. effective only if order is 1. default is 1000 ms");
// query task
// System.out.println("-sqlFile The select sql file");
// drop task
System.out.println("-dropTable Drop data before quit. Default is false");
System.out.println("--help Give this help list");
}
/**
@ -59,95 +95,111 @@ public final class JdbcTaosdemoConfig {
*/
public JdbcTaosdemoConfig(String[] args) {
for (int i = 0; i < args.length; i++) {
if ("-h".equals(args[i]) && i < args.length - 1) {
// instance
if ("-host".equals(args[i]) && i < args.length - 1) {
host = args[++i];
}
if ("-p".equals(args[i]) && i < args.length - 1) {
if ("-port".equals(args[i]) && i < args.length - 1) {
port = Integer.parseInt(args[++i]);
}
if ("-u".equals(args[i]) && i < args.length - 1) {
if ("-user".equals(args[i]) && i < args.length - 1) {
user = args[++i];
}
if ("-P".equals(args[i]) && i < args.length - 1) {
if ("-password".equals(args[i]) && i < args.length - 1) {
password = args[++i];
}
if ("-d".equals(args[i]) && i < args.length - 1) {
dbName = args[++i];
// database
if ("-database".equals(args[i]) && i < args.length - 1) {
database = args[++i];
}
if ("-m".equals(args[i]) && i < args.length - 1) {
tbPrefix = args[++i];
if ("-keep".equals(args[i]) && i < args.length - 1) {
keep = Integer.parseInt(args[++i]);
}
if ("-t".equals(args[i]) && i < args.length - 1) {
numberOfTable = Integer.parseInt(args[++i]);
if ("-days".equals(args[i]) && i < args.length - 1) {
days = Integer.parseInt(args[++i]);
}
if ("-n".equals(args[i]) && i < args.length - 1) {
numberOfRecordsPerTable = Integer.parseInt(args[++i]);
if ("-replica".equals(args[i]) && i < args.length - 1) {
replica = Integer.parseInt(args[++i]);
}
if ("-r".equals(args[i]) && i < args.length - 1) {
numberOfRecordsPerRequest = Integer.parseInt(args[++i]);
// super table
if ("-doCreateTable".equals(args[i]) && i < args.length - 1) {
doCreateTable = Boolean.parseBoolean(args[++i]);
}
if ("-T".equals(args[i]) && i < args.length - 1) {
numberOfThreads = Integer.parseInt(args[++i]);
if ("-superTable".equals(args[i]) && i < args.length - 1) {
superTable = args[++i];
}
if ("-D".equals(args[i]) && i < args.length - 1) {
deleteTable = Boolean.parseBoolean(args[++i]);
if ("-prefixOfFields".equals(args[i]) && i < args.length - 1) {
prefixOfFields = args[++i];
}
if ("-numOfFields".equals(args[i]) && i < args.length - 1) {
numOfFields = Integer.parseInt(args[++i]);
}
if ("-prefixOfTags".equals(args[i]) && i < args.length - 1) {
prefixOfTags = args[++i];
}
if ("-numOfTags".equals(args[i]) && i < args.length - 1) {
numOfTags = Integer.parseInt(args[++i]);
}
if ("-superTableSQL".equals(args[i]) && i < args.length - 1) {
superTableSQL = args[++i];
}
// sub table
if ("-prefixOfTable".equals(args[i]) && i < args.length - 1) {
prefixOfTable = args[++i];
}
if ("-numOfTables".equals(args[i]) && i < args.length - 1) {
numOfTables = Long.parseLong(args[++i]);
}
if ("-autoCreateTable".equals(args[i]) && i < args.length - 1) {
autoCreateTable = Boolean.parseBoolean(args[++i]);
}
if ("-numOfThreadsForCreate".equals(args[i]) && i < args.length - 1) {
numOfThreadsForCreate = Integer.parseInt(args[++i]);
}
// insert task
if ("-numOfRowsPerTable".equals(args[i]) && i < args.length - 1) {
numOfRowsPerTable = Long.parseLong(args[++i]);
}
if ("-numOfThreadsForInsert".equals(args[i]) && i < args.length - 1) {
numOfThreadsForInsert = Integer.parseInt(args[++i]);
}
if ("-numOfTablesPerSQL".equals(args[i]) && i < args.length - 1) {
numOfTablesPerSQL = Integer.parseInt(args[++i]);
}
if ("-numOfValuesPerSQL".equals(args[i]) && i < args.length - 1) {
numOfValuesPerSQL = Integer.parseInt(args[++i]);
}
if ("-startTime".equals(args[i]) && i < args.length - 1) {
startTime = TimeStampUtil.datetimeToLong(args[++i]);
}
if ("-timeGap".equals(args[i]) && i < args.length - 1) {
timeGap = Long.parseLong(args[++i]);
}
if ("-frequency".equals(args[i]) && i < args.length - 1) {
frequency = Integer.parseInt(args[++i]);
}
if ("-order".equals(args[i]) && i < args.length - 1) {
order = Integer.parseInt(args[++i]);
}
if ("-rate".equals(args[i]) && i < args.length - 1) {
rate = Integer.parseInt(args[++i]);
if (rate < 0 || rate > 100)
throw new IllegalArgumentException("rate must between 0 and 100");
}
if ("-range".equals(args[i]) && i < args.length - 1) {
range = Integer.parseInt(args[++i]);
}
// select task
// drop task
if ("-dropTable".equals(args[i]) && i < args.length - 1) {
dropTable = Boolean.parseBoolean(args[++i]);
}
}
}
public String getHost() {
return host;
public static void main(String[] args) {
JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
}
public int getPort() {
return port;
}
public String getUser() {
return user;
}
public String getPassword() {
return password;
}
public String getDbName() {
return dbName;
}
public int getKeep() {
return keep;
}
public int getDays() {
return days;
}
public String getStbName() {
return stbName;
}
public String getTbPrefix() {
return tbPrefix;
}
public int getNumberOfTable() {
return numberOfTable;
}
public int getNumberOfRecordsPerTable() {
return numberOfRecordsPerTable;
}
public int getNumberOfThreads() {
return numberOfThreads;
}
public boolean isDeleteTable() {
return deleteTable;
}
public int getNumberOfRecordsPerRequest() {
return numberOfRecordsPerRequest;
}
}

View File

@ -28,7 +28,7 @@ public class CreateTableTask implements Runnable {
Connection connection = ConnectionFactory.build(config);
for (int i = startIndex; i < startIndex + tableNumber; i++) {
Statement statement = connection.createStatement();
String sql = SqlSpeller.createTableSQL(i + 1, config.getDbName(), config.getStbName());
String sql = SqlSpeller.createTableSQL(i + 1, config.database, config.superTable);
statement.execute(sql);
statement.close();
logger.info(">>> " + sql);

View File

@ -30,10 +30,10 @@ public class InsertTableDatetimeTask implements Runnable {
public void run() {
try {
Connection connection = ConnectionFactory.build(config);
int valuesCount = config.getNumberOfRecordsPerRequest();
int valuesCount = config.numOfValuesPerSQL;
for (long ts = startDatetime; ts < finishedDatetime; ts += valuesCount) {
for (int i = startTableIndex; i < startTableIndex + tableNumber; i++) {
String sql = SqlSpeller.insertBatchSizeRowsSQL(config.getDbName(), config.getTbPrefix(), i + 1, ts, valuesCount);
String sql = SqlSpeller.insertBatchSizeRowsSQL(config.database, config.prefixOfTable, i + 1, ts, valuesCount);
Statement statement = connection.createStatement();
statement.execute(sql);
statement.close();

View File

@ -31,7 +31,7 @@ public class InsertTableTask implements Runnable {
public void run() {
try {
Connection connection = ConnectionFactory.build(config);
int keep = config.getKeep();
int keep = config.keep;
Instant end = Instant.now();
Instant start = end.minus(Duration.ofDays(keep - 1));
long timeGap = ChronoUnit.MILLIS.between(start, end) / (recordsNumberPerTable - 1);
@ -41,7 +41,7 @@ public class InsertTableTask implements Runnable {
long ts = start.toEpochMilli() + (j * timeGap);
// insert data into echo table
for (int i = startTbIndex; i < startTbIndex + tableNumber; i++) {
String sql = SqlSpeller.insertBatchSizeRowsSQL(config.getDbName(), config.getTbPrefix(), i + 1, ts, config.getNumberOfRecordsPerRequest());
String sql = SqlSpeller.insertBatchSizeRowsSQL(config.database, config.prefixOfTable, i + 1, ts, config.numOfValuesPerSQL);
logger.info(Thread.currentThread().getName() + ">>> " + sql);
Statement statement = connection.createStatement();
statement.execute(sql);

View File

@ -11,7 +11,7 @@ import java.util.Properties;
public class ConnectionFactory {
public static Connection build(JdbcTaosdemoConfig config) throws SQLException {
return build(config.getHost(), config.getPort(), config.getDbName(), config.getUser(), config.getPassword());
return build(config.host, config.port, config.database, config.user, config.password);
}
public static Connection build(String host, int port, String dbName) throws SQLException {

View File

@ -82,12 +82,27 @@ public class ConnectionPoolDemo {
init(dataSource);
ExecutorService executor = Executors.newFixedThreadPool(threadCount);
while (true) {
executor.execute(new InsertTask(dataSource, dbName, tableSize, batchSize));
if (sleep > 0)
TimeUnit.MILLISECONDS.sleep(sleep);
try {
Connection connection = dataSource.getConnection();
Statement statement = connection.createStatement();
String sql = "insert into " + dbName + ".t_1 values('2020-01-01 00:00:00.000',12.12,111)";
int affectRows = statement.executeUpdate(sql);
System.out.println("affectRows >>> " + affectRows);
affectRows = statement.executeUpdate(sql);
System.out.println("affectRows >>> " + affectRows);
statement.close();
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
// ExecutorService executor = Executors.newFixedThreadPool(threadCount);
// while (true) {
// executor.execute(new InsertTask(dataSource, dbName, tableSize, batchSize));
// if (sleep > 0)
// TimeUnit.MILLISECONDS.sleep(sleep);
// }
}
private static void init(DataSource dataSource) {

View File

@ -6,7 +6,7 @@ import org.springframework.boot.autoconfigure.SpringBootApplication;
@MapperScan(basePackages = {"com.taosdata.jdbc.springbootdemo.dao"})
@SpringBootApplication
public class SpringbootdemoApplication {
public class cd {
public static void main(String[] args) {
SpringApplication.run(SpringbootdemoApplication.class, args);

View File

@ -2,41 +2,87 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.4.0</version>
<relativePath/> <!-- lookup parent from repository -->
</parent>
<groupId>com.taosdata</groupId>
<artifactId>taosdemo</artifactId>
<version>2.0</version>
<name>taosdemo</name>
<packaging>jar</packaging>
<description>Demo project for TDengine</description>
<properties>
<java.version>1.8</java.version>
<spring.version>5.3.2</spring.version>
</properties>
<dependencies>
<!-- spring -->
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-expression</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-aop</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-aspects</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<!-- HikariCP -->
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>3.4.5</version>
</dependency>
<!-- taos jdbc -->
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.14</version>
<version>2.0.15</version>
<scope>system</scope>
<systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>
</dependency>
<!-- mysql -->
<!-- fastjson -->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>1.2.75</version>
</dependency>
<!-- mysql: just for test -->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.47</version>
</dependency>
<!-- mybatis-plus -->
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>3.1.2</version>
<scope>test</scope>
</dependency>
<!-- log4j -->
<dependency>
@ -44,46 +90,19 @@
<artifactId>log4j</artifactId>
<version>1.2.17</version>
</dependency>
<!-- springboot -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-jdbc</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-thymeleaf</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.mybatis.spring.boot</groupId>
<artifactId>mybatis-spring-boot-starter</artifactId>
<version>2.1.4</version>
</dependency>
<!-- junit -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-devtools</artifactId>
<scope>runtime</scope>
<optional>true</optional>
</dependency>
<!-- lombok -->
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
<version>1.18.16</version>
<scope>provided</scope>
</dependency>
</dependencies>
@ -94,6 +113,7 @@
<includes>
<include>**/*.properties</include>
<include>**/*.xml</include>
<include>**/*.jar</include>
</includes>
<filtering>true</filtering>
</resource>
@ -108,10 +128,41 @@
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>8</source>
<target>8</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.1.0</version>
<configuration>
<archive>
<manifest>
<!-- specify JdbcChecker as the mainClass -->
<mainClass>com.taosdata.taosdemo.TaosDemoApplication</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
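With the maven-assembly-plugin configured above (mainClass com.taosdata.taosdemo.TaosDemoApplication, jar-with-dependencies descriptor), the demo builds into a single runnable jar. A typical build step would be the following; the exact jar name under target/ depends on the project's artifactId and version, so it is only illustrative:

mvn clean package
java -jar target/taosdemo-jar-with-dependencies.jar --help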

View File

@ -0,0 +1,3 @@
Requirements:
1. Be able to read the lowa configuration file
2. Support the taos-driver in both JNI and RESTful modes
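The two driver modes correspond to two JDBC URL schemes, which DataSourceFactory below selects between based on the configured jdbc.driver; as a sketch (host and ports are illustrative):

jdbc:TAOS://127.0.0.1:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8      (JNI)
jdbc:TAOS-RS://127.0.0.1:6041/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8   (RESTful)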

View File

@ -0,0 +1,112 @@
package com.taosdata.taosdemo;
import com.taosdata.taosdemo.components.DataSourceFactory;
import com.taosdata.taosdemo.components.JdbcTaosdemoConfig;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.DatabaseService;
import com.taosdata.taosdemo.service.SubTableService;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;
import org.apache.log4j.Logger;
import javax.sql.DataSource;
import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
public class TaosDemoApplication {
private static Logger logger = Logger.getLogger(TaosDemoApplication.class);
public static void main(String[] args) throws IOException {
// read the configuration parameters
JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
boolean isHelp = Arrays.asList(args).contains("--help");
if (isHelp || config.host == null || config.host.isEmpty()) {
// if (isHelp) {
JdbcTaosdemoConfig.printHelp();
System.exit(0);
}
// initialization
final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user, config.password);
final DatabaseService databaseService = new DatabaseService(dataSource);
final SuperTableService superTableService = new SuperTableService(dataSource);
final SubTableService subTableService = new SubTableService(dataSource);
// create the database
long start = System.currentTimeMillis();
Map<String, String> databaseParam = new HashMap<>();
databaseParam.put("database", config.database);
databaseParam.put("keep", Integer.toString(config.keep));
databaseParam.put("days", Integer.toString(config.days));
databaseParam.put("replica", Integer.toString(config.replica));
//TODO: other database parameters
databaseService.createDatabase(databaseParam);
databaseService.useDatabase(config.database);
long end = System.currentTimeMillis();
logger.info(">>> create database time cost : " + (end - start) + " ms.");
/**********************************************************************************/
// build the super table meta
SuperTableMeta superTableMeta;
// create super table
if (config.superTableSQL != null) {
// use a sql to create super table
superTableMeta = SuperTableMetaGenerator.generate(config.superTableSQL);
if (config.database != null && !config.database.isEmpty())
superTableMeta.setDatabase(config.database);
} else if (config.numOfFields == 0) {
String sql = "create table " + config.database + "." + config.superTable + " (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)";
superTableMeta = SuperTableMetaGenerator.generate(sql);
} else {
// create super table with specified field size and tag size
superTableMeta = SuperTableMetaGenerator.generate(config.database, config.superTable, config.numOfFields, config.prefixOfFields, config.numOfTags, config.prefixOfTags);
}
/**********************************************************************************/
// create tables
start = System.currentTimeMillis();
if (config.doCreateTable) {
superTableService.drop(superTableMeta.getDatabase(), superTableMeta.getName());
superTableService.create(superTableMeta);
if (!config.autoCreateTable) {
// create sub tables in batch
subTableService.createSubTable(superTableMeta, config.numOfTables, config.prefixOfTable, config.numOfThreadsForCreate);
}
}
end = System.currentTimeMillis();
logger.error(">>> create table time cost : " + (end - start) + " ms.");
/**********************************************************************************/
// insert data
long tableSize = config.numOfTables;
int threadSize = config.numOfThreadsForInsert;
long startTime = getProperStartTime(config.startTime, config.keep);
if (tableSize < threadSize)
threadSize = (int) tableSize;
long gap = (long) Math.ceil((0.0d + tableSize) / threadSize);
start = System.currentTimeMillis();
// multi threads to insert
int affectedRows = subTableService.insertMultiThreads(superTableMeta, threadSize, tableSize, startTime, gap, config);
end = System.currentTimeMillis();
logger.error("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms");
/**********************************************************************************/
// drop the table
if (config.dropTable) {
superTableService.drop(config.database, config.superTable);
}
System.exit(0);
}
private static long getProperStartTime(long startTime, int keep) {
Instant now = Instant.now();
long earliest = now.minus(Duration.ofDays(keep - 1)).toEpochMilli();
if (startTime == 0 || startTime < earliest) {
startTime = earliest;
}
return startTime;
}
}
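A typical invocation of this entry point, once the assembly jar has been built, might look like the following; the jar name is illustrative and every flag corresponds to a field parsed by JdbcTaosdemoConfig:

java -jar taosdemo-jar-with-dependencies.jar -host 127.0.0.1 -database jdbcdb -superTable weather -numOfTables 100 -numOfRowsPerTable 1000 -numOfThreadsForInsert 4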

View File

@ -1,15 +0,0 @@
package com.taosdata.taosdemo;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@MapperScan(basePackages = {"com.taosdata.taosdemo.mapper"})
@SpringBootApplication
public class TaosdemoApplication {
public static void main(String[] args) {
SpringApplication.run(TaosdemoApplication.class, args);
}
}

View File

@ -0,0 +1,63 @@
package com.taosdata.taosdemo.components;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import org.springframework.stereotype.Component;
import javax.sql.DataSource;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
@Component
public class DataSourceFactory {
private static DataSource instance;
public static DataSource getInstance(String host, int port, String user, String password) throws IOException {
if (instance == null) {
synchronized (DataSourceFactory.class) {
if (instance == null) {
InputStream is = DataSourceFactory.class.getClassLoader().getResourceAsStream("application.properties");
Properties properties = new Properties();
properties.load(is);
HikariConfig config = new HikariConfig();
if (properties.containsKey("jdbc.driver")) {
// String driverName = properties.getProperty("jdbc.driver");
// System.out.println(">>> load driver : " + driverName);
// try {
// Class.forName(driverName);
// } catch (ClassNotFoundException e) {
// e.printStackTrace();
// }
config.setDriverClassName(properties.getProperty("jdbc.driver"));
} else {
config.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
}
if ("com.taosdata.jdbc.rs.RestfulDriver".equalsIgnoreCase(properties.getProperty("jdbc.driver")))
config.setJdbcUrl("jdbc:TAOS-RS://" + host + ":6041/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8");
else
config.setJdbcUrl("jdbc:TAOS://" + host + ":" + port + "/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8");
config.setUsername(user);
config.setPassword(password);
// maximum-pool-size
if (properties.containsKey("hikari.maximum-pool-size"))
config.setMaximumPoolSize(Integer.parseInt(properties.getProperty("hikari.maximum-pool-size")));
else
config.setMaximumPoolSize(500);
// minimum-idle
if (properties.containsKey("hikari.minimum-idle"))
config.setMinimumIdle(Integer.parseInt(properties.getProperty("hikari.minimum-idle")));
else
config.setMinimumIdle(100);
config.setMaxLifetime(0);
instance = new HikariDataSource(config);
}
}
}
return instance;
}
}
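The factory loads its settings from application.properties on the classpath. A minimal file covering the keys consulted above could look like this; the values shown simply repeat the fallbacks hard-coded in the method:

jdbc.driver=com.taosdata.jdbc.TSDBDriver
hikari.maximum-pool-size=500
hikari.minimum-idle=100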

View File

@ -1,4 +1,6 @@
package com.taosdata.taosdemo.utils;
package com.taosdata.taosdemo.components;
import com.taosdata.taosdemo.utils.TimeStampUtil;
public final class JdbcTaosdemoConfig {
// instance
@ -7,10 +9,14 @@ public final class JdbcTaosdemoConfig {
public String user = "root"; //user
public String password = "taosdata"; //password
// database
public String database = "test"; //database
public String database = "jdbcdb"; //database
public int keep = 3650; //keep
public int days = 30; //days
public int replica = 1; //replica
public int blocks = 16;
public int cache = 8;
public String precision = "ms";
//super table
public boolean doCreateTable = true;
public String superTable = "weather"; //super table name
@ -20,19 +26,19 @@ public final class JdbcTaosdemoConfig {
public int numOfTags;
public String superTableSQL;
//sub table
public String tablePrefix = "t";
public int numOfTables = 100;
public int numOfThreadsForCreate = 1;
public String prefixOfTable = "t";
// insert task
public boolean autoCreateTable;
public int numOfRowsPerTable = 100;
public long numOfTables = 10;
public long numOfRowsPerTable = 10;
public int numOfTablesPerSQL = 1;
public int numOfValuesPerSQL = 1;
public int numOfThreadsForCreate = 1;
public int numOfThreadsForInsert = 1;
public int numOfTablesPerSQL = 10;
public int numOfValuesPerSQL = 10;
public long startTime;
public long timeGap;
public int sleep = 0;
public int order = 0;
public long timeGap = 1;
public int frequency;
public int order;
public int rate = 10;
public long range = 1000l;
// select task
@ -48,10 +54,14 @@ public final class JdbcTaosdemoConfig {
System.out.println("-user The TDengine user name to use when connecting to the server. Default is 'root'");
System.out.println("-password The password to use when connecting to the server.Default is 'taosdata'");
// database
System.out.println("-database Destination database. Default is 'test'");
System.out.println("-database Destination database. Default is 'jdbcdb'");
System.out.println("-keep database keep parameter. Default is 3650");
System.out.println("-days database days parameter. Default is 30");
System.out.println("-replica database replica parameter. Default 1, min: 1, max: 3");
System.out.println("-blocks database blocks parameter. Default is 16");
System.out.println("-cache database cache parameter. Default is 8");
System.out.println("-precision database precision parameter. Default is ms");
// super table
System.out.println("-doCreateTable do create super table and sub table, true or false, Default true");
System.out.println("-superTable super table name. Default 'weather'");
@ -63,7 +73,7 @@ public final class JdbcTaosdemoConfig {
" Default is 'create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int). \n" +
" if you use this parameter, the numOfFields and numOfTags will be invalid'");
// sub table
System.out.println("-tablePrefix The prefix of sub tables. Default is 't'");
System.out.println("-prefixOfTable The prefix of sub tables. Default is 't'");
System.out.println("-numOfTables The number of tables. Default is 1");
System.out.println("-numOfThreadsForCreate The number of thread during create sub table. Default is 1");
// insert task
@ -74,11 +84,10 @@ public final class JdbcTaosdemoConfig {
System.out.println("-numOfValuesPerSQL The number of value per SQL. Default is 1");
System.out.println("-startTime start time for insert task, The format is \"yyyy-MM-dd HH:mm:ss.SSS\".");
System.out.println("-timeGap the number of time gap. Default is 1000 ms");
System.out.println("-sleep The number of milliseconds for sleep after each insert. default is 0");
System.out.println("-frequency the number of records per second inserted into one table. default is 0, do not control frequency");
System.out.println("-order Insert mode--0: In order, 1: Out of order. Default is in order");
System.out.println("-rate The proportion of data out of order. effective only if order is 1. min 0, max 100, default is 10");
System.out.println("-range The range of data out of order. effective only if order is 1. default is 1000 ms");
// query task
// System.out.println("-sqlFile The select sql file");
// drop task
@ -120,6 +129,15 @@ public final class JdbcTaosdemoConfig {
if ("-replica".equals(args[i]) && i < args.length - 1) {
replica = Integer.parseInt(args[++i]);
}
if ("-blocks".equals(args[i]) && i < args.length - 1) {
blocks = Integer.parseInt(args[++i]);
}
if ("-cache".equals(args[i]) && i < args.length - 1) {
cache = Integer.parseInt(args[++i]);
}
if ("-precision".equals(args[i]) && i < args.length - 1) {
precision = args[++i];
}
// super table
if ("-doCreateTable".equals(args[i]) && i < args.length - 1) {
doCreateTable = Boolean.parseBoolean(args[++i]);
@ -143,11 +161,11 @@ public final class JdbcTaosdemoConfig {
superTableSQL = args[++i];
}
// sub table
if ("-tablePrefix".equals(args[i]) && i < args.length - 1) {
tablePrefix = args[++i];
if ("-prefixOfTable".equals(args[i]) && i < args.length - 1) {
prefixOfTable = args[++i];
}
if ("-numOfTables".equals(args[i]) && i < args.length - 1) {
numOfTables = Integer.parseInt(args[++i]);
numOfTables = Long.parseLong(args[++i]);
}
if ("-autoCreateTable".equals(args[i]) && i < args.length - 1) {
autoCreateTable = Boolean.parseBoolean(args[++i]);
@ -157,7 +175,7 @@ public final class JdbcTaosdemoConfig {
}
// insert task
if ("-numOfRowsPerTable".equals(args[i]) && i < args.length - 1) {
numOfRowsPerTable = Integer.parseInt(args[++i]);
numOfRowsPerTable = Long.parseLong(args[++i]);
}
if ("-numOfThreadsForInsert".equals(args[i]) && i < args.length - 1) {
numOfThreadsForInsert = Integer.parseInt(args[++i]);
@ -174,8 +192,8 @@ public final class JdbcTaosdemoConfig {
if ("-timeGap".equals(args[i]) && i < args.length - 1) {
timeGap = Long.parseLong(args[++i]);
}
if ("-sleep".equals(args[i]) && i < args.length - 1) {
sleep = Integer.parseInt(args[++i]);
if ("-frequency".equals(args[i]) && i < args.length - 1) {
frequency = Integer.parseInt(args[++i]);
}
if ("-order".equals(args[i]) && i < args.length - 1) {
order = Integer.parseInt(args[++i]);
@ -197,8 +215,4 @@ public final class JdbcTaosdemoConfig {
}
}
public static void main(String[] args) {
JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
}
}

View File

@ -0,0 +1,39 @@
package com.taosdata.taosdemo.components;
import com.alibaba.fastjson.JSONObject;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
public class JsonConfig {
public static void main(String[] args) {
JsonConfig config = new JsonConfig();
String str = config.read("insert.json");
JSONObject jsonObject = JSONObject.parseObject(str);
System.out.println(jsonObject);
}
private String read(String fileName) {
try {
BufferedReader reader = new BufferedReader(
new InputStreamReader(JsonConfig.class.getClassLoader().getResourceAsStream(fileName))
);
StringBuilder sb = new StringBuilder();
String line = null;
while ((line = reader.readLine()) != null) {
sb.append(line);
}
return sb.toString();
} catch (IOException e) {
e.printStackTrace();
}
return fileName;
}
}

View File

@ -1,174 +0,0 @@
package com.taosdata.taosdemo.components;
import com.taosdata.taosdemo.domain.*;
import com.taosdata.taosdemo.service.DatabaseService;
import com.taosdata.taosdemo.service.SubTableService;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.data.SubTableMetaGenerator;
import com.taosdata.taosdemo.service.data.SubTableValueGenerator;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;
import com.taosdata.taosdemo.utils.JdbcTaosdemoConfig;
import org.apache.log4j.Logger;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;
import java.util.*;
import java.util.concurrent.TimeUnit;
@Component
public class TaosDemoCommandLineRunner implements CommandLineRunner {
private static Logger logger = Logger.getLogger(TaosDemoCommandLineRunner.class);
@Autowired
private DatabaseService databaseService;
@Autowired
private SuperTableService superTableService;
@Autowired
private SubTableService subTableService;
private SuperTableMeta superTableMeta;
private List<SubTableMeta> subTableMetaList;
private List<SubTableValue> subTableValueList;
private List<List<SubTableValue>> dataList;
@Override
public void run(String... args) throws Exception {
// 读配置参数
JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args);
boolean isHelp = Arrays.asList(args).contains("--help");
if (isHelp) {
JdbcTaosdemoConfig.printHelp();
System.exit(0);
}
// 准备数据
prepareData(config);
// 创建数据库
createDatabaseTask(config);
// 建表
createTableTask(config);
// 插入
insertTask(config);
// 查询: 1. 生成查询语句, 2. 执行查询
// 删除表
if (config.dropTable) {
superTableService.drop(config.database, config.superTable);
}
System.exit(0);
}
private void createDatabaseTask(JdbcTaosdemoConfig config) {
long start = System.currentTimeMillis();
Map<String, String> databaseParam = new HashMap<>();
databaseParam.put("database", config.database);
databaseParam.put("keep", Integer.toString(config.keep));
databaseParam.put("days", Integer.toString(config.days));
databaseParam.put("replica", Integer.toString(config.replica));
//TODO: other database parameters
databaseService.dropDatabase(config.database);
databaseService.createDatabase(databaseParam);
databaseService.useDatabase(config.database);
long end = System.currentTimeMillis();
logger.info(">>> insert time cost : " + (end - start) + " ms.");
}
// 建超级表三种方式1. 指定SQL2. 指定field和tags的个数3. 默认
private void createTableTask(JdbcTaosdemoConfig config) {
long start = System.currentTimeMillis();
if (config.doCreateTable) {
superTableService.create(superTableMeta);
// 批量建子表
subTableService.createSubTable(subTableMetaList, config.numOfThreadsForCreate);
}
long end = System.currentTimeMillis();
logger.info(">>> create table time cost : " + (end - start) + " ms.");
}
private void insertTask(JdbcTaosdemoConfig config) {
long start = System.currentTimeMillis();
int numOfThreadsForInsert = config.numOfThreadsForInsert;
int sleep = config.sleep;
if (config.autoCreateTable) {
// 批量插入自动建表
dataList.stream().forEach(subTableValues -> {
subTableService.insertAutoCreateTable(subTableValues, numOfThreadsForInsert);
sleep(sleep);
});
} else {
dataList.stream().forEach(subTableValues -> {
subTableService.insert(subTableValues, numOfThreadsForInsert);
sleep(sleep);
});
}
long end = System.currentTimeMillis();
logger.info(">>> insert time cost : " + (end - start) + " ms.");
}
private void prepareData(JdbcTaosdemoConfig config) {
long start = System.currentTimeMillis();
// 超级表的meta
superTableMeta = createSupertable(config);
// 子表的meta
subTableMetaList = SubTableMetaGenerator.generate(superTableMeta, config.numOfTables, config.tablePrefix);
// 子表的data
subTableValueList = SubTableValueGenerator.generate(subTableMetaList, config.numOfRowsPerTable, config.startTime, config.timeGap);
// 如果有乱序给数据搞乱
if (config.order != 0) {
SubTableValueGenerator.disrupt(subTableValueList, config.rate, config.range);
}
// 分割数据
int numOfTables = config.numOfTables;
int numOfTablesPerSQL = config.numOfTablesPerSQL;
int numOfRowsPerTable = config.numOfRowsPerTable;
int numOfValuesPerSQL = config.numOfValuesPerSQL;
dataList = SubTableValueGenerator.split(subTableValueList, numOfTables, numOfTablesPerSQL, numOfRowsPerTable, numOfValuesPerSQL);
long end = System.currentTimeMillis();
logger.info(">>> prepare data time cost : " + (end - start) + " ms.");
}
private SuperTableMeta createSupertable(JdbcTaosdemoConfig config) {
SuperTableMeta tableMeta;
// create super table
logger.info(">>> create super table <<<");
if (config.superTableSQL != null) {
// use a sql to create super table
tableMeta = SuperTableMetaGenerator.generate(config.superTableSQL);
} else if (config.numOfFields == 0) {
// default sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)";
SuperTableMeta superTableMeta = new SuperTableMeta();
superTableMeta.setDatabase(config.database);
superTableMeta.setName(config.superTable);
List<FieldMeta> fields = new ArrayList<>();
fields.add(new FieldMeta("ts", "timestamp"));
fields.add(new FieldMeta("temperature", "float"));
fields.add(new FieldMeta("humidity", "int"));
superTableMeta.setFields(fields);
List<TagMeta> tags = new ArrayList<>();
tags.add(new TagMeta("location", "nchar(64)"));
tags.add(new TagMeta("groupId", "int"));
superTableMeta.setTags(tags);
return superTableMeta;
} else {
// create super table with specified field size and tag size
tableMeta = SuperTableMetaGenerator.generate(config.database, config.superTable, config.numOfFields, config.prefixOfFields, config.numOfTags, config.prefixOfTags);
}
return tableMeta;
}
private static void sleep(int sleep) {
if (sleep <= 0)
return;
try {
TimeUnit.MILLISECONDS.sleep(sleep);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}

View File

@ -1,40 +0,0 @@
package com.taosdata.taosdemo.controller;
import com.taosdata.taosdemo.service.DatabaseService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.util.Map;
@RestController
@RequestMapping
public class DatabaseController {
@Autowired
private DatabaseService databaseService;
/**
* create database
***/
@PostMapping
public int create(@RequestBody Map<String, String> map) {
return databaseService.createDatabase(map);
}
/**
* drop database
**/
@DeleteMapping("/{dbname}")
public int delete(@PathVariable("dbname") String dbname) {
return databaseService.dropDatabase(dbname);
}
/**
* use database
**/
@GetMapping("/{dbname}")
public int use(@PathVariable("dbname") String dbname) {
return databaseService.useDatabase(dbname);
}
}

View File

@ -1,17 +0,0 @@
package com.taosdata.taosdemo.controller;
import org.springframework.web.bind.annotation.RestController;
@RestController
public class InsertController {
//TODO多线程写一张表, thread = 10, table = 1
//TODO一个批次写多张表, insert into t1 using weather values() t2 using weather values()
//TODO插入的频率
//TODO指定一张表内的records数量
//TODO是否乱序
//TODO乱序的比例乱序的范围
//TODO先建表自动建表
//TODO一个批次写多张表
}

View File

@ -1,45 +0,0 @@
package com.taosdata.taosdemo.controller;
import com.taosdata.taosdemo.domain.TableValue;
import com.taosdata.taosdemo.service.SuperTableService;
import com.taosdata.taosdemo.service.TableService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;
@RestController
public class SubTableController {
@Autowired
private TableService tableService;
@Autowired
private SuperTableService superTableService;
//TODO: 使用supertable创建一个子表
//TODO使用supertable创建多个子表
//TODO使用supertable多线程创建子表
//TODO使用supertable多线程创建子表指定子表的name_prefix子表的数量使用线程的个数
/**
* 创建表超级表或者普通表
**/
/**
* 创建超级表的子表
**/
@PostMapping("/{database}/{superTable}")
public int createTable(@PathVariable("database") String database,
@PathVariable("superTable") String superTable,
@RequestBody TableValue tableMetadta) {
tableMetadta.setDatabase(database);
return 0;
}
}

View File

@ -1,26 +0,0 @@
package com.taosdata.taosdemo.controller;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.SuperTableService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
public class SuperTableController {
@Autowired
private SuperTableService superTableService;
@PostMapping("/{database}")
public int createTable(@PathVariable("database") String database, @RequestBody SuperTableMeta tableMetadta) {
tableMetadta.setDatabase(database);
return superTableService.create(tableMetadta);
}
//TODO: 删除超级表
//TODO查询超级表
//TODO统计查询表
}

View File

@ -1,11 +0,0 @@
package com.taosdata.taosdemo.controller;
public class TableController {
//TODO创建普通表create table(ts timestamp, temperature float)
//TODO创建普通表指定表的列数包括第一列timestamp
//TODO创建普通表指定表每列的name和type
}

View File

@ -0,0 +1,23 @@
package com.taosdata.taosdemo.dao;
import java.util.Map;
public interface DatabaseMapper {
// create database if not exists XXX
void createDatabase(String dbname);
// drop database if exists XXX
void dropDatabase(String dbname);
// create database if not exists XXX keep XX days XX replica XX
void createDatabaseWithParameters(Map<String, String> map);
// use XXX
void useDatabase(String dbname);
//TODO: alter database
//TODO: show database
}

View File

@ -0,0 +1,47 @@
package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.utils.SqlSpeller;
import org.apache.log4j.Logger;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import java.util.Map;
public class DatabaseMapperImpl implements DatabaseMapper {
private static final Logger logger = Logger.getLogger(DatabaseMapperImpl.class);
private final JdbcTemplate jdbcTemplate;
public DatabaseMapperImpl(DataSource dataSource) {
this.jdbcTemplate = new JdbcTemplate(dataSource);
}
@Override
public void createDatabase(String dbname) {
String sql = "create database if not exists " + dbname;
jdbcTemplate.execute(sql);
logger.info("SQL >>> " + sql);
}
@Override
public void dropDatabase(String dbname) {
String sql = "drop database if exists " + dbname;
jdbcTemplate.update(sql);
logger.info("SQL >>> " + sql);
}
@Override
public void createDatabaseWithParameters(Map<String, String> map) {
String sql = SqlSpeller.createDatabase(map);
jdbcTemplate.execute(sql);
logger.info("SQL >>> " + sql);
}
@Override
public void useDatabase(String dbname) {
String sql = "use " + dbname;
jdbcTemplate.execute(sql);
logger.info("SQL >>> " + sql);
}
}
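A minimal sketch of how this JdbcTemplate-based mapper can be wired up outside of Spring, assuming a DataSource from DataSourceFactory; the class name, host, port and database name are illustrative:

import com.taosdata.taosdemo.components.DataSourceFactory;
import com.taosdata.taosdemo.dao.DatabaseMapper;
import com.taosdata.taosdemo.dao.DatabaseMapperImpl;

import javax.sql.DataSource;
import java.util.HashMap;
import java.util.Map;

public class DatabaseMapperExample {
    public static void main(String[] args) throws Exception {
        // pooled DataSource; reads application.properties from the classpath
        DataSource dataSource = DataSourceFactory.getInstance("127.0.0.1", 6030, "root", "taosdata");
        DatabaseMapper mapper = new DatabaseMapperImpl(dataSource);

        // create and switch to a database; the parameter names follow SqlSpeller.createDatabase
        Map<String, String> param = new HashMap<>();
        param.put("database", "demo");
        param.put("keep", "3650");
        param.put("days", "30");
        mapper.createDatabaseWithParameters(param);
        mapper.useDatabase("demo");
    }
}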

View File

@ -1,8 +1,7 @@
package com.taosdata.taosdemo.mapper;
package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;
import java.util.List;
@ -11,7 +10,7 @@ import java.util.List;
public interface SubTableMapper {
// 创建子表
int createUsingSuperTable(SubTableMeta subTableMeta);
void createUsingSuperTable(SubTableMeta subTableMeta);
// 插入一张子表多个values
int insertOneTableMultiValues(SubTableValue subTableValue);
@ -20,10 +19,10 @@ public interface SubTableMapper {
int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue);
// 插入多张表多个values
int insertMultiTableMultiValues(@Param("tables") List<SubTableValue> tables);
int insertMultiTableMultiValues(List<SubTableValue> tables);
// 插入多张表多个values自动建表
int insertMultiTableMultiValuesUsingSuperTable(@Param("tables") List<SubTableValue> tables);
int insertMultiTableMultiValuesUsingSuperTable(List<SubTableValue> tables);
//<!-- TODO:修改子表标签值 alter table ${tablename} set tag tagName=newTagValue-->

View File

@ -0,0 +1,81 @@
package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.utils.SqlSpeller;
import org.apache.log4j.Logger;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import java.util.List;
public class SubTableMapperImpl implements SubTableMapper {
private static final Logger logger = Logger.getLogger(SubTableMapperImpl.class);
private final JdbcTemplate jdbcTemplate;
public SubTableMapperImpl(DataSource dataSource) {
this.jdbcTemplate = new JdbcTemplate(dataSource);
}
@Override
public void createUsingSuperTable(SubTableMeta subTableMeta) {
String sql = SqlSpeller.createTableUsingSuperTable(subTableMeta);
logger.info("SQL >>> " + sql);
jdbcTemplate.execute(sql);
}
@Override
public int insertOneTableMultiValues(SubTableValue subTableValue) {
String sql = SqlSpeller.insertOneTableMultiValues(subTableValue);
logger.info("SQL >>> " + sql);
int affectRows = 0;
try {
affectRows = jdbcTemplate.update(sql);
} catch (Exception e) {
e.printStackTrace();
}
return affectRows;
}
@Override
public int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue) {
String sql = SqlSpeller.insertOneTableMultiValuesUsingSuperTable(subTableValue);
logger.info("SQL >>> " + sql);
int affectRows = 0;
try {
affectRows = jdbcTemplate.update(sql);
} catch (Exception e) {
e.printStackTrace();
}
return affectRows;
}
@Override
public int insertMultiTableMultiValues(List<SubTableValue> tables) {
String sql = SqlSpeller.insertMultiSubTableMultiValues(tables);
logger.info("SQL >>> " + sql);
int affectRows = 0;
try {
affectRows = jdbcTemplate.update(sql);
} catch (Exception e) {
e.printStackTrace();
}
return affectRows;
}
@Override
public int insertMultiTableMultiValuesUsingSuperTable(List<SubTableValue> tables) {
String sql = SqlSpeller.insertMultiTableMultiValuesUsingSuperTable(tables);
logger.info("SQL >>> " + sql);
int affectRows = 0;
try {
affectRows = jdbcTemplate.update(sql);
} catch (Exception e) {
e.printStackTrace();
}
return affectRows;
}
}

View File

@ -1,20 +1,16 @@
package com.taosdata.taosdemo.mapper;
package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;
@Repository
public interface SuperTableMapper {
// 创建超级表使用自己定义的SQL语句
int createSuperTableUsingSQL(@Param("createSuperTableSQL") String sql);
// 创建超级表 create table if not exists xxx.xxx (f1 type1, f2 type2, ... ) tags( t1 type1, t2 type2 ...)
int createSuperTable(SuperTableMeta tableMetadata);
void createSuperTable(SuperTableMeta tableMetadata);
// 删除超级表 drop table if exists xxx;
int dropSuperTable(@Param("database") String database, @Param("name") String name);
void dropSuperTable(String database, String name);
//<!-- TODO:查询所有超级表信息 show stables -->

View File

@ -0,0 +1,31 @@
package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.utils.SqlSpeller;
import org.apache.log4j.Logger;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
public class SuperTableMapperImpl implements SuperTableMapper {
private static final Logger logger = Logger.getLogger(SuperTableMapperImpl.class);
private JdbcTemplate jdbcTemplate;
public SuperTableMapperImpl(DataSource dataSource) {
this.jdbcTemplate = new JdbcTemplate(dataSource);
}
@Override
public void createSuperTable(SuperTableMeta tableMetadata) {
String sql = SqlSpeller.createSuperTable(tableMetadata);
logger.info("SQL >>> " + sql);
jdbcTemplate.execute(sql);
}
@Override
public void dropSuperTable(String database, String name) {
String sql = "drop table if exists " + database + "." + name;
logger.info("SQL >>> " + sql);
jdbcTemplate.execute(sql);
}
}

View File

@ -1,8 +1,7 @@
package com.taosdata.taosdemo.mapper;
package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.domain.TableMeta;
import com.taosdata.taosdemo.domain.TableValue;
import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;
import java.util.List;
@ -11,7 +10,7 @@ import java.util.List;
public interface TableMapper {
// 创建普通表
int create(TableMeta tableMeta);
void create(TableMeta tableMeta);
// 插入一张表多个value
int insertOneTableMultiValues(TableValue values);
@ -20,9 +19,9 @@ public interface TableMapper {
int insertOneTableMultiValuesWithColumns(TableValue values);
// 插入多个表多个value
int insertMultiTableMultiValues(@Param("tables") List<TableValue> tables);
int insertMultiTableMultiValues(List<TableValue> tables);
// 插入多个表多个value, 指定的列
int insertMultiTableMultiValuesWithColumns(@Param("tables") List<TableValue> tables);
int insertMultiTableMultiValuesWithColumns(List<TableValue> tables);
}

View File

@ -0,0 +1,43 @@
package com.taosdata.taosdemo.dao;
import com.taosdata.taosdemo.dao.TableMapper;
import com.taosdata.taosdemo.domain.TableMeta;
import com.taosdata.taosdemo.domain.TableValue;
import com.taosdata.taosdemo.utils.SqlSpeller;
import org.springframework.jdbc.core.JdbcTemplate;
import java.util.List;
public class TableMapperImpl implements TableMapper {
private JdbcTemplate template;
@Override
public void create(TableMeta tableMeta) {
String sql = SqlSpeller.createTable(tableMeta);
template.execute(sql);
}
@Override
public int insertOneTableMultiValues(TableValue values) {
String sql = SqlSpeller.insertOneTableMultiValues(values);
return template.update(sql);
}
@Override
public int insertOneTableMultiValuesWithColumns(TableValue values) {
String sql = SqlSpeller.insertOneTableMultiValuesWithColumns(values);
return template.update(sql);
}
@Override
public int insertMultiTableMultiValues(List<TableValue> tables) {
String sql = SqlSpeller.insertMultiTableMultiValues(tables);
return template.update(sql);
}
@Override
public int insertMultiTableMultiValuesWithColumns(List<TableValue> tables) {
String sql = SqlSpeller.insertMultiTableMultiValuesWithColumns(tables);
return template.update(sql);
}
}

View File

@ -8,7 +8,6 @@ import java.util.List;
public class RowValue {
private List<FieldValue> fields;
public RowValue(List<FieldValue> fields) {
this.fields = fields;
}

View File

@ -1,27 +0,0 @@
package com.taosdata.taosdemo.mapper;
import org.apache.ibatis.annotations.Param;
import org.springframework.stereotype.Repository;
import java.util.Map;
@Repository
public interface DatabaseMapper {
// create database if not exists XXX
int createDatabase(@Param("database") String dbname);
// drop database if exists XXX
int dropDatabase(@Param("database") String dbname);
// create database if not exists XXX keep XX days XX replica XX
int createDatabaseWithParameters(Map<String, String> map);
// use XXX
int useDatabase(@Param("database") String dbname);
//TODO: alter database
//TODO: show database
}

View File

@ -1,48 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.taosdemo.mapper.DatabaseMapper">
<!-- create database XXX -->
<update id="createDatabase" parameterType="java.lang.String">
create database if not exists ${database}
</update>
<update id="dropDatabase" parameterType="java.lang.String">
DROP database if exists ${database}
</update>
<update id="createDatabaseWithParameters" parameterType="map">
CREATE database if not exists ${database}
<if test="keep != null">
KEEP ${keep}
</if>
<if test="days != null">
DAYS ${days}
</if>
<if test="replica != null">
REPLICA ${replica}
</if>
<if test="cache != null">
cache ${cache}
</if>
<if test="blocks != null">
blocks ${blocks}
</if>
<if test="minrows != null">
minrows ${minrows}
</if>
<if test="maxrows != null">
maxrows ${maxrows}
</if>
</update>
<update id="useDatabase" parameterType="java.lang.String">
use ${database}
</update>
<!-- TODO: alter database -->
<!-- TODO: show database -->
</mapper>

View File

@ -1,81 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.taosdemo.mapper.SubTableMapper">
<!-- 创建子表 -->
<update id="createUsingSuperTable">
CREATE table IF NOT EXISTS ${database}.${name} USING ${supertable} TAGS
<foreach collection="tags" item="tag" index="index" open="(" close=")" separator=",">
#{tag.value}
</foreach>
</update>
<!-- 插入:向一张表中插入多张表 -->
<insert id="insertOneTableMultiValues">
INSERT INTO ${database}.${name}
VALUES
<foreach collection="values" item="value">
<foreach collection="value.fields" item="field" open="(" close=")" separator=",">
#{field.value}
</foreach>
</foreach>
</insert>
<!-- 插入:使用自动建表模式,向一张表中插入多条数据 -->
<insert id="insertOneTableMultiValuesUsingSuperTable">
INSERT INTO ${database}.${name} USING ${supertable} TAGS
<foreach collection="tags" item="tag" index="index" open="(" close=")" separator=",">
#{tag.value}
</foreach>
VALUES
<foreach collection="values" item="value">
<foreach collection="value.fields" item="field" open="(" close=")" separator=",">
#{field.value}
</foreach>
</foreach>
</insert>
<!-- TODO:插入:向一张表中插入多张表, 指定列 -->
<!-- TODO:插入:向一张表中插入多张表, 自动建表,指定列 -->
<!-- 插入:向多张表中插入多条数据 -->
<insert id="insertMultiTableMultiValues">
INSERT INTO
<foreach collection="tables" item="table">
${table.database}.${table.name}
VALUES
<foreach collection="table.values" item="value">
<foreach collection="value.fields" item="field" open="(" close=")" separator=",">
#{field.value}
</foreach>
</foreach>
</foreach>
</insert>
<!-- 插入:向多张表中插入多条数据,自动建表 -->
<insert id="insertMultiTableMultiValuesUsingSuperTable">
INSERT INTO
<foreach collection="tables" item="table">
${table.database}.${table.name} USING ${table.supertable} TAGS
<foreach collection="table.tags" item="tag" index="index" open="(" close=")" separator=",">
#{tag.value}
</foreach>
VALUES
<foreach collection="table.values" item="value">
<foreach collection="value.fields" item="field" open="(" close=")" separator=",">
#{field.value}
</foreach>
</foreach>
</foreach>
</insert>
<!-- TODO:插入:向多张表中插入多张表, 指定列 -->
<!-- TODO:插入:向多张表中插入多张表, 自动建表,指定列 -->
<!-- TODO:修改子表标签值 alter table ${tablename} set tag tagName=newTagValue -->
<!-- TODO: -->
</mapper>

View File

@ -1,41 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.taosdemo.mapper.SuperTableMapper">
<update id="createSuperTableUsingSQL">
${createSuperTableSQL}
</update>
<!-- 创建超级表 -->
<update id="createSuperTable">
create table if not exists ${database}.${name}
<foreach collection="fields" item="field" index="index" open="(" close=")" separator=",">
${field.name} ${field.type}
</foreach>
tags
<foreach collection="tags" item="tag" index="index" open="(" close=")" separator=",">
${tag.name} ${tag.type}
</foreach>
</update>
<!-- 删除超级表drop super table -->
<delete id="dropSuperTable">
drop table if exists ${database}.${name}
</delete>
<!-- TODO:查询所有超级表信息 show stables -->
<!-- TODO:查询表结构 describe stable -->
<!-- TODO:增加列 alter table ${tablename} add column fieldName dataType -->
<!-- TODO:删除列 alter table ${tablename} drop column fieldName -->
<!-- TODO:添加标签 alter table ${tablename} add tag new_tagName tag_type -->
<!-- TODO:删除标签 alter table ${tablename} drop tag_name -->
<!-- TODO:修改标签名 alter table ${tablename} change tag old_tagName new_tagName -->
</mapper>

View File

@ -1,68 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.taosdata.taosdemo.mapper.TableMapper">
<!-- 创建普通表 -->
<update id="create" parameterType="com.taosdata.taosdemo.domain.TableMeta">
create table if not exists ${database}.${name}
<foreach collection="fields" item="field" index="index" open="(" close=")" separator=",">
${field.name} ${field.type}
</foreach>
</update>
<!-- 插入:向一张普通表中插入多条数据 -->
<insert id="insertOneTableMultiValues" parameterType="com.taosdata.taosdemo.domain.TableValue">
insert into ${database}.${name} values
<foreach collection="values" item="value">
<foreach collection="value.fields" item="field" open="(" close=")" separator=",">
${field.value}
</foreach>
</foreach>
</insert>
<!-- 向一张表中插入指定列的数据 insert into XXX.xx (f1,f2,f3...) values(v1,v2,v3...) -->
<insert id="insertOneTableMultiValuesWithColumns" parameterType="com.taosdata.taosdemo.domain.TableValue">
insert into ${database}.${name}
<foreach collection="columns" item="column" open="(" close=")" separator=",">
${column.name}
</foreach>
values
<foreach collection="values" item="value">
<foreach collection="value.fields" item="field" open="(" close=")" separator=",">
${field.value}
</foreach>
</foreach>
</insert>
<!-- 向多个表中插入多条数据 -->
<insert id="insertMultiTableMultiValues">
insert into
<foreach collection="tables" item="table">
${table.database}.${table.name} values
<foreach collection="table.values" item="value">
<foreach collection="value.fields" item="field" open="(" close=")" separator=",">
${field.value}
</foreach>
</foreach>
</foreach>
</insert>
<!-- 向多张表中指定的列插入多条数据 -->
<insert id="insertMultiTableMultiValuesWithColumns">
insert into
<foreach collection="tables" item="table">
${table.database}.${table.name}
<foreach collection="table.columns" item="column" open="(" close=")" separator=",">
${column.name}
</foreach>
values
<foreach collection="table.values" item="value">
<foreach collection="value.fields" item="field" open="(" close=")" separator=",">
${field.value}
</foreach>
</foreach>
</foreach>
</insert>
</mapper>

View File

@ -1,38 +1,42 @@
package com.taosdata.taosdemo.service;
import com.taosdata.taosdemo.mapper.DatabaseMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.taosdata.taosdemo.dao.DatabaseMapper;
import com.taosdata.taosdemo.dao.DatabaseMapperImpl;
import javax.sql.DataSource;
import java.util.Map;
@Service
public class DatabaseService {
@Autowired
private DatabaseMapper databaseMapper;
private final DatabaseMapper databaseMapper;
public DatabaseService(DataSource dataSource) {
this.databaseMapper = new DatabaseMapperImpl(dataSource);
}
// 建库指定 name
public int createDatabase(String database) {
return databaseMapper.createDatabase(database);
public void createDatabase(String database) {
databaseMapper.createDatabase(database);
}
// 建库指定参数 keep,days,replica等
public int createDatabase(Map<String, String> map) {
public void createDatabase(Map<String, String> map) {
if (map.isEmpty())
return 0;
if (map.containsKey("database") && map.size() == 1)
return databaseMapper.createDatabase(map.get("database"));
return databaseMapper.createDatabaseWithParameters(map);
return;
if (map.containsKey("database") && map.size() == 1) {
createDatabase(map.get("database"));
return;
}
databaseMapper.createDatabaseWithParameters(map);
}
// drop database
public int dropDatabase(String dbname) {
return databaseMapper.dropDatabase(dbname);
public void dropDatabase(String dbname) {
databaseMapper.dropDatabase(dbname);
}
// use database
public int useDatabase(String dbname) {
return databaseMapper.useDatabase(dbname);
public void useDatabase(String dbname) {
databaseMapper.useDatabase(dbname);
}
}

View File

@ -1,68 +1,63 @@
package com.taosdata.taosdemo.service;
import com.taosdata.taosdemo.components.JdbcTaosdemoConfig;
import com.taosdata.taosdemo.dao.SubTableMapper;
import com.taosdata.taosdemo.dao.SubTableMapperImpl;
import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.mapper.SubTableMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.data.SubTableMetaGenerator;
import com.taosdata.taosdemo.service.data.SubTableValueGenerator;
import org.apache.log4j.Logger;
import javax.sql.DataSource;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
@Service
public class SubTableService extends AbstractService {
@Autowired
private SubTableMapper mapper;
private static final Logger logger = Logger.getLogger(SubTableService.class);
/**
* 1. pick a database and find all of its super tables
* 2. pick a super table to get its schema, including fields and tags
* 3. specify the prefix and the number of sub tables
* 4. specify the number of threads used for creating sub tables
*/
//TODO: specify database, super table, sub-table prefix, number of sub tables, number of threads
public SubTableService(DataSource datasource) {
this.mapper = new SubTableMapperImpl(datasource);
}
// 多线程创建表指定线程个数
public int createSubTable(List<SubTableMeta> subTables, int threadSize) {
ExecutorService executor = Executors.newFixedThreadPool(threadSize);
List<Future<Integer>> futureList = new ArrayList<>();
for (SubTableMeta subTableMeta : subTables) {
Future<Integer> future = executor.submit(() -> createSubTable(subTableMeta));
futureList.add(future);
public void createSubTable(SuperTableMeta superTableMeta, long numOfTables, String prefixOfTable, int numOfThreadsForCreate) {
ExecutorService executor = Executors.newFixedThreadPool(numOfThreadsForCreate);
for (long i = 0; i < numOfTables; i++) {
long tableIndex = i;
executor.execute(() -> createSubTable(superTableMeta, prefixOfTable + (tableIndex + 1)));
}
executor.shutdown();
return getAffectRows(futureList);
try {
executor.awaitTermination(Long.MAX_VALUE,TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
public void createSubTable(SuperTableMeta superTableMeta, String tableName) {
// 构造数据
SubTableMeta meta = SubTableMetaGenerator.generate(superTableMeta, tableName);
createSubTable(meta);
}
// 创建一张子表可以指定databasesupertabletablenametag值
public int createSubTable(SubTableMeta subTableMeta) {
return mapper.createUsingSuperTable(subTableMeta);
}
// 单线程创建多张子表每张子表分别可以指定自己的databasesupertabletablenametag值
public int createSubTable(List<SubTableMeta> subTables) {
return createSubTable(subTables, 1);
public void createSubTable(SubTableMeta subTableMeta) {
mapper.createUsingSuperTable(subTableMeta);
}
/*************************************************************************************************************************/
// 插入多线程多表
public int insert(List<SubTableValue> subTableValues, int threadSize) {
public int insert(List<SubTableValue> subTableValues, int threadSize, int frequency) {
ExecutorService executor = Executors.newFixedThreadPool(threadSize);
Future<Integer> future = executor.submit(() -> insert(subTableValues));
executor.shutdown();
return getAffectRows(future);
}
// 插入多线程多表, 自动建表
public int insertAutoCreateTable(List<SubTableValue> subTableValues, int threadSize) {
ExecutorService executor = Executors.newFixedThreadPool(threadSize);
Future<Integer> future = executor.submit(() -> insertAutoCreateTable(subTableValues));
executor.shutdown();
//TODOfrequency
return getAffectRows(future);
}
@ -73,7 +68,7 @@ public class SubTableService extends AbstractService {
// 插入: 多表insert into xxx values(),()... xxx values(),()...
public int insert(List<SubTableValue> subTableValues) {
return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues);
return mapper.insertMultiTableMultiValues(subTableValues);
}
// 插入单表自动建表, insert into xxx using xxx tags(...) values(),()...
@ -86,33 +81,128 @@ public class SubTableService extends AbstractService {
return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues);
}
public int insertMultiThreads(SuperTableMeta superTableMeta, int threadSize, long tableSize, long startTime, long gap, JdbcTaosdemoConfig config) {
List<FutureTask> taskList = new ArrayList<>();
List<Thread> threads = IntStream.range(0, threadSize)
.mapToObj(i -> {
long startInd = i * gap;
long endInd = (i + 1) * gap < tableSize ? (i + 1) * gap : tableSize;
FutureTask<Integer> task = new FutureTask<>(
new InsertTask(superTableMeta,
startInd, endInd,
startTime, config.timeGap,
config.numOfRowsPerTable, config.numOfTablesPerSQL, config.numOfValuesPerSQL,
config.order, config.rate, config.range,
config.prefixOfTable, config.autoCreateTable)
);
taskList.add(task);
return new Thread(task, "InsertThread-" + i);
}).collect(Collectors.toList());
// ExecutorService executors = Executors.newFixedThreadPool(threadSize);
// int count = 0;
//
// //
// List<SubTableValue> subTableValues = new ArrayList<>();
// for (int tableIndex = 1; tableIndex <= numOfTablesPerSQL; tableIndex++) {
// // each table
// SubTableValue subTableValue = new SubTableValue();
// subTableValue.setDatabase();
// subTableValue.setName();
// subTableValue.setSupertable();
//
// List<RowValue> values = new ArrayList<>();
// for (int valueCnt = 0; valueCnt < numOfValuesPerSQL; valueCnt++) {
// List<FieldValue> fields = new ArrayList<>();
// for (int fieldInd = 0; fieldInd <; fieldInd++) {
// FieldValue<Object> field = new FieldValue<>("", "");
// fields.add(field);
// }
// RowValue row = new RowValue();
// row.setFields(fields);
// values.add(row);
// }
// subTableValue.setValues(values);
// subTableValues.add(subTableValue);
// }
threads.stream().forEach(Thread::start);
for (Thread thread : threads) {
try {
thread.join();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
int affectedRows = 0;
for (FutureTask<Integer> task : taskList) {
try {
affectedRows += task.get();
} catch (InterruptedException e) {
e.printStackTrace();
} catch (ExecutionException e) {
e.printStackTrace();
}
}
return affectedRows;
}
private class InsertTask implements Callable<Integer> {
private final long startTableInd; // included
private final long endTableInd; // excluded
private final long startTime;
private final long timeGap;
private final long numOfRowsPerTable;
private long numOfTablesPerSQL;
private long numOfValuesPerSQL;
private final SuperTableMeta superTableMeta;
private final int order;
private final int rate;
private final long range;
private final String prefixOfTable;
private final boolean autoCreateTable;
public InsertTask(SuperTableMeta superTableMeta, long startTableInd, long endTableInd,
long startTime, long timeGap,
long numOfRowsPerTable, long numOfTablesPerSQL, long numOfValuesPerSQL,
int order, int rate, long range,
String prefixOfTable, boolean autoCreateTable) {
this.superTableMeta = superTableMeta;
this.startTableInd = startTableInd;
this.endTableInd = endTableInd;
this.startTime = startTime;
this.timeGap = timeGap;
this.numOfRowsPerTable = numOfRowsPerTable;
this.numOfTablesPerSQL = numOfTablesPerSQL;
this.numOfValuesPerSQL = numOfValuesPerSQL;
this.order = order;
this.rate = rate;
this.range = range;
this.prefixOfTable = prefixOfTable;
this.autoCreateTable = autoCreateTable;
}
@Override
public Integer call() {
long numOfTables = endTableInd - startTableInd;
if (numOfRowsPerTable < numOfValuesPerSQL)
numOfValuesPerSQL = (int) numOfRowsPerTable;
if (numOfTables < numOfTablesPerSQL)
numOfTablesPerSQL = (int) numOfTables;
int affectRows = 0;
// row
for (long rowCnt = 0; rowCnt < numOfRowsPerTable; ) {
long rowSize = numOfValuesPerSQL;
if (rowCnt + rowSize > numOfRowsPerTable) {
rowSize = numOfRowsPerTable - rowCnt;
}
//table
for (long tableCnt = startTableInd; tableCnt < endTableInd; ) {
long tableSize = numOfTablesPerSQL;
if (tableCnt + tableSize > endTableInd) {
tableSize = endTableInd - tableCnt;
}
long startTime = this.startTime + rowCnt * timeGap;
// System.out.println(Thread.currentThread().getName() + " >>> " + "rowCnt: " + rowCnt + ", rowSize: " + rowSize + ", " + "tableCnt: " + tableCnt + ",tableSize: " + tableSize + ", " + "startTime: " + startTime + ",timeGap: " + timeGap + "");
/***********************************************/
// 生成数据
List<SubTableValue> data = SubTableValueGenerator.generate(superTableMeta, prefixOfTable, tableCnt, tableSize, rowSize, startTime, timeGap);
// 乱序
if (order != 0)
SubTableValueGenerator.disrupt(data, rate, range);
// insert
if (autoCreateTable)
affectRows += insertAutoCreateTable(data);
else
affectRows += insert(data);
/***********************************************/
tableCnt += tableSize;
}
rowCnt += rowSize;
}
return affectRows;
}
}
}
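For the multi-threaded insert above, the caller (see TaosDemoApplication) computes gap = ceil(tableSize / threadSize), and thread i then covers the sub-table index range [i * gap, min((i + 1) * gap, tableSize)). For example, with tableSize = 100 and threadSize = 8, gap is 13, so the first seven threads handle 13 tables each and the last thread handles the remaining 9.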

View File

@ -1,19 +1,22 @@
package com.taosdata.taosdemo.service;
import com.taosdata.taosdemo.dao.SuperTableMapper;
import com.taosdata.taosdemo.dao.SuperTableMapperImpl;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.mapper.SuperTableMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@Service
import javax.sql.DataSource;
public class SuperTableService {
@Autowired
private SuperTableMapper superTableMapper;
public SuperTableService(DataSource dataSource) {
this.superTableMapper = new SuperTableMapperImpl(dataSource);
}
// 创建超级表指定每个field的名称和类型每个tag的名称和类型
public int create(SuperTableMeta superTableMeta) {
return superTableMapper.createSuperTable(superTableMeta);
public void create(SuperTableMeta superTableMeta) {
superTableMapper.createSuperTable(superTableMeta);
}
public void drop(String database, String name) {

View File

@ -1,41 +1,25 @@
package com.taosdata.taosdemo.service;
import com.taosdata.taosdemo.dao.TableMapper;
import com.taosdata.taosdemo.domain.TableMeta;
import com.taosdata.taosdemo.mapper.TableMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
@Service
public class TableService extends AbstractService {
@Autowired
private TableMapper tableMapper;
//创建一张表
public int create(TableMeta tableMeta) {
return tableMapper.create(tableMeta);
public void create(TableMeta tableMeta) {
tableMapper.create(tableMeta);
}
//创建多张表
public int create(List<TableMeta> tables) {
return create(tables, 1);
}
//多线程创建多张表
public int create(List<TableMeta> tables, int threadSize) {
ExecutorService executors = Executors.newFixedThreadPool(threadSize);
List<Future<Integer>> futures = new ArrayList<>();
for (TableMeta table : tables) {
Future<Integer> future = executors.submit(() -> create(table));
futures.add(future);
}
return getAffectRows(futures);
public void create(List<TableMeta> tables) {
tables.stream().forEach(this::create);
}

View File

@ -27,4 +27,16 @@ public class SubTableMetaGenerator {
return subTableMetaList;
}
public static SubTableMeta generate(SuperTableMeta superTableMeta, String tableName) {
SubTableMeta subTableMeta = new SubTableMeta();
// create table xxx.xxx using xxx tags(...)
subTableMeta.setDatabase(superTableMeta.getDatabase());
subTableMeta.setName(tableName);
subTableMeta.setSupertable(superTableMeta.getName());
subTableMeta.setFields(superTableMeta.getFields());
List<TagValue> tagValues = TagValueGenerator.generate(superTableMeta.getTags());
subTableMeta.setTags(tagValues);
return subTableMeta;
}
}

View File

@ -1,8 +1,6 @@
package com.taosdata.taosdemo.service.data;
import com.taosdata.taosdemo.domain.RowValue;
import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.domain.*;
import com.taosdata.taosdemo.utils.TimeStampUtil;
import org.springframework.beans.BeanUtils;
@ -11,22 +9,26 @@ import java.util.List;
public class SubTableValueGenerator {
public static List<SubTableValue> generate(List<SubTableMeta> subTableMetaList, int numOfRowsPerTable, long start, long timeGap) {
List<SubTableValue> subTableValueList = new ArrayList<>();
subTableMetaList.stream().forEach((subTableMeta) -> {
// insert into xxx.xxx using xxxx tags(...) values(),()...
public static List<SubTableValue> generate(SuperTableMeta superTableMeta, String prefixOfTables, long tableIndex, long tableSize, long valueSize, long startTime, long timeGap) {
List<SubTableValue> subTableValues = new ArrayList<>();
for (int i = 1; i <= tableSize; i++) {
SubTableValue subTableValue = new SubTableValue();
subTableValue.setDatabase(subTableMeta.getDatabase());
subTableValue.setName(subTableMeta.getName());
subTableValue.setSupertable(subTableMeta.getSupertable());
subTableValue.setTags(subTableMeta.getTags());
TimeStampUtil.TimeTuple tuple = TimeStampUtil.range(start, timeGap, numOfRowsPerTable);
List<RowValue> values = FieldValueGenerator.generate(tuple.start, tuple.end, tuple.timeGap, subTableMeta.getFields());
subTableValue.setDatabase(superTableMeta.getDatabase());
subTableValue.setName(prefixOfTables + (tableIndex + i));
subTableValue.setSupertable(superTableMeta.getName());
TimeStampUtil.TimeTuple tuple = TimeStampUtil.range(startTime, timeGap, valueSize);
List<TagValue> tags = TagValueGenerator.generate(superTableMeta.getTags());
subTableValue.setTags(tags);
List<RowValue> values = FieldValueGenerator.generate(tuple.start, tuple.end, tuple.timeGap, superTableMeta.getFields());
subTableValue.setValues(values);
subTableValueList.add(subTableValue);
});
return subTableValueList;
subTableValues.add(subTableValue);
}
return subTableValues;
}
public static List<SubTableValue> generate(List<SubTableMeta> subTableMetaList, int numOfRowsPerTable, long start, long timeGap) {
return generate(subTableMetaList, 0, subTableMetaList.size(), numOfRowsPerTable, start, timeGap);
}
public static void disrupt(List<SubTableValue> subTableValueList, int rate, long range) {
@ -38,12 +40,10 @@ public class SubTableValueGenerator {
public static List<List<SubTableValue>> split(List<SubTableValue> subTableValueList, int numOfTables, int numOfTablesPerSQL, int numOfRowsPerTable, int numOfValuesPerSQL) {
List<List<SubTableValue>> dataList = new ArrayList<>();
if (numOfRowsPerTable < numOfValuesPerSQL)
numOfValuesPerSQL = numOfRowsPerTable;
if (numOfTables < numOfTablesPerSQL)
numOfTablesPerSQL = numOfTables;
//table
for (int tableCnt = 0; tableCnt < numOfTables; ) {
int tableSize = numOfTablesPerSQL;
@ -81,4 +81,20 @@ public class SubTableValueGenerator {
split(null, 99, 10, 99, 10);
}
public static List<SubTableValue> generate(List<SubTableMeta> subTableMetaList, int tableCnt, int tableSize, int rowSize, long startTime, long timeGap) {
List<SubTableValue> subTableValueList = new ArrayList<>();
for (int i = 0; i < tableSize; i++) {
SubTableMeta subTableMeta = subTableMetaList.get(tableCnt + i);
SubTableValue subTableValue = new SubTableValue();
subTableValue.setDatabase(subTableMeta.getDatabase());
subTableValue.setName(subTableMeta.getName());
subTableValue.setSupertable(subTableMeta.getSupertable());
subTableValue.setTags(subTableMeta.getTags());
TimeStampUtil.TimeTuple tuple = TimeStampUtil.range(startTime, timeGap, rowSize);
List<RowValue> values = FieldValueGenerator.generate(tuple.start, tuple.end, tuple.timeGap, subTableMeta.getFields());
subTableValue.setValues(values);
subTableValueList.add(subTableValue);
}
return subTableValueList;
}
}
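For orientation, a minimal sketch of how the reworked generate overload above might be driven; the database, field and tag definitions here are hypothetical and chosen only for illustration:

import java.util.ArrayList;
import java.util.List;
import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.domain.TagMeta;
import com.taosdata.taosdemo.service.data.SubTableValueGenerator;

public class SubTableValueGeneratorSketch {
    public static void main(String[] args) {
        // describe a hypothetical super table: test.weather (ts timestamp, f1 int) tags (t1 nchar(64))
        SuperTableMeta superTableMeta = new SuperTableMeta();
        superTableMeta.setDatabase("test");
        superTableMeta.setName("weather");
        List<FieldMeta> fields = new ArrayList<>();
        fields.add(new FieldMeta("ts", "timestamp"));
        fields.add(new FieldMeta("f1", "int"));
        superTableMeta.setFields(fields);
        List<TagMeta> tags = new ArrayList<>();
        tags.add(new TagMeta("t1", "nchar(64)"));
        superTableMeta.setTags(tags);
        // values for 10 sub tables named t_1 .. t_10, 100 rows each,
        // starting at a fixed epoch timestamp with a 10 ms step
        List<SubTableValue> values = SubTableValueGenerator.generate(
                superTableMeta, "t_", 0, 10, 100, 1601481600000L, 10);
        System.out.println("generated value sets: " + values.size());
    }
}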

View File

@ -0,0 +1,194 @@
package com.taosdata.taosdemo.utils;
import com.taosdata.taosdemo.domain.*;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
public class SqlSpeller {
// create database if not exists xxx keep xx days xx replica xx cache xx...
public static String createDatabase(Map<String, String> map) {
StringBuilder sb = new StringBuilder();
sb.append("create database if not exists ").append(map.get("database")).append(" ");
if (map.containsKey("keep"))
sb.append("keep ").append(map.get("keep")).append(" ");
if (map.containsKey("days"))
sb.append("days ").append(map.get("days")).append(" ");
if (map.containsKey("replica"))
sb.append("replica ").append(map.get("replica")).append(" ");
if (map.containsKey("cache"))
sb.append("cache ").append(map.get("cache")).append(" ");
if (map.containsKey("blocks"))
sb.append("blocks ").append(map.get("blocks")).append(" ");
if (map.containsKey("minrows"))
sb.append("minrows ").append(map.get("minrows")).append(" ");
if (map.containsKey("maxrows"))
sb.append("maxrows ").append(map.get("maxrows")).append(" ");
if (map.containsKey("precision"))
sb.append("precision ").append(map.get("precision")).append(" ");
if (map.containsKey("comp"))
sb.append("comp ").append(map.get("comp")).append(" ");
if (map.containsKey("walLevel"))
sb.append("walLevel ").append(map.get("walLevel")).append(" ");
if (map.containsKey("quorum"))
sb.append("quorum ").append(map.get("quorum")).append(" ");
if (map.containsKey("fsync"))
sb.append("fsync ").append(map.get("fsync")).append(" ");
if (map.containsKey("update"))
sb.append("update ").append(map.get("update")).append(" ");
return sb.toString();
}
// create table if not exists xx.xx using xx.xx tags(x,x,x)
public static String createTableUsingSuperTable(SubTableMeta subTableMeta) {
StringBuilder sb = new StringBuilder();
sb.append("create table if not exists ").append(subTableMeta.getDatabase()).append(".").append(subTableMeta.getName()).append(" ");
sb.append("using ").append(subTableMeta.getDatabase()).append(".").append(subTableMeta.getSupertable()).append(" ");
// String tagStr = subTableMeta.getTags().stream().filter(Objects::nonNull)
// .map(tagValue -> tagValue.getName() + " '" + tagValue.getValue() + "' ")
// .collect(Collectors.joining(",", "(", ")"));
sb.append("tags ").append(tagValues(subTableMeta.getTags()));
return sb.toString();
}
// insert into xx.xxx values(x,x,x),(x,x,x)...
public static String insertOneTableMultiValues(SubTableValue subTableValue) {
StringBuilder sb = new StringBuilder();
sb.append("insert into ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getName() + " ");
sb.append("values").append(rowValues(subTableValue.getValues()));
return sb.toString();
}
//f1, f2, f3
private static String fieldValues(List<FieldValue> fields) {
return IntStream.range(0, fields.size()).mapToObj(i -> {
if (i == 0) {
return "" + fields.get(i).getValue() + "";
} else {
return "'" + fields.get(i).getValue() + "'";
}
}).collect(Collectors.joining(",", "(", ")"));
// return fields.stream()
// .filter(Objects::nonNull)
// .map(fieldValue -> "'" + fieldValue.getValue() + "'")
// .collect(Collectors.joining(",", "(", ")"));
}
//(f1, f2, f3),(f1, f2, f3)
private static String rowValues(List<RowValue> rowValues) {
return rowValues.stream().filter(Objects::nonNull)
.map(rowValue -> fieldValues(rowValue.getFields()))
.collect(Collectors.joining(",", "", ""));
}
// insert into xx.xxx using xx.xx tags(x,x,x) values(x,x,x),(x,x,x)...
public static String insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue) {
StringBuilder sb = new StringBuilder();
sb.append("insert into ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getName()).append(" ");
sb.append("using ").append(subTableValue.getDatabase()).append(".").append(subTableValue.getSupertable()).append(" ");
sb.append("tags ").append(tagValues(subTableValue.getTags()) + " ");
sb.append("values ").append(rowValues(subTableValue.getValues()));
return sb.toString();
}
// (t1,t2,t3...)
private static String tagValues(List<TagValue> tags) {
return tags.stream().filter(Objects::nonNull)
.map(tagValue -> "'" + tagValue.getValue() + "'")
.collect(Collectors.joining(",", "(", ")"));
}
// insert into xx.xx values(),(),()... xx.xx values(),()...
public static String insertMultiSubTableMultiValues(List<SubTableValue> tables) {
return "insert into " + tables.stream().filter(Objects::nonNull)
.map(table -> table.getDatabase() + "." + table.getName() + " values " + rowValues(table.getValues()))
.collect(Collectors.joining(" ", "", ""));
}
// insert into xx.xx using xx.xx tags(xx,xx) values(),()...
public static String insertMultiTableMultiValuesUsingSuperTable(List<SubTableValue> tables) {
return "insert into " + tables.stream().filter(Objects::nonNull)
.map(table -> {
StringBuilder sb = new StringBuilder();
sb.append(table.getDatabase()).append(".").append(table.getName());
sb.append(" using ").append(table.getDatabase()).append(".").append(table.getSupertable());
sb.append(" tags ").append(tagValues(table.getTags()));
sb.append(" values ").append(rowValues(table.getValues()));
return sb.toString();
}).collect(Collectors.joining(" "));
}
// create table if not exists xx.xx (f1 xx,f2 xx...) tags(t1 xx, t2 xx...)
public static String createSuperTable(SuperTableMeta tableMetadata) {
StringBuilder sb = new StringBuilder();
sb.append("create table if not exists ").append(tableMetadata.getDatabase()).append(".").append(tableMetadata.getName());
String fields = tableMetadata.getFields().stream()
.filter(Objects::nonNull).map(field -> field.getName() + " " + field.getType() + " ")
.collect(Collectors.joining(",", "(", ")"));
sb.append(fields);
sb.append(" tags ");
String tags = tableMetadata.getTags().stream().filter(Objects::nonNull)
.map(tag -> tag.getName() + " " + tag.getType() + " ")
.collect(Collectors.joining(",", "(", ")"));
sb.append(tags);
return sb.toString();
}
public static String createTable(TableMeta tableMeta) {
StringBuilder sb = new StringBuilder();
sb.append("create table if not exists ").append(tableMeta.getDatabase()).append(".").append(tableMeta.getName()).append(" ");
String fields = tableMeta.getFields().stream()
.filter(Objects::nonNull).map(field -> field.getName() + " " + field.getType() + " ")
.collect(Collectors.joining(",", "(", ")"));
sb.append(fields);
return sb.toString();
}
// insert into xx.xx values()
public static String insertOneTableMultiValues(TableValue table) {
StringBuilder sb = new StringBuilder();
sb.append("insert into ").append(table.getDatabase()).append(".").append(table.getName() + " ");
sb.append("values").append(rowValues(table.getValues()));
return sb.toString();
}
// insert into xx.xx (f1, f2, f3...) values(xx,xx,xx),(xx,xx,xx)...
public static String insertOneTableMultiValuesWithColumns(TableValue table) {
StringBuilder sb = new StringBuilder();
sb.append("insert into ").append(table.getDatabase()).append(".").append(table.getName()).append(" ");
sb.append(columnNames(table.getColumns()));
sb.append(" values ").append(rowValues(table.getValues()));
return sb.toString();
}
// (f1, f2, f3...)
private static String columnNames(List<FieldMeta> fields) {
return fields.stream()
.filter(Objects::nonNull)
.map(column -> column.getName() + " ")
.collect(Collectors.joining(",", "(", ")"));
}
public static String insertMultiTableMultiValuesWithColumns(List<TableValue> tables) {
StringBuilder sb = new StringBuilder();
sb.append("insert into ").append(tables.stream().filter(Objects::nonNull)
.map(table -> table.getDatabase() + "." + table.getName() + " " + columnNames(table.getColumns()) + " values " + rowValues(table.getValues()))
.collect(Collectors.joining(" ")));
return sb.toString();
}
public static String insertMultiTableMultiValues(List<TableValue> tables) {
StringBuilder sb = new StringBuilder();
sb.append("insert into ").append(tables.stream().filter(Objects::nonNull).map(table ->
table.getDatabase() + "." + table.getName() + " values " + rowValues(table.getValues())
).collect(Collectors.joining(" ")));
return sb.toString();
}
}
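A quick usage sketch of the SQL builder above; the database name and option values are illustrative only. Keys absent from the map are skipped, and the options are emitted in the fixed order hard-coded in createDatabase:

import java.util.HashMap;
import java.util.Map;
import com.taosdata.taosdemo.utils.SqlSpeller;

public class SqlSpellerSketch {
    public static void main(String[] args) {
        Map<String, String> map = new HashMap<>();
        map.put("database", "demo"); // hypothetical database name
        map.put("keep", "3650");
        map.put("days", "30");
        map.put("replica", "1");
        // prints roughly: create database if not exists demo keep 3650 days 30 replica 1
        System.out.println(SqlSpeller.createDatabase(map));
    }
}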

View File

@ -1,14 +1,5 @@
#spring.datasource.url=jdbc:mysql://master:3306/?useSSL=false&useUnicode=true&characterEncoding=UTF-8
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.username=root
#spring.datasource.password=123456
spring.datasource.url=jdbc:TAOS://master:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8
spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver
spring.datasource.username=root
spring.datasource.password=taosdata
spring.datasource.hikari.maximum-pool-size=10
spring.datasource.hikari.minimum-idle=10
spring.datasource.hikari.max-lifetime=600000
logging.level.com.taosdata.taosdemo.mapper=debug
jdbc.driver=com.taosdata.jdbc.rs.RestfulDriver
#jdbc.driver=com.taosdata.jdbc.TSDBDriver
hikari.maximum-pool-size=1
hikari.minimum-idle=1
hikari.max-lifetime=0
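With the spring.datasource.* keys dropped, the trimmed jdbc.* and hikari.* keys presumably get wired into a connection pool by hand. A minimal sketch of that wiring, assuming HikariCP; the JDBC URL and host below are hypothetical placeholders, not part of this change:

import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;

public class DataSourceSketch {
    public static HikariDataSource build() {
        HikariConfig config = new HikariConfig();
        config.setDriverClassName("com.taosdata.jdbc.rs.RestfulDriver"); // jdbc.driver
        config.setJdbcUrl("jdbc:TAOS-RS://localhost:6041/");             // hypothetical RESTful URL
        config.setUsername("root");
        config.setPassword("taosdata");
        config.setMaximumPoolSize(1); // hikari.maximum-pool-size
        config.setMinimumIdle(1);     // hikari.minimum-idle
        config.setMaxLifetime(0);     // hikari.max-lifetime, 0 = unlimited
        return new HikariDataSource(config);
    }
}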

View File

@ -0,0 +1,119 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 2,
"databases": [
{
"dbinfo": {
"name": "db04",
"drop": "no",
"replica": 1,
"days": 2,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp": 2,
"walLevel": 1,
"quorum": 1,
"fsync": 3000,
"update": 0
},
"super_tables": [
{
"name": "stb04",
"child_table_exists": "no",
"childtable_count": 10,
"childtable_prefix": "stb01_",
"auto_create_table": "no",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rate": 0,
"insert_rows": 100,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 0,
"rows_per_tbl": 3,
"max_sql_len": 1024,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [
{
"type": "TINYINT"
},
{
"type": "SMALLINT"
},
{
"type": "INT"
},
{
"type": "BIGINT"
},
{
"type": "BOOL"
},
{
"type": "FLOAT"
},
{
"type": "DOUBLE"
},
{
"type": "TIMESTAMP"
},
{
"type": "BINARY",
"len": 16
},
{
"type": "NCHAR",
"len": 4
}
],
"tags": [
{
"type": "TINYINT"
},
{
"type": "SMALLINT"
},
{
"type": "INT"
},
{
"type": "BIGINT"
},
{
"type": "BOOL"
},
{
"type": "FLOAT"
},
{
"type": "DOUBLE"
},
{
"type": "BINARY",
"len": 16
},
{
"type": "NCHAR",
"len": 4
}
]
}
]
}
]
}

View File

@ -1,5 +1,5 @@
### Settings ###
log4j.rootLogger=debug,stdout,DebugLog,ErrorLog
log4j.rootLogger=error,stdout
### Output to console ###
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target=System.out

View File

@ -0,0 +1,17 @@
{
"filetype":"query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "db01",
"super_table_query":
{"rate":1, "concurrent":1,
"sqls": [{"sql": "select count(*) from stb01", "result": "./query_res0.txt"}]
},
"sub_table_query":
{"stblname": "stb01", "rate":1, "threads":1,
"sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}]
}
}

View File

@ -5,6 +5,6 @@
<title>Index</title>
</head>
<body>
<h1>Hello~~~</h1>
<h1>Developing~~~</h1>
</body>
</html>

View File

@ -1,13 +0,0 @@
package com.taosdata.taosdemo;
import org.junit.jupiter.api.Test;
import org.springframework.boot.test.context.SpringBootTest;
@SpringBootTest
class TaosdemoApplicationTests {
@Test
void contextLoads() {
}
}

View File

@ -1,42 +0,0 @@
package com.taosdata.taosdemo.mapper;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.util.HashMap;
import java.util.Map;
@RunWith(SpringRunner.class)
@SpringBootTest
public class DatabaseMapperTest {
@Autowired
private DatabaseMapper databaseMapper;
@Test
public void createDatabase() {
databaseMapper.createDatabase("db_test");
}
@Test
public void dropDatabase() {
databaseMapper.dropDatabase("db_test");
}
@Test
public void creatDatabaseWithParameters() {
Map<String, String> map = new HashMap<>();
map.put("dbname", "weather");
map.put("keep", "3650");
map.put("days", "30");
map.put("replica", "1");
databaseMapper.createDatabaseWithParameters(map);
}
@Test
public void useDatabase() {
databaseMapper.useDatabase("test");
}
}

View File

@ -1,88 +0,0 @@
package com.taosdata.taosdemo.mapper;
import com.taosdata.taosdemo.domain.*;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.util.ArrayList;
import java.util.List;
@RunWith(SpringRunner.class)
@SpringBootTest
public class SubTableMapperTest {
@Autowired
private SubTableMapper subTableMapper;
private List<SubTableValue> tables;
@Test
public void createUsingSuperTable() {
SubTableMeta subTableMeta = new SubTableMeta();
subTableMeta.setDatabase("test");
subTableMeta.setSupertable("weather");
subTableMeta.setName("t1");
List<TagValue> tags = new ArrayList<>();
for (int i = 0; i < 3; i++) {
tags.add(new TagValue("tag" + (i + 1), "nchar(64)"));
}
subTableMeta.setTags(tags);
subTableMapper.createUsingSuperTable(subTableMeta);
}
@Test
public void insertOneTableMultiValues() {
subTableMapper.insertOneTableMultiValues(tables.get(0));
}
@Test
public void insertOneTableMultiValuesUsingSuperTable() {
subTableMapper.insertOneTableMultiValuesUsingSuperTable(tables.get(0));
}
@Test
public void insertMultiTableMultiValues() {
subTableMapper.insertMultiTableMultiValues(tables);
}
@Test
public void insertMultiTableMultiValuesUsingSuperTable() {
subTableMapper.insertMultiTableMultiValuesUsingSuperTable(tables);
}
@Before
public void before() {
tables = new ArrayList<>();
for (int ind = 0; ind < 3; ind++) {
SubTableValue table = new SubTableValue();
table.setDatabase("test");
// supertable
table.setSupertable("weather");
table.setName("t" + (ind + 1));
// tags
List<TagValue> tags = new ArrayList<>();
for (int i = 0; i < 3; i++) {
tags.add(new TagValue("tag" + (i + 1), "beijing"));
}
table.setTags(tags);
// values
List<RowValue> values = new ArrayList<>();
for (int i = 0; i < 2; i++) {
List<FieldValue> fields = new ArrayList<>();
for (int j = 0; j < 4; j++) {
fields.add(new FieldValue("f" + (j + 1), (j + 1) * 10));
}
values.add(new RowValue(fields));
}
table.setValues(values);
tables.add(table);
}
}
}

View File

@ -1,50 +0,0 @@
package com.taosdata.taosdemo.mapper;
import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.domain.TagMeta;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.util.ArrayList;
import java.util.List;
@RunWith(SpringRunner.class)
@SpringBootTest
public class SuperTableMapperTest {
@Autowired
private SuperTableMapper superTableMapper;
@Test
public void testCreateSuperTableUsingSQL() {
String sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)";
superTableMapper.createSuperTableUsingSQL(sql);
}
@Test
public void createSuperTable() {
SuperTableMeta superTableMeta = new SuperTableMeta();
superTableMeta.setDatabase("test");
superTableMeta.setName("weather");
List<FieldMeta> fields = new ArrayList<>();
for (int i = 0; i < 5; i++) {
fields.add(new FieldMeta("f" + (i + 1), "int"));
}
superTableMeta.setFields(fields);
List<TagMeta> tags = new ArrayList<>();
for (int i = 0; i < 3; i++) {
tags.add(new TagMeta("t" + (i + 1), "nchar(64)"));
}
superTableMeta.setTags(tags);
superTableMapper.createSuperTable(superTableMeta);
}
@Test
public void dropSuperTable() {
superTableMapper.dropSuperTable("test", "weather");
}
}

View File

@ -1,142 +0,0 @@
package com.taosdata.taosdemo.mapper;
import com.taosdata.taosdemo.domain.*;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
@SpringBootTest
@RunWith(SpringRunner.class)
public class TableMapperTest {
@Autowired
private TableMapper tableMapper;
private static Random random = new Random(System.currentTimeMillis());
@Test
public void create() {
TableMeta table = new TableMeta();
table.setDatabase("test");
table.setName("t1");
List<FieldMeta> fields = new ArrayList<>();
for (int i = 0; i < 3; i++) {
FieldMeta field = new FieldMeta();
field.setName("f" + (i + 1));
field.setType("nchar(64)");
fields.add(field);
}
table.setFields(fields);
tableMapper.create(table);
}
@Test
public void insertOneTableMultiValues() {
TableValue table = new TableValue();
table.setDatabase("test");
table.setName("t1");
List<RowValue> values = new ArrayList<>();
for (int j = 0; j < 5; j++) {
List<FieldValue> fields = new ArrayList<>();
for (int k = 0; k < 2; k++) {
FieldValue field = new FieldValue<>();
field.setValue((k + 1) * 100);
fields.add(field);
}
values.add(new RowValue(fields));
}
table.setValues(values);
tableMapper.insertOneTableMultiValues(table);
}
@Test
public void insertOneTableMultiValuesWithCoulmns() {
TableValue tableValue = new TableValue();
tableValue.setDatabase("test");
tableValue.setName("weather");
// columns
List<FieldMeta> columns = new ArrayList<>();
for (int i = 0; i < 3; i++) {
FieldMeta field = new FieldMeta();
field.setName("f" + (i + 1));
columns.add(field);
}
tableValue.setColumns(columns);
// values
List<RowValue> values = new ArrayList<>();
for (int i = 0; i < 3; i++) {
List<FieldValue> fields = new ArrayList<>();
for (int j = 0; j < 3; j++) {
FieldValue field = new FieldValue();
field.setValue(j);
fields.add(field);
}
values.add(new RowValue(fields));
}
tableValue.setValues(values);
tableMapper.insertOneTableMultiValuesWithColumns(tableValue);
}
@Test
public void insertMultiTableMultiValues() {
List<TableValue> tables = new ArrayList<>();
for (int i = 0; i < 3; i++) {
TableValue table = new TableValue();
table.setDatabase("test");
table.setName("t" + (i + 1));
List<RowValue> values = new ArrayList<>();
for (int j = 0; j < 5; j++) {
List<FieldValue> fields = new ArrayList<>();
for (int k = 0; k < 2; k++) {
FieldValue field = new FieldValue<>();
field.setValue((k + 1) * 10);
fields.add(field);
}
values.add(new RowValue(fields));
}
table.setValues(values);
tables.add(table);
}
tableMapper.insertMultiTableMultiValues(tables);
}
@Test
public void insertMultiTableMultiValuesWithCoulumns() {
List<TableValue> tables = new ArrayList<>();
for (int i = 0; i < 3; i++) {
TableValue table = new TableValue();
table.setDatabase("test");
table.setName("t" + (i + 1));
// columns
List<FieldMeta> columns = new ArrayList<>();
for (int j = 0; j < 3; j++) {
FieldMeta field = new FieldMeta();
field.setName("f" + (j + 1));
columns.add(field);
}
table.setColumns(columns);
// values
List<RowValue> values = new ArrayList<>();
for (int j = 0; j < 5; j++) {
List<FieldValue> fields = new ArrayList<>();
for (int k = 0; k < columns.size(); k++) {
FieldValue field = new FieldValue<>();
field.setValue((k + 1) * 10);
fields.add(field);
}
values.add(new RowValue(fields));
}
table.setValues(values);
tables.add(table);
}
tableMapper.insertMultiTableMultiValuesWithColumns(tables);
}
}

View File

@ -1,15 +1,9 @@
package com.taosdata.taosdemo.service;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
@RunWith(SpringRunner.class)
@SpringBootTest
public class DatabaseServiceTest {
@Autowired
private DatabaseService service;
@Test

View File

@ -4,18 +4,12 @@ import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.TagValue;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.util.ArrayList;
import java.util.List;
@RunWith(SpringRunner.class)
@SpringBootTest
public class SubTableServiceTest {
@Autowired
private SubTableService service;
private List<SubTableMeta> subTables;
@ -38,13 +32,11 @@ public class SubTableServiceTest {
@Test
public void testCreateSubTable() {
int count = service.createSubTable(subTables);
System.out.println("count >>> " + count);
}
@Test
public void testCreateSubTableList() {
int count = service.createSubTable(subTables, 10);
System.out.println("count >>> " + count);
}
}

View File

@ -4,19 +4,12 @@ import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.domain.TagMeta;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.util.ArrayList;
import java.util.List;
@RunWith(SpringRunner.class)
@SpringBootTest
public class SuperTableServiceTest {
@Autowired
private SuperTableService service;
@Test

View File

@ -3,18 +3,11 @@ package com.taosdata.taosdemo.service;
import com.taosdata.taosdemo.domain.TableMeta;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import java.util.ArrayList;
import java.util.List;
@RunWith(SpringRunner.class)
@SpringBootTest
public class TableServiceTest {
@Autowired
private TableService tableService;
private List<TableMeta> tables;
@ -32,12 +25,7 @@ public class TableServiceTest {
@Test
public void testCreate() {
int count = tableService.create(tables);
System.out.println(count);
tableService.create(tables);
}
@Test
public void testCreateMultiThreads() {
System.out.println(tableService.create(tables, 10));
}
}

View File

@ -0,0 +1,254 @@
package com.taosdata.taosdemo.utils;
import com.taosdata.taosdemo.domain.*;
import org.junit.Before;
import org.junit.Test;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
public class SqlSpellerTest {
@Test
public void createDatabase() {
HashMap<String, String> map = new HashMap<>();
map.put("database", "jdbcdb");
map.put("keep", "3650");
map.put("days", "30");
map.put("replica", "1");
map.put("minRows", "100");
map.put("maxRows", "1000");
map.put("cache", "16");
map.put("blocks", "8");
map.put("precision", "ms");
map.put("comp", "2");
map.put("walLevel", "1");
map.put("quorum", "1");
map.put("fsync", "3000");
map.put("update", "0");
String sql = SqlSpeller.createDatabase(map);
System.out.println(sql);
}
@Test
public void createTableUsingSuperTable() {
SubTableMeta subTableMeta = new SubTableMeta();
subTableMeta.setDatabase("test");
subTableMeta.setSupertable("weather");
subTableMeta.setName("t1");
List<TagValue> tags = new ArrayList<>();
for (int i = 0; i < 3; i++) {
tags.add(new TagValue("tag" + (i + 1), "nchar(64)"));
}
subTableMeta.setTags(tags);
String sql = SqlSpeller.createTableUsingSuperTable(subTableMeta);
System.out.println(sql);
}
@Test
public void insertOneTableMultiValues() {
String sql = SqlSpeller.insertOneTableMultiValues(tables.get(0));
System.out.println(sql);
}
@Test
public void insertOneTableMultiValuesUsingSuperTable() {
String sql = SqlSpeller.insertOneTableMultiValuesUsingSuperTable(tables.get(0));
System.out.println(sql);
}
@Test
public void insertMultiTableMultiValues() {
String sql = SqlSpeller.insertMultiSubTableMultiValues(tables);
System.out.println(sql);
}
@Test
public void insertMultiTableMultiValuesUsingSuperTable() {
String sql = SqlSpeller.insertMultiTableMultiValuesUsingSuperTable(tables);
System.out.println(sql);
}
private List<SubTableValue> tables;
@Before
public void before() {
tables = new ArrayList<>();
for (int ind = 0; ind < 3; ind++) {
SubTableValue table = new SubTableValue();
table.setDatabase("test");
// supertable
table.setSupertable("weather");
table.setName("t" + (ind + 1));
// tags
List<TagValue> tags = new ArrayList<>();
for (int i = 0; i < 3; i++) {
tags.add(new TagValue("tag" + (i + 1), "beijing"));
}
table.setTags(tags);
// values
List<RowValue> values = new ArrayList<>();
for (int i = 0; i < 2; i++) {
List<FieldValue> fields = new ArrayList<>();
for (int j = 0; j < 4; j++) {
fields.add(new FieldValue("f" + (j + 1), (j + 1) * 10));
}
values.add(new RowValue(fields));
}
table.setValues(values);
tables.add(table);
}
}
@Test
public void createSuperTable() {
SuperTableMeta superTableMeta = new SuperTableMeta();
superTableMeta.setDatabase("test");
superTableMeta.setName("weather");
List<FieldMeta> fields = new ArrayList<>();
for (int i = 0; i < 5; i++) {
fields.add(new FieldMeta("f" + (i + 1), "int"));
}
superTableMeta.setFields(fields);
List<TagMeta> tags = new ArrayList<>();
for (int i = 0; i < 3; i++) {
tags.add(new TagMeta("t" + (i + 1), "nchar(64)"));
}
superTableMeta.setTags(tags);
String sql = SqlSpeller.createSuperTable(superTableMeta);
System.out.println(sql);
}
@Test
public void createTable() {
TableMeta table = new TableMeta();
table.setDatabase("test");
table.setName("t1");
List<FieldMeta> fields = new ArrayList<>();
for (int i = 0; i < 3; i++) {
FieldMeta field = new FieldMeta();
field.setName("f" + (i + 1));
field.setType("nchar(64)");
fields.add(field);
}
table.setFields(fields);
String sql = SqlSpeller.createTable(table);
System.out.println(sql);
}
@Test
public void testInsertOneTableMultiValues() {
TableValue table = new TableValue();
table.setDatabase("test");
table.setName("t1");
List<RowValue> values = new ArrayList<>();
for (int j = 0; j < 5; j++) {
List<FieldValue> fields = new ArrayList<>();
for (int k = 0; k < 2; k++) {
FieldValue field = new FieldValue<>();
field.setValue((k + 1) * 100);
fields.add(field);
}
values.add(new RowValue(fields));
}
table.setValues(values);
String sql = SqlSpeller.insertOneTableMultiValues(table);
System.out.println(sql);
}
@Test
public void insertOneTableMultiValuesWithColumns() {
TableValue tableValue = new TableValue();
tableValue.setDatabase("test");
tableValue.setName("weather");
// columns
List<FieldMeta> columns = new ArrayList<>();
for (int i = 0; i < 3; i++) {
FieldMeta field = new FieldMeta();
field.setName("f" + (i + 1));
columns.add(field);
}
tableValue.setColumns(columns);
// values
List<RowValue> values = new ArrayList<>();
for (int i = 0; i < 3; i++) {
List<FieldValue> fields = new ArrayList<>();
for (int j = 0; j < 3; j++) {
FieldValue field = new FieldValue();
field.setValue(j);
fields.add(field);
}
values.add(new RowValue(fields));
}
tableValue.setValues(values);
String sql = SqlSpeller.insertOneTableMultiValuesWithColumns(tableValue);
System.out.println(sql);
}
@Test
public void insertMultiTableMultiValuesWithColumns() {
List<TableValue> tables = new ArrayList<>();
for (int i = 0; i < 3; i++) {
TableValue table = new TableValue();
table.setDatabase("test");
table.setName("t" + (i + 1));
// columns
List<FieldMeta> columns = new ArrayList<>();
for (int j = 0; j < 3; j++) {
FieldMeta field = new FieldMeta();
field.setName("f" + (j + 1));
columns.add(field);
}
table.setColumns(columns);
// values
List<RowValue> values = new ArrayList<>();
for (int j = 0; j < 5; j++) {
List<FieldValue> fields = new ArrayList<>();
for (int k = 0; k < columns.size(); k++) {
FieldValue field = new FieldValue<>();
field.setValue((k + 1) * 10);
fields.add(field);
}
values.add(new RowValue(fields));
}
table.setValues(values);
tables.add(table);
}
String sql = SqlSpeller.insertMultiTableMultiValuesWithColumns(tables);
System.out.println(sql);
}
@Test
public void testInsertMultiTableMultiValues() {
List<TableValue> tables = new ArrayList<>();
for (int i = 0; i < 3; i++) {
TableValue table = new TableValue();
table.setDatabase("test");
table.setName("t" + (i + 1));
List<RowValue> values = new ArrayList<>();
for (int j = 0; j < 5; j++) {
List<FieldValue> fields = new ArrayList<>();
for (int k = 0; k < 2; k++) {
FieldValue field = new FieldValue<>();
field.setValue((k + 1) * 10);
fields.add(field);
}
values.add(new RowValue(fields));
}
table.setValues(values);
tables.add(table);
}
String sql = SqlSpeller.insertMultiTableMultiValues(tables);
System.out.println(sql);
}
}

View File

@ -17,6 +17,8 @@ public class TimeStampUtilTest {
@Test
public void longToDatetime() {
System.out.println(TimeStampUtil.longToDatetime(1293334499006l));
String datetime = TimeStampUtil.longToDatetime(1510000000000L);
assertEquals("2017-11-07 04:26:40.000", datetime);
long timestamp = TimeStampUtil.datetimeToLong(datetime);

View File

@ -221,11 +221,14 @@ python3 ./test.py -f functions/function_sum.py -r 1
python3 ./test.py -f functions/function_top.py -r 1
python3 ./test.py -f functions/function_twa.py -r 1
python3 ./test.py -f functions/function_twa_test2.py
python3 ./test.py -f functions/all_null_value.py
python3 queryCount.py
python3 ./test.py -f query/queryGroupbyWithInterval.py
python3 client/twoClients.py
python3 test.py -f query/queryInterval.py
python3 test.py -f query/queryFillTest.py
python3 ./test.py -f query/queryInterval.py
python3 ./test.py -f query/queryFillTest.py
python3 ./test.py -f query/last_row_cache.py
python3 ./test.py -f query/last_cache.py
# tools
python3 test.py -f tools/taosdemoTest.py

View File

@ -0,0 +1,90 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.rowNum = 10
self.ts = 1537146000000
def run(self):
tdSql.prepare()
tdSql.execute("create table st(ts timestamp, c1 int, c2 int)")
for i in range(self.rowNum):
tdSql.execute("insert into st values(%d, null, null)" % (self.ts + i))
tdSql.query("select avg(c1) from st")
tdSql.checkRows(0)
tdSql.query("select max(c1) from st")
tdSql.checkRows(0)
tdSql.query("select min(c1) from st")
tdSql.checkRows(0)
tdSql.query("select bottom(c1, 1) from st")
tdSql.checkRows(0)
tdSql.query("select top(c1, 1) from st")
tdSql.checkRows(0)
tdSql.query("select diff(c1) from st")
tdSql.checkRows(0)
tdSql.query("select first(c1) from st")
tdSql.checkRows(0)
tdSql.query("select last(c1) from st")
tdSql.checkRows(0)
tdSql.query("select last_row(c1) from st")
tdSql.checkRows(1)
tdSql.checkData(0, 0, None)
tdSql.query("select count(c1) from st")
tdSql.checkRows(0)
tdSql.query("select leastsquares(c1, 1, 1) from st")
tdSql.checkRows(0)
tdSql.query("select c1 + c2 from st")
tdSql.checkRows(10)
tdSql.query("select spread(c1) from st")
tdSql.checkRows(0)
# tdSql.query("select stddev(c1) from st")
# tdSql.checkRows(0)
tdSql.query("select sum(c1) from st")
tdSql.checkRows(0)
tdSql.query("select twa(c1) from st")
tdSql.checkRows(0)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -110,6 +110,10 @@ class TDTestCase:
tdSql.query("select first(col9) from test1")
tdSql.checkRows(1)
tdSql.checkData(0, 0, '涛思数据1')
# TD-2607: first/last + where condition matching no rows + interval
tdSql.query("select first(*),last(*) from test1 where ts < 23 interval(1s)")
tdSql.checkRows(0)
def stop(self):
tdSql.close()

View File

@ -69,6 +69,15 @@ class TDTestCase:
tdSql.query("select max(col6) from test1")
tdSql.checkData(0, 0, np.max(floatData))
# test case: https://jira.taosdata.com:18080/browse/TD-2583
tdSql.execute("create database test days 2")
tdSql.execute("create table car(ts timestamp, speed int)")
tdSql.execute("insert into car values(now, -1)")
tdSql.execute("insert into car values(now-10d, null)")
tdSql.query("select max(speed) from car")
tdSql.checkData(0, 0, -1)
def stop(self):
tdSql.close()

View File

@ -69,6 +69,15 @@ class TDTestCase:
tdSql.query("select min(col6) from test1")
tdSql.checkData(0, 0, np.min(floatData))
# test case: https://jira.taosdata.com:18080/browse/TD-2583
tdSql.execute("create database test days 2")
tdSql.execute("create table car(ts timestamp, speed int)")
tdSql.execute("insert into car values(now, 1)")
tdSql.execute("insert into car values(now-10d, null)")
tdSql.query("select min(speed) from car")
tdSql.checkData(0, 0, 1)
def stop(self):
tdSql.close()

View File

@ -142,6 +142,14 @@ class TDTestCase:
tdSql.error("select percentile(voltage, 20) from meters")
tdSql.query("select apercentile(voltage, 20) from meters")
print("apercentile result: %s" % tdSql.getData(0, 0))
# Test case for: https://jira.taosdata.com:18080/browse/TD-2609
tdSql.execute("create table st(ts timestamp, k int)")
tdSql.execute("insert into st values(now, -100)")
tdSql.query("select apercentile(k, 20) from st")
tdSql.checkData(0, 0, -100.00)
def stop(self):
tdSql.close()

View File

@ -132,6 +132,22 @@ class TDTestCase:
tdSql.query('select twa(c) from t4 interval(10s)')
tdSql.checkData(0,1,10.999)
# Test case: https://jira.taosdata.com:18080/browse/TD-2624
tdSql.execute("create database test keep 7300")
tdSql.execute("use test")
tdSql.execute("create table st(ts timestamp, k int)")
tdSql.execute("insert into st values('2011-01-02 18:42:45.326', -1)")
tdSql.execute("insert into st values('2020-07-30 17:44:06.283', 0)")
tdSql.execute("insert into st values('2020-07-30 17:44:19.578', 9999999)")
tdSql.execute("insert into st values('2020-07-30 17:46:06.417', NULL)")
tdSql.execute("insert into st values('2020-11-09 18:42:25.538', 0)")
tdSql.execute("insert into st values('2020-12-29 17:43:11.641', 0)")
tdSql.execute("insert into st values('2020-12-29 18:43:17.129', 0)")
tdSql.execute("insert into st values('2020-12-29 18:46:19.109', NULL)")
tdSql.execute("insert into st values('2021-01-03 18:40:40.065', 0)")
tdSql.query("select twa(k),first(ts) as taos1 from st where k <50 interval(17s)")
tdSql.checkRows(6)
def stop(self):
tdSql.close()

View File

@ -16,7 +16,7 @@ TOP_DIR=`pwd`
TAOSD_DIR=`find . -name "taosd"|grep -v community|head -n1`
nohup $TAOSD_DIR >/dev/null &
cd -
./crash_gen.sh --valgrind -p -t 10 -s 250 -b 4
./crash_gen.sh --valgrind -p -t 10 -s 350 -b 4
pidof taosd|xargs kill -9
grep 'start to execute\|ERROR SUMMARY' valgrind.err|grep -v 'grep'|uniq|tee crash_gen_mem_err.log
@ -36,10 +36,14 @@ for defiMemError in `grep 'definitely lost:' crash_gen-definitely-lost-out.log |
do
defiMemError=(${defiMemError//,/})
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 3 ]; then
if [ "$defiMemError" -gt 3 -a "$defiMemError" -lt 1013 ]; then
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
exit 8
elif [ "$defiMemError" -gt 1013 ];then #add for azure
echo -e "${RED} ## Memory errors number valgrind reports \
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
exit 8
fi
fi
done

View File

@ -15,4 +15,7 @@ python3 ./test.py -f update/merge_commit_last.py
python3 ./test.py -f update/bug_td2279.py
# wal
python3 ./test.py -f wal/addOldWalTest.py
python3 ./test.py -f wal/addOldWalTest.py
# function
python3 ./test.py -f functions/all_null_value.py

View File

@ -0,0 +1,133 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tables = 10
self.rows = 20
self.perfix = 't'
self.ts = 1601481600000
def insertData(self):
print("==============step1")
tdSql.execute("create table st (ts timestamp, c1 int) tags(t1 int)")
for i in range(self.tables):
tdSql.execute("create table %s%d using st tags(%d)" % (self.perfix, i, i))
for j in range(self.rows):
tc = self.ts + j * 60000
tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, i, tc, j))
def executeQueries(self):
print("==============step2")
tdSql.query("select last(c1) from %s%d" % (self.perfix, 1))
tdSql.checkData(0, 0, 19)
tdSql.query("select last(c1) from %s%d where ts <= %d" % (self.perfix, 1, self.ts + 4 * 60000))
tdSql.checkData(0, 0, 4)
tdSql.query("select last(c1) as b from %s%d" % (self.perfix, 1))
tdSql.checkData(0, 0, 19)
tdSql.query("select last(c1) from %s%d interval(1m)" % (self.perfix, 1))
tdSql.checkData(1, 1, 1)
tdSql.query("select last(c1) from %s%d interval(1d)" % (self.perfix, 1))
tdSql.checkData(0, 1, 19)
tdSql.query("select last(c1) from %s%d where ts <= %d interval(1m)" % (self.perfix, 1, self.ts + 4 * 60000))
tdSql.checkRows(5)
tdSql.checkData(1, 1, 1)
tdSql.query("select last(c1) from st")
tdSql.checkData(0, 0, 19)
tdSql.query("select last(c1) as c from st where ts <= %d" % (self.ts + 4 * 60000))
tdSql.checkData(0, 0, 4)
tdSql.query("select last(c1) as c from st where t1 <= 5")
tdSql.checkData(0, 0, 19)
tdSql.query("select last(c1) as c from st where t1 <= 5 and ts <= %d" % (self.ts + 4 * 60000))
tdSql.checkData(0, 0, 4)
tdSql.query("select last(c1) from st interval(1m)")
tdSql.checkData(1, 1, 1)
tdSql.query("select last(c1) from st interval(1d)")
tdSql.checkData(0, 1, 19)
tdSql.query("select last(c1) from st group by t1")
tdSql.checkRows(10)
tdSql.checkData(0, 0, 19)
tdSql.query("select last(c1) as c from st where ts <= %d interval(1m) group by t1" % (self.ts + 4 * 60000))
tdSql.checkData(1, 1, 1)
tdSql.checkRows(50)
def run(self):
tdSql.prepare()
# last_cache_0.sim
tdSql.execute("create database test1 cachelast 0")
tdSql.execute("use test1")
self.insertData()
self.executeQueries()
tdSql.execute("alter database test1 cachelast 1")
self.executeQueries()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries()
tdSql.execute("alter database test1 cachelast 0")
self.executeQueries()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries()
# last_cache_1.sim
tdSql.execute("create database test2 cachelast 1")
tdSql.execute("use test2")
self.insertData()
self.executeQueries()
tdSql.execute("alter database test2 cachelast 0")
self.executeQueries()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries()
tdSql.execute("alter database test2 cachelast 1")
self.executeQueries()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,186 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tables = 10
self.rows = 20
self.perfix = 't'
self.ts = 1601481600000
def insertData(self):
print("==============step1")
tdSql.execute("create table st (ts timestamp, c1 int) tags(t1 int)")
for i in range(self.tables):
tdSql.execute("create table %s%d using st tags(%d)" % (self.perfix, i, i))
for j in range(self.rows):
tc = self.ts + j * 60000
tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, i, tc, j))
def executeQueries(self):
print("==============step2")
tdSql.query("select last_row(c1) from %s%d" % (self.perfix, 1))
tdSql.checkData(0, 0, 19)
tdSql.query("select last_row(c1) from %s%d where ts <= %d" % (self.perfix, 1, self.ts + 4 * 60000))
tdSql.checkData(0, 0, 4)
tdSql.query("select last_row(c1) as b from %s%d" % (self.perfix, 1))
tdSql.checkData(0, 0, 19)
tdSql.query("select last_row(c1) from st")
tdSql.checkData(0, 0, 19)
tdSql.query("select last_row(c1) as c from st where ts <= %d" % (self.ts + 4 * 60000))
tdSql.checkData(0, 0, 4)
tdSql.query("select last_row(c1) as c from st where t1 < 5")
tdSql.checkData(0, 0, 19)
tdSql.query("select last_row(c1) as c from st where t1 <= 5 and ts <= %d" % (self.ts + 4 * 60000))
tdSql.checkData(0, 0, 4)
tdSql.query("select last_row(c1) as c from st group by t1")
tdSql.checkRows(10)
tdSql.checkData(0, 0, 19)
tc = self.ts + 1 * 3600000
tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, 10))
tc = self.ts + 3 * 3600000
tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc))
tc = self.ts + 5 * 3600000
tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, -1))
tc = self.ts + 7 * 3600000
tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc))
def insertData2(self):
tc = self.ts + 1 * 3600000
tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, 10))
tc = self.ts + 3 * 3600000
tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc))
tc = self.ts + 5 * 3600000
tdSql.execute("insert into %s%d values(%d, %d)" %(self.perfix, 1, tc, -1))
tc = self.ts + 7 * 3600000
tdSql.execute("insert into %s%d values(%d, null)" %(self.perfix, 1, tc))
def executeQueries2(self):
# For stable
tc = self.ts + 6 * 3600000
tdSql.query("select last_row(c1) from st where ts < %d " % tc)
tdSql.checkData(0, 0, -1)
tc = self.ts + 8 * 3600000
tdSql.query("select last_row(*) from st where ts < %d " % tc)
tdSql.checkData(0, 1, None)
tdSql.query("select last_row(*) from st")
tdSql.checkData(0, 1, None)
tc = self.ts + 4 * 3600000
tdSql.query("select last_row(*) from st where ts < %d " % tc)
tdSql.checkData(0, 1, None)
tc1 = self.ts + 1 * 3600000
tc2 = self.ts + 4 * 3600000
tdSql.query("select last_row(*) from st where ts > %d and ts <= %d" % (tc1, tc2))
tdSql.checkData(0, 1, None)
# For table
tc = self.ts + 6 * 3600000
tdSql.query("select last_row(*) from %s%d where ts <= %d" % (self.perfix, 1, tc))
tdSql.checkData(0, 1, -1)
tc = self.ts + 8 * 3600000
tdSql.query("select last_row(*) from %s%d where ts <= %d" % (self.perfix, 1, tc))
tdSql.checkData(0, 1, None)
tdSql.query("select last_row(*) from %s%d" % (self.perfix, 1))
tdSql.checkData(0, 1, None)
tc = self.ts + 4 * 3600000
tdSql.query("select last_row(*) from %s%d where ts <= %d" % (self.perfix, 1, tc))
tdSql.checkData(0, 1, None)
tc1 = self.ts + 1 * 3600000
tc2 = self.ts + 4 * 3600000
tdSql.query("select last_row(*) from st where ts > %d and ts <= %d" % (tc1, tc2))
tdSql.checkData(0, 1, None)
def run(self):
tdSql.prepare()
print("============== last_row_cache_0.sim")
tdSql.execute("create database test1 cachelast 0")
tdSql.execute("use test1")
self.insertData()
self.executeQueries()
self.insertData2()
self.executeQueries2()
print("============== alter last cache")
tdSql.execute("alter database test1 cachelast 1")
self.executeQueries2()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries2()
tdSql.execute("alter database test1 cachelast 0")
self.executeQueries2()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries2()
print("============== last_row_cache_1.sim")
tdSql.execute("create database test2 cachelast 1")
tdSql.execute("use test2")
self.insertData()
self.executeQueries()
self.insertData2()
self.executeQueries2()
tdSql.execute("alter database test2 cachelast 0")
self.executeQueries2()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries2()
tdSql.execute("alter database test2 cachelast 1")
self.executeQueries2()
tdDnodes.stop(1)
tdDnodes.start(1)
self.executeQueries2()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -24,7 +24,7 @@ class TDTestCase:
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.ts = 1593548685000
self.ts = 1593548685000
def run(self):
tdSql.prepare()
@ -84,6 +84,22 @@ class TDTestCase:
tdDnodes.start(1)
tdSql.query("select last(*) from t interval(1s)")
tdSql.checkRows(10000)
# test case for https://jira.taosdata.com:18080/browse/TD-2601
newTs = 1601481600000
tdSql.execute("create database test2")
tdSql.execute("use test2")
tdSql.execute("create table t (ts timestamp, voltage int)")
for i in range(100):
tdSql.execute("insert into t values(%d, %d)" % (newTs + i * 10000000, i))
tdSql.query("select sum(voltage) from t where ts >='2020-10-01 00:00:00' and ts <='2020-12-01 00:00:00' interval(1n) fill(NULL)")
tdSql.checkRows(3)
tdSql.checkData(0, 1, 4950)
tdSql.checkData(1, 1, None)
tdSql.checkData(2, 1, None)
def stop(self):

View File

@ -51,7 +51,7 @@ class TDTestCase:
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
os.system("yes | %slowa -f tools/insert.json" % binPath)
os.system("yes | %staosdemox -f tools/insert.json" % binPath)
tdSql.execute("use db01")
tdSql.query("select count(*) from stb01")

Some files were not shown because too many files have changed in this diff.