Merge branch '3.0' of https://github.com/taosdata/TDengine into fix/TD-30837
|
@ -306,9 +306,9 @@ def pre_test_build_win() {
|
|||
cd %WIN_CONNECTOR_ROOT%
|
||||
python.exe -m pip install --upgrade pip
|
||||
python -m pip uninstall taospy -y
|
||||
python -m pip install taospy==2.7.13
|
||||
python -m pip install taospy==2.7.16
|
||||
python -m pip uninstall taos-ws-py -y
|
||||
python -m pip install taos-ws-py==0.3.1
|
||||
python -m pip install taos-ws-py==0.3.3
|
||||
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
|
||||
'''
|
||||
return 1
|
||||
|
|
|
@ -90,7 +90,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.2</version>
|
||||
<version>3.3.3</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.2</version>
|
||||
<version>3.3.3</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.locationtech.jts</groupId>
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.2</version>
|
||||
<version>3.3.3</version>
|
||||
</dependency>
|
||||
<!-- druid -->
|
||||
<dependency>
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.2</version>
|
||||
<version>3.3.3</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
|
|
|
@ -67,7 +67,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.2</version>
|
||||
<version>3.3.3</version>
|
||||
<!-- <scope>system</scope>-->
|
||||
<!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
|
||||
</dependency>
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.2</version>
|
||||
<version>3.3.3</version>
|
||||
</dependency>
|
||||
<!-- ANCHOR_END: dep-->
|
||||
|
||||
|
|
|
@ -3,10 +3,7 @@ package com.taos.example;
|
|||
import com.taosdata.jdbc.TSDBPreparedStatement;
|
||||
import com.taosdata.jdbc.utils.StringUtils;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import java.sql.*;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
|
@ -16,15 +13,32 @@ public class ParameterBindingFullDemo {
|
|||
|
||||
private static final String host = "127.0.0.1";
|
||||
private static final Random random = new Random(System.currentTimeMillis());
|
||||
private static final int BINARY_COLUMN_SIZE = 50;
|
||||
private static final int BINARY_COLUMN_SIZE = 100;
|
||||
private static final String[] schemaList = {
|
||||
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
|
||||
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
|
||||
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
|
||||
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
|
||||
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))",
|
||||
"create table stable6(ts timestamp, f1 varbinary(" + BINARY_COLUMN_SIZE + ")) tags(t1 varbinary(" + BINARY_COLUMN_SIZE + "))",
|
||||
"create table stable7(ts timestamp, f1 geometry(" + BINARY_COLUMN_SIZE + ")) tags(t1 geometry(" + BINARY_COLUMN_SIZE + "))",
|
||||
"drop database if exists example_all_type_stmt",
|
||||
"CREATE DATABASE IF NOT EXISTS example_all_type_stmt",
|
||||
"USE example_all_type_stmt",
|
||||
"CREATE STABLE IF NOT EXISTS stb_json (" +
|
||||
"ts TIMESTAMP, " +
|
||||
"int_col INT) " +
|
||||
"tags (json_tag json)",
|
||||
"CREATE STABLE IF NOT EXISTS stb (" +
|
||||
"ts TIMESTAMP, " +
|
||||
"int_col INT, " +
|
||||
"double_col DOUBLE, " +
|
||||
"bool_col BOOL, " +
|
||||
"binary_col BINARY(100), " +
|
||||
"nchar_col NCHAR(100), " +
|
||||
"varbinary_col VARBINARY(100), " +
|
||||
"geometry_col GEOMETRY(100)) " +
|
||||
"tags (" +
|
||||
"int_tag INT, " +
|
||||
"double_tag DOUBLE, " +
|
||||
"bool_tag BOOL, " +
|
||||
"binary_tag BINARY(100), " +
|
||||
"nchar_tag NCHAR(100), " +
|
||||
"varbinary_tag VARBINARY(100), " +
|
||||
"geometry_tag GEOMETRY(100))"
|
||||
};
|
||||
private static final int numOfSubTable = 10, numOfRow = 10;
|
||||
|
||||
|
@ -34,55 +48,37 @@ public class ParameterBindingFullDemo {
|
|||
try (Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata")) {
|
||||
|
||||
init(conn);
|
||||
stmtJsonTag(conn);
|
||||
stmtAll(conn);
|
||||
|
||||
bindInteger(conn);
|
||||
bindFloat(conn);
|
||||
bindBoolean(conn);
|
||||
bindBytes(conn);
|
||||
bindString(conn);
|
||||
bindVarbinary(conn);
|
||||
bindGeometry(conn);
|
||||
|
||||
clean(conn);
|
||||
} catch (SQLException ex) {
|
||||
// handle any errors, please refer to the JDBC specifications for detailed exceptions info
|
||||
System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
|
||||
System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
|
||||
throw ex;
|
||||
} catch (Exception ex){
|
||||
System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage());
|
||||
} catch (Exception ex) {
|
||||
System.out.println("Failed to insert data using stmt, ErrMessage: " + ex.getMessage());
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
|
||||
private static void init(Connection conn) throws SQLException {
|
||||
clean(conn);
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("create database if not exists test_parabind");
|
||||
stmt.execute("use test_parabind");
|
||||
for (int i = 0; i < schemaList.length; i++) {
|
||||
stmt.execute(schemaList[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
private static void clean(Connection conn) throws SQLException {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("drop database if exists test_parabind");
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindInteger(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
|
||||
private static void stmtJsonTag(Connection conn) throws SQLException {
|
||||
String sql = "INSERT INTO ? using stb_json tags(?) VALUES (?,?)";
|
||||
|
||||
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t1_" + i);
|
||||
pstmt.setTableName("ntb_json_" + i);
|
||||
// set tags
|
||||
pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||
pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||
pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setTagLong(3, random.nextLong());
|
||||
pstmt.setTagJson(0, "{\"device\":\"device_" + i + "\"}");
|
||||
// set columns
|
||||
ArrayList<Long> tsList = new ArrayList<>();
|
||||
long current = System.currentTimeMillis();
|
||||
|
@ -90,45 +86,42 @@ public class ParameterBindingFullDemo {
|
|||
tsList.add(current + j);
|
||||
pstmt.setTimestamp(0, tsList);
|
||||
|
||||
ArrayList<Byte> f1List = new ArrayList<>();
|
||||
ArrayList<Integer> f1List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||
pstmt.setByte(1, f1List);
|
||||
|
||||
ArrayList<Short> f2List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||
pstmt.setShort(2, f2List);
|
||||
|
||||
ArrayList<Integer> f3List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f3List.add(random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setInt(3, f3List);
|
||||
|
||||
ArrayList<Long> f4List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f4List.add(random.nextLong());
|
||||
pstmt.setLong(4, f4List);
|
||||
f1List.add(random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setInt(1, f1List);
|
||||
|
||||
// add column
|
||||
pstmt.columnDataAddBatch();
|
||||
}
|
||||
// execute column
|
||||
pstmt.columnDataExecuteBatch();
|
||||
System.out.println("Successfully inserted rows to example_all_type_stmt.ntb_json");
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindFloat(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
|
||||
private static void stmtAll(Connection conn) throws SQLException {
|
||||
String sql = "INSERT INTO ? using stb tags(?,?,?,?,?,?,?) VALUES (?,?,?,?,?,?,?,?)";
|
||||
|
||||
TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class);
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t2_" + i);
|
||||
pstmt.setTableName("ntb" + i);
|
||||
// set tags
|
||||
pstmt.setTagFloat(0, random.nextFloat());
|
||||
pstmt.setTagDouble(1, random.nextDouble());
|
||||
pstmt.setTagInt(0, i);
|
||||
pstmt.setTagDouble(1, 1.1);
|
||||
pstmt.setTagBoolean(2, true);
|
||||
pstmt.setTagString(3, "binary_value");
|
||||
pstmt.setTagNString(4, "nchar_value");
|
||||
pstmt.setTagVarbinary(5, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e});
|
||||
pstmt.setTagGeometry(6, new byte[]{
|
||||
0x01, 0x01, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59,
|
||||
0x40, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59, 0x40});
|
||||
|
||||
// set columns
|
||||
ArrayList<Long> tsList = new ArrayList<>();
|
||||
long current = System.currentTimeMillis();
|
||||
|
@ -136,190 +129,54 @@ public class ParameterBindingFullDemo {
|
|||
tsList.add(current + j);
|
||||
pstmt.setTimestamp(0, tsList);
|
||||
|
||||
ArrayList<Float> f1List = new ArrayList<>();
|
||||
ArrayList<Integer> f1List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f1List.add(random.nextFloat());
|
||||
pstmt.setFloat(1, f1List);
|
||||
f1List.add(random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setInt(1, f1List);
|
||||
|
||||
ArrayList<Double> f2List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f2List.add(random.nextDouble());
|
||||
pstmt.setDouble(2, f2List);
|
||||
|
||||
ArrayList<Boolean> f3List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f3List.add(true);
|
||||
pstmt.setBoolean(3, f3List);
|
||||
|
||||
ArrayList<String> f4List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f4List.add("binary_value");
|
||||
pstmt.setString(4, f4List, BINARY_COLUMN_SIZE);
|
||||
|
||||
ArrayList<String> f5List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f5List.add("nchar_value");
|
||||
pstmt.setNString(5, f5List, BINARY_COLUMN_SIZE);
|
||||
|
||||
ArrayList<byte[]> f6List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f6List.add(new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e});
|
||||
pstmt.setVarbinary(6, f6List, BINARY_COLUMN_SIZE);
|
||||
|
||||
ArrayList<byte[]> f7List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f7List.add(new byte[]{
|
||||
0x01, 0x01, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59,
|
||||
0x40, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59, 0x40});
|
||||
pstmt.setGeometry(7, f7List, BINARY_COLUMN_SIZE);
|
||||
|
||||
// add column
|
||||
pstmt.columnDataAddBatch();
|
||||
}
|
||||
// execute
|
||||
pstmt.columnDataExecuteBatch();
|
||||
System.out.println("Successfully inserted rows to example_all_type_stmt.ntb");
|
||||
// close if no try-with-catch statement is used
|
||||
pstmt.close();
|
||||
}
|
||||
|
||||
private static void bindBoolean(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable3 tags(?) values(?,?)";
|
||||
|
||||
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t3_" + i);
|
||||
// set tags
|
||||
pstmt.setTagBoolean(0, random.nextBoolean());
|
||||
// set columns
|
||||
ArrayList<Long> tsList = new ArrayList<>();
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
tsList.add(current + j);
|
||||
pstmt.setTimestamp(0, tsList);
|
||||
|
||||
ArrayList<Boolean> f1List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
f1List.add(random.nextBoolean());
|
||||
pstmt.setBoolean(1, f1List);
|
||||
|
||||
// add column
|
||||
pstmt.columnDataAddBatch();
|
||||
}
|
||||
// execute
|
||||
pstmt.columnDataExecuteBatch();
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindBytes(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable4 tags(?) values(?,?)";
|
||||
|
||||
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t4_" + i);
|
||||
// set tags
|
||||
pstmt.setTagString(0, new String("abc"));
|
||||
|
||||
// set columns
|
||||
ArrayList<Long> tsList = new ArrayList<>();
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
tsList.add(current + j);
|
||||
pstmt.setTimestamp(0, tsList);
|
||||
|
||||
ArrayList<String> f1List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
f1List.add(new String("abc"));
|
||||
}
|
||||
pstmt.setString(1, f1List, BINARY_COLUMN_SIZE);
|
||||
|
||||
// add column
|
||||
pstmt.columnDataAddBatch();
|
||||
}
|
||||
// execute
|
||||
pstmt.columnDataExecuteBatch();
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindString(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable5 tags(?) values(?,?)";
|
||||
|
||||
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t5_" + i);
|
||||
// set tags
|
||||
pstmt.setTagNString(0, "California.SanFrancisco");
|
||||
|
||||
// set columns
|
||||
ArrayList<Long> tsList = new ArrayList<>();
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
tsList.add(current + j);
|
||||
pstmt.setTimestamp(0, tsList);
|
||||
|
||||
ArrayList<String> f1List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
f1List.add("California.LosAngeles");
|
||||
}
|
||||
pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);
|
||||
|
||||
// add column
|
||||
pstmt.columnDataAddBatch();
|
||||
}
|
||||
// execute
|
||||
pstmt.columnDataExecuteBatch();
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindVarbinary(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable6 tags(?) values(?,?)";
|
||||
|
||||
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t6_" + i);
|
||||
// set tags
|
||||
byte[] bTag = new byte[]{0,2,3,4,5};
|
||||
bTag[0] = (byte) i;
|
||||
pstmt.setTagVarbinary(0, bTag);
|
||||
|
||||
// set columns
|
||||
ArrayList<Long> tsList = new ArrayList<>();
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
tsList.add(current + j);
|
||||
pstmt.setTimestamp(0, tsList);
|
||||
|
||||
ArrayList<byte[]> f1List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
byte[] v = new byte[]{0,2,3,4,5,6};
|
||||
v[0] = (byte)j;
|
||||
f1List.add(v);
|
||||
}
|
||||
pstmt.setVarbinary(1, f1List, BINARY_COLUMN_SIZE);
|
||||
|
||||
// add column
|
||||
pstmt.columnDataAddBatch();
|
||||
}
|
||||
// execute
|
||||
pstmt.columnDataExecuteBatch();
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindGeometry(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable7 tags(?) values(?,?)";
|
||||
|
||||
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
|
||||
|
||||
byte[] g1 = StringUtils.hexToBytes("0101000000000000000000F03F0000000000000040");
|
||||
byte[] g2 = StringUtils.hexToBytes("0102000020E610000002000000000000000000F03F000000000000004000000000000008400000000000001040");
|
||||
List<byte[]> listGeo = new ArrayList<>();
|
||||
listGeo.add(g1);
|
||||
listGeo.add(g2);
|
||||
|
||||
for (int i = 1; i <= 2; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t7_" + i);
|
||||
// set tags
|
||||
pstmt.setTagGeometry(0, listGeo.get(i - 1));
|
||||
|
||||
// set columns
|
||||
ArrayList<Long> tsList = new ArrayList<>();
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++)
|
||||
tsList.add(current + j);
|
||||
pstmt.setTimestamp(0, tsList);
|
||||
|
||||
ArrayList<byte[]> f1List = new ArrayList<>();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
f1List.add(listGeo.get(i - 1));
|
||||
}
|
||||
pstmt.setGeometry(1, f1List, BINARY_COLUMN_SIZE);
|
||||
|
||||
// add column
|
||||
pstmt.columnDataAddBatch();
|
||||
}
|
||||
// execute
|
||||
pstmt.columnDataExecuteBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
// ANCHOR_END: para_bind
|
||||
|
|
|
@ -11,11 +11,30 @@ public class WSParameterBindingFullDemo {
|
|||
private static final Random random = new Random(System.currentTimeMillis());
|
||||
private static final int BINARY_COLUMN_SIZE = 30;
|
||||
private static final String[] schemaList = {
|
||||
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
|
||||
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
|
||||
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
|
||||
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
|
||||
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
|
||||
"drop database if exists example_all_type_stmt",
|
||||
"CREATE DATABASE IF NOT EXISTS example_all_type_stmt",
|
||||
"USE example_all_type_stmt",
|
||||
"CREATE STABLE IF NOT EXISTS stb_json (" +
|
||||
"ts TIMESTAMP, " +
|
||||
"int_col INT) " +
|
||||
"tags (json_tag json)",
|
||||
"CREATE STABLE IF NOT EXISTS stb (" +
|
||||
"ts TIMESTAMP, " +
|
||||
"int_col INT, " +
|
||||
"double_col DOUBLE, " +
|
||||
"bool_col BOOL, " +
|
||||
"binary_col BINARY(100), " +
|
||||
"nchar_col NCHAR(100), " +
|
||||
"varbinary_col VARBINARY(100), " +
|
||||
"geometry_col GEOMETRY(100)) " +
|
||||
"tags (" +
|
||||
"int_tag INT, " +
|
||||
"double_tag DOUBLE, " +
|
||||
"bool_tag BOOL, " +
|
||||
"binary_tag BINARY(100), " +
|
||||
"nchar_tag NCHAR(100), " +
|
||||
"varbinary_tag VARBINARY(100), " +
|
||||
"geometry_tag GEOMETRY(100))"
|
||||
};
|
||||
private static final int numOfSubTable = 10, numOfRow = 10;
|
||||
|
||||
|
@ -27,153 +46,91 @@ public class WSParameterBindingFullDemo {
|
|||
|
||||
init(conn);
|
||||
|
||||
bindInteger(conn);
|
||||
stmtJsonTag(conn);
|
||||
|
||||
bindFloat(conn);
|
||||
|
||||
bindBoolean(conn);
|
||||
|
||||
bindBytes(conn);
|
||||
|
||||
bindString(conn);
|
||||
stmtAll(conn);
|
||||
|
||||
} catch (SQLException ex) {
|
||||
// handle any errors, please refer to the JDBC specifications for detailed exceptions info
|
||||
System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
|
||||
System.out.println("Failed to insert data using stmt, ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
|
||||
throw ex;
|
||||
} catch (Exception ex){
|
||||
System.out.println("Failed to insert to table meters using stmt, url: " + jdbcUrl + "; ErrMessage: " + ex.getMessage());
|
||||
} catch (Exception ex) {
|
||||
System.out.println("Failed to insert data using stmt, ErrMessage: " + ex.getMessage());
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
|
||||
private static void init(Connection conn) throws SQLException {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("drop database if exists test_ws_parabind");
|
||||
stmt.execute("create database if not exists test_ws_parabind");
|
||||
stmt.execute("use test_ws_parabind");
|
||||
for (int i = 0; i < schemaList.length; i++) {
|
||||
stmt.execute(schemaList[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindInteger(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
|
||||
private static void stmtJsonTag(Connection conn) throws SQLException {
|
||||
String sql = "INSERT INTO ? using stb_json tags(?) VALUES (?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t1_" + i);
|
||||
pstmt.setTableName("ntb_json_" + i);
|
||||
// set tags
|
||||
pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||
pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||
pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setTagLong(4, random.nextLong());
|
||||
pstmt.setTagJson(1, "{\"device\":\"device_" + i + "\"}");
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
|
||||
pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
|
||||
pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE));
|
||||
pstmt.setLong(5, random.nextLong());
|
||||
pstmt.setInt(2, j);
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
System.out.println("Successfully inserted rows to example_all_type_stmt.ntb_json");
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindFloat(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
|
||||
|
||||
try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t2_" + i);
|
||||
// set tags
|
||||
pstmt.setTagFloat(1, random.nextFloat());
|
||||
pstmt.setTagDouble(2, random.nextDouble());
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setFloat(2, random.nextFloat());
|
||||
pstmt.setDouble(3, random.nextDouble());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindBoolean(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable3 tags(?) values(?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t3_" + i);
|
||||
// set tags
|
||||
pstmt.setTagBoolean(1, random.nextBoolean());
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setBoolean(2, random.nextBoolean());
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void bindBytes(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable4 tags(?) values(?,?)";
|
||||
private static void stmtAll(Connection conn) throws SQLException {
|
||||
String sql = "INSERT INTO ? using stb tags(?,?,?,?,?,?,?) VALUES (?,?,?,?,?,?,?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t4_" + i);
|
||||
// set tags
|
||||
pstmt.setTagString(1, new String("abc"));
|
||||
// set table name
|
||||
pstmt.setTableName("ntb");
|
||||
// set tags
|
||||
pstmt.setTagInt(1, 1);
|
||||
pstmt.setTagDouble(2, 1.1);
|
||||
pstmt.setTagBoolean(3, true);
|
||||
pstmt.setTagString(4, "binary_value");
|
||||
pstmt.setTagNString(5, "nchar_value");
|
||||
pstmt.setTagVarbinary(6, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e});
|
||||
pstmt.setTagGeometry(7, new byte[]{
|
||||
0x01, 0x01, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59,
|
||||
0x40, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59, 0x40});
|
||||
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(1, new Timestamp(current + j));
|
||||
pstmt.setString(2, "abc");
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
long current = System.currentTimeMillis();
|
||||
|
||||
private static void bindString(Connection conn) throws SQLException {
|
||||
String sql = "insert into ? using stable5 tags(?) values(?,?)";
|
||||
|
||||
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
|
||||
|
||||
for (int i = 1; i <= numOfSubTable; i++) {
|
||||
// set table name
|
||||
pstmt.setTableName("t5_" + i);
|
||||
// set tags
|
||||
pstmt.setTagNString(1, "California.SanFrancisco");
|
||||
|
||||
// set columns
|
||||
long current = System.currentTimeMillis();
|
||||
for (int j = 0; j < numOfRow; j++) {
|
||||
pstmt.setTimestamp(0, new Timestamp(current + j));
|
||||
pstmt.setNString(1, "California.SanFrancisco");
|
||||
pstmt.addBatch();
|
||||
}
|
||||
pstmt.executeBatch();
|
||||
}
|
||||
pstmt.setTimestamp(1, new Timestamp(current));
|
||||
pstmt.setInt(2, 1);
|
||||
pstmt.setDouble(3, 1.1);
|
||||
pstmt.setBoolean(4, true);
|
||||
pstmt.setString(5, "binary_value");
|
||||
pstmt.setNString(6, "nchar_value");
|
||||
pstmt.setVarbinary(7, new byte[]{(byte) 0x98, (byte) 0xf4, 0x6e});
|
||||
pstmt.setGeometry(8, new byte[]{
|
||||
0x01, 0x01, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59,
|
||||
0x40, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x59, 0x40});
|
||||
pstmt.addBatch();
|
||||
pstmt.executeBatch();
|
||||
System.out.println("Successfully inserted rows to example_all_type_stmt.ntb");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -50,36 +50,68 @@ public class TestAll {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testRestInsert() throws SQLException {
|
||||
dropDB("power");
|
||||
RestInsertExample.main(args);
|
||||
RestQueryExample.main(args);
|
||||
public void testWsConnect() throws Exception {
|
||||
WSConnectExample.main(args);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStmtInsert() throws SQLException {
|
||||
public void testBase() throws Exception {
|
||||
JdbcCreatDBDemo.main(args);
|
||||
JdbcInsertDataDemo.main(args);
|
||||
JdbcQueryDemo.main(args);
|
||||
|
||||
dropDB("power");
|
||||
StmtInsertExample.main(args);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSubscribe() {
|
||||
public void testWsSchemaless() throws Exception {
|
||||
dropDB("power");
|
||||
SchemalessWsTest.main(args);
|
||||
}
|
||||
@Test
|
||||
public void testJniSchemaless() throws Exception {
|
||||
dropDB("power");
|
||||
SchemalessJniTest.main(args);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testJniStmtBasic() throws Exception {
|
||||
dropDB("power");
|
||||
ParameterBindingBasicDemo.main(args);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testJniStmtFull() throws Exception {
|
||||
dropDB("power");
|
||||
ParameterBindingFullDemo.main(args);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testWsStmtBasic() throws Exception {
|
||||
dropDB("power");
|
||||
WSParameterBindingBasicDemo.main(args);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testWsStmtFull() throws Exception {
|
||||
dropDB("power");
|
||||
WSParameterBindingFullDemo.main(args);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testConsumer() throws Exception {
|
||||
dropDB("power");
|
||||
SubscribeDemo.main(args);
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
public void testSubscribeOverWebsocket() {
|
||||
WebsocketSubscribeDemo.main(args);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSchemaless() throws SQLException {
|
||||
LineProtocolExample.main(args);
|
||||
TelnetLineProtocolExample.main(args);
|
||||
// for json protocol, tags may be double type. but for telnet protocol tag must be nchar type.
|
||||
// To avoid type mismatch, we delete database test.
|
||||
dropDB("test");
|
||||
JSONProtocolExample.main(args);
|
||||
}
|
||||
// @Test
|
||||
// public void testSubscribeJni() throws SQLException, InterruptedException {
|
||||
// dropDB("power");
|
||||
// ConsumerLoopFull.main(args);
|
||||
// }
|
||||
// @Test
|
||||
// public void testSubscribeWs() throws SQLException, InterruptedException {
|
||||
// dropDB("power");
|
||||
// WsConsumerLoopFull.main(args);
|
||||
// }
|
||||
}
|
||||
|
|
|
@ -0,0 +1,98 @@
|
|||
const taos = require("@tdengine/websocket");
|
||||
|
||||
let dsn = 'ws://localhost:6041';
|
||||
async function json_tag_example() {
|
||||
let wsSql = null;
|
||||
try {
|
||||
let conf = new taos.WSConfig(dsn);
|
||||
conf.setUser('root');
|
||||
conf.setPwd('taosdata');
|
||||
wsSql = await taos.sqlConnect(conf);
|
||||
console.log("Connected to " + dsn + " successfully.");
|
||||
|
||||
// create database
|
||||
await wsSql.exec('CREATE DATABASE IF NOT EXISTS example_json_tag');
|
||||
console.log("Create database example_json_tag successfully.");
|
||||
|
||||
// create table
|
||||
await wsSql.exec('create table if not exists example_json_tag.stb (ts timestamp, v int) tags(jt json)');
|
||||
|
||||
console.log("Create stable example_json_tag.stb successfully");
|
||||
|
||||
let insertQuery = 'INSERT INTO ' +
|
||||
'example_json_tag.tb1 USING example_json_tag.stb TAGS(\'{"name":"value"}\') ' +
|
||||
"values(now, 1) ";
|
||||
taosResult = await wsSql.exec(insertQuery);
|
||||
console.log("Successfully inserted " + taosResult.getAffectRows() + " rows to example_json_tag.stb.");
|
||||
|
||||
let sql = 'SELECT ts, v, jt FROM example_json_tag.stb limit 100';
|
||||
wsRows = await wsSql.query(sql);
|
||||
while (await wsRows.next()) {
|
||||
let row = wsRows.getData();
|
||||
console.log('ts: ' + row[0] + ', v: ' + row[1] + ', jt: ' + row[2]);
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
console.error(`Failed to create database example_json_tag or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||
} finally {
|
||||
if (wsSql) {
|
||||
await wsSql.close();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
async function all_type_example() {
|
||||
let wsSql = null;
|
||||
try {
|
||||
let conf = new taos.WSConfig(dsn);
|
||||
conf.setUser('root');
|
||||
conf.setPwd('taosdata');
|
||||
wsSql = await taos.sqlConnect(conf);
|
||||
console.log("Connected to " + dsn + " successfully.");
|
||||
|
||||
// create database
|
||||
await wsSql.exec('CREATE DATABASE IF NOT EXISTS all_type_example');
|
||||
console.log("Create database all_type_example successfully.");
|
||||
|
||||
// create table
|
||||
await wsSql.exec('create table if not exists all_type_example.stb (ts timestamp, ' +
|
||||
'int_col INT, double_col DOUBLE, bool_col BOOL, binary_col BINARY(100),' +
|
||||
'nchar_col NCHAR(100), varbinary_col VARBINARY(100), geometry_col GEOMETRY(100)) ' +
|
||||
'tags(int_tag INT, double_tag DOUBLE, bool_tag BOOL, binary_tag BINARY(100),' +
|
||||
'nchar_tag NCHAR(100), varbinary_tag VARBINARY(100), geometry_tag GEOMETRY(100));');
|
||||
|
||||
console.log("Create stable all_type_example.stb successfully");
|
||||
|
||||
let insertQuery = "INSERT INTO all_type_example.tb1 using all_type_example.stb "
|
||||
+ "tags(1, 1.1, true, 'binary_value', 'nchar_value', '\\x98f46e', 'POINT(100 100)') "
|
||||
+ "values(now, 1, 1.1, true, 'binary_value', 'nchar_value', '\\x98f46e', 'POINT(100 100)')";
|
||||
taosResult = await wsSql.exec(insertQuery);
|
||||
console.log("Successfully inserted " + taosResult.getAffectRows() + " rows to all_type_example.stb.");
|
||||
|
||||
let sql = 'SELECT * FROM all_type_example.stb limit 100';
|
||||
let wsRows = await wsSql.query(sql);
|
||||
let meta = wsRows.getMeta();
|
||||
console.log("wsRow:meta:=>", meta);
|
||||
while (await wsRows.next()) {
|
||||
let row = wsRows.getData();
|
||||
console.log(row);
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
console.error(`Failed to create database all_type_example or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||
} finally {
|
||||
if (wsSql) {
|
||||
await wsSql.close();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
async function test() {
|
||||
await json_tag_example()
|
||||
await all_type_example()
|
||||
taos.destroy();
|
||||
}
|
||||
|
||||
test()
|
|
@ -0,0 +1,149 @@
|
|||
const taos = require("@tdengine/websocket");
|
||||
|
||||
|
||||
let dsn = 'ws://localhost:6041';
|
||||
async function json_tag_example() {
|
||||
let wsSql = null;
|
||||
try {
|
||||
let conf = new taos.WSConfig(dsn);
|
||||
conf.setUser('root');
|
||||
conf.setPwd('taosdata');
|
||||
wsSql = await taos.sqlConnect(conf);
|
||||
console.log("Connected to " + dsn + " successfully.");
|
||||
|
||||
// create database
|
||||
await wsSql.exec('CREATE DATABASE IF NOT EXISTS example_json_tag');
|
||||
console.log("Create database example_json_tag successfully.");
|
||||
|
||||
await wsSql.exec('use example_json_tag');
|
||||
|
||||
// create table
|
||||
await wsSql.exec('create table if not exists stb (ts timestamp, v int) tags(jt json)');
|
||||
|
||||
console.log("Create stable example_json_tag.stb successfully");
|
||||
|
||||
let stmt = await wsSql.stmtInit();
|
||||
await stmt.prepare("INSERT INTO ? using stb tags(?) VALUES (?,?)");
|
||||
await stmt.setTableName(`tb1`);
|
||||
let tagParams = stmt.newStmtParam();
|
||||
tagParams.setJson(['{"name":"value"}'])
|
||||
await stmt.setTags(tagParams);
|
||||
let bindParams = stmt.newStmtParam();
|
||||
const currentMillis = new Date().getTime();
|
||||
bindParams.setTimestamp([currentMillis]);
|
||||
bindParams.setInt([1]);
|
||||
await stmt.bind(bindParams);
|
||||
await stmt.batch();
|
||||
await stmt.exec();
|
||||
await stmt.close();
|
||||
|
||||
let sql = 'SELECT ts, v, jt FROM example_json_tag.stb limit 100';
|
||||
wsRows = await wsSql.query(sql);
|
||||
while (await wsRows.next()) {
|
||||
let row = wsRows.getData();
|
||||
console.log('ts: ' + row[0] + ', v: ' + row[1] + ', jt: ' + row[2]);
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
console.error(`Failed to create database example_json_tag or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||
} finally {
|
||||
if (wsSql) {
|
||||
await wsSql.close();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
async function all_type_example() {
|
||||
let wsSql = null;
|
||||
let stmt = null;
|
||||
try {
|
||||
let conf = new taos.WSConfig(dsn);
|
||||
conf.setUser('root');
|
||||
conf.setPwd('taosdata');
|
||||
wsSql = await taos.sqlConnect(conf);
|
||||
console.log("Connected to " + dsn + " successfully.");
|
||||
|
||||
// create database
|
||||
await wsSql.exec('CREATE DATABASE IF NOT EXISTS all_type_example');
|
||||
console.log("Create database all_type_example successfully.");
|
||||
|
||||
await wsSql.exec('use all_type_example');
|
||||
|
||||
// create table
|
||||
await wsSql.exec('create table if not exists stb (ts timestamp, ' +
|
||||
'int_col INT, double_col DOUBLE, bool_col BOOL, binary_col BINARY(100),' +
|
||||
'nchar_col NCHAR(100), varbinary_col VARBINARY(100), geometry_col GEOMETRY(100)) ' +
|
||||
'tags(int_tag INT, double_tag DOUBLE, bool_tag BOOL, binary_tag BINARY(100),' +
|
||||
'nchar_tag NCHAR(100), varbinary_tag VARBINARY(100), geometry_tag GEOMETRY(100));');
|
||||
|
||||
console.log("Create stable all_type_example.stb successfully");
|
||||
|
||||
let geometryData = new Uint8Array([0x01,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
|
||||
0x00,0x00,0x00,0x59,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x59,0x40,]).buffer;
|
||||
|
||||
const encoder = new TextEncoder();
|
||||
let vbData = encoder.encode(`Hello, world!`).buffer;
|
||||
|
||||
stmt = await wsSql.stmtInit();
|
||||
await stmt.prepare("INSERT INTO ? using stb tags(?,?,?,?,?,?,?) VALUES (?,?,?,?,?,?,?,?)");
|
||||
await stmt.setTableName(`tb1`);
|
||||
let tagParams = stmt.newStmtParam();
|
||||
tagParams.setInt([1]);
|
||||
tagParams.setDouble([1.1]);
|
||||
tagParams.setBoolean([true]);
|
||||
tagParams.setVarchar(["hello"]);
|
||||
tagParams.setNchar(["stmt"]);
|
||||
tagParams.setGeometry([geometryData]);
|
||||
tagParams.setVarBinary([vbData]);
|
||||
await stmt.setTags(tagParams);
|
||||
|
||||
|
||||
let bindParams = stmt.newStmtParam();
|
||||
const currentMillis = new Date().getTime();
|
||||
bindParams.setTimestamp([currentMillis]);
|
||||
bindParams.setInt([1]);
|
||||
bindParams.setDouble([1.1]);
|
||||
bindParams.setBoolean([true]);
|
||||
bindParams.setVarchar(["hello"]);
|
||||
bindParams.setNchar(["stmt"]);
|
||||
bindParams.setGeometry([geometryData]);
|
||||
bindParams.setVarBinary([vbData]);
|
||||
|
||||
await stmt.bind(bindParams);
|
||||
await stmt.batch();
|
||||
await stmt.exec();
|
||||
|
||||
let sql = 'SELECT * FROM all_type_example.stb limit 100';
|
||||
let wsRows = await wsSql.query(sql);
|
||||
let meta = wsRows.getMeta();
|
||||
console.log("wsRow:meta:=>", meta);
|
||||
while (await wsRows.next()) {
|
||||
let row = wsRows.getData();
|
||||
console.log(row);
|
||||
}
|
||||
|
||||
} catch (err) {
|
||||
console.error(`Failed to create database all_type_example or stable stb, ErrCode: ${err.code}, ErrMessage: ${err.message}`);
|
||||
} finally {
|
||||
if (stmt) {
|
||||
await stmt.close();
|
||||
}
|
||||
if (wsSql) {
|
||||
await wsSql.close();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
async function test() {
|
||||
taos.setLevel("debug")
|
||||
await json_tag_example()
|
||||
await all_type_example()
|
||||
taos.destroy();
|
||||
}
|
||||
|
||||
test()
|
||||
|
|
@ -24,13 +24,18 @@ async function createConnect() {
|
|||
async function createDbAndTable() {
|
||||
let wsSql = null;
|
||||
try {
|
||||
wsSql = await createConnect();
|
||||
let conf = new taos.WSConfig(dsn);
|
||||
conf.setUser('root');
|
||||
conf.setPwd('taosdata');
|
||||
conf.setDb('power');
|
||||
wsSql = await taos.sqlConnect(conf);
|
||||
console.log("Connected to " + dsn + " successfully.");
|
||||
// create database
|
||||
await wsSql.exec('CREATE DATABASE IF NOT EXISTS power');
|
||||
console.log("Create database power successfully.");
|
||||
// create table
|
||||
await wsSql.exec('CREATE STABLE IF NOT EXISTS power.meters ' +
|
||||
'(_ts timestamp, current float, voltage int, phase float) ' +
|
||||
'(ts timestamp, current float, voltage int, phase float) ' +
|
||||
'TAGS (location binary(64), groupId int);');
|
||||
|
||||
console.log("Create stable power.meters successfully");
|
||||
|
|
|
@ -8,6 +8,7 @@ edition = "2021"
|
|||
anyhow = "1"
|
||||
chrono = "0.4"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
tokio = { version = "1", features = ["rt", "macros", "rt-multi-thread"] }
|
||||
log = "0.4"
|
||||
pretty_env_logger = "0.5.0"
|
||||
|
|
|
@ -0,0 +1,121 @@
|
|||
use taos::*;
|
||||
use taos_query::util::hex::hex_string_to_bytes;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let dsn = "taos://";
|
||||
let taos = TaosBuilder::from_dsn(dsn)?.build().await?;
|
||||
|
||||
taos.exec("DROP DATABASE IF EXISTS example_all_type_stmt")
|
||||
.await?;
|
||||
taos.create_database("example_all_type_stmt").await?;
|
||||
taos.use_database("example_all_type_stmt").await?;
|
||||
taos.exec(
|
||||
r#"
|
||||
CREATE STABLE IF NOT EXISTS stb (
|
||||
ts TIMESTAMP,
|
||||
int_col INT,
|
||||
double_col DOUBLE,
|
||||
bool_col BOOL,
|
||||
binary_col BINARY(100),
|
||||
nchar_col NCHAR(100),
|
||||
varbinary_col VARBINARY(100),
|
||||
geometry_col GEOMETRY(100))
|
||||
TAGS (
|
||||
int_tag INT,
|
||||
double_tag DOUBLE,
|
||||
bool_tag BOOL,
|
||||
binary_tag BINARY(100),
|
||||
nchar_tag NCHAR(100))
|
||||
"#,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut stmt = Stmt::init(&taos).await?;
|
||||
stmt.prepare("INSERT INTO ? using stb tags(?,?,?,?,?) VALUES (?,?,?,?,?,?,?,?)")
|
||||
.await?;
|
||||
|
||||
const NUM_TABLES: usize = 10;
|
||||
const NUM_ROWS: usize = 10;
|
||||
for i in 0..NUM_TABLES {
|
||||
let table_name = format!("d_bind_{}", i);
|
||||
let tags = vec![
|
||||
Value::Int(i as i32),
|
||||
Value::Double(1.1),
|
||||
Value::Bool(true),
|
||||
Value::VarChar("binary_value".into()),
|
||||
Value::NChar("nchar_value".into()),
|
||||
// Value::VarBinary(vec![0x98, 0xf4, 0x6e].into()),
|
||||
// Value::Geometry(
|
||||
// vec![
|
||||
// 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40,
|
||||
// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40,
|
||||
// ]
|
||||
// .into(),
|
||||
// ),
|
||||
];
|
||||
|
||||
// set table name and tags for the prepared statement.
|
||||
match stmt.set_tbname_tags(&table_name, &tags).await {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
eprintln!(
|
||||
"Failed to set table name and tags, table_name:{}, tags:{:?}, ErrMessage: {}",
|
||||
table_name, tags, err
|
||||
);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
for j in 0..NUM_ROWS {
|
||||
let values = vec![
|
||||
ColumnView::from_millis_timestamp(vec![1648432611249 + j as i64]),
|
||||
ColumnView::from_ints(vec![j as i32]),
|
||||
ColumnView::from_doubles(vec![1.1]),
|
||||
ColumnView::from_bools(vec![true]),
|
||||
ColumnView::from_varchar(vec!["ABC"]),
|
||||
ColumnView::from_nchar(vec!["涛思数据"]),
|
||||
ColumnView::from_bytes(vec![hex_string_to_bytes("123456").to_vec()]),
|
||||
ColumnView::from_geobytes(vec![hex_string_to_bytes(
|
||||
"0101000000000000000000F03F0000000000000040",
|
||||
)
|
||||
.to_vec()]),
|
||||
];
|
||||
// bind values to the prepared statement.
|
||||
match stmt.bind(&values).await {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
eprintln!(
|
||||
"Failed to bind values, values:{:?}, ErrMessage: {}",
|
||||
values, err
|
||||
);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match stmt.add_batch().await {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
eprintln!("Failed to add batch, ErrMessage: {}", err);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// execute.
|
||||
match stmt.execute().await {
|
||||
Ok(affected_rows) => println!(
|
||||
"Successfully inserted {} rows to example_all_type_stmt.stb.",
|
||||
affected_rows
|
||||
),
|
||||
Err(err) => {
|
||||
eprintln!(
|
||||
"Failed to insert to table stb using stmt, ErrMessage: {}",
|
||||
err
|
||||
);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -0,0 +1,94 @@
|
|||
use serde_json::json;
|
||||
use taos::*;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let dsn = "taos://";
|
||||
let taos = TaosBuilder::from_dsn(dsn)?.build().await?;
|
||||
|
||||
taos.exec("DROP DATABASE IF EXISTS example_all_type_stmt")
|
||||
.await?;
|
||||
taos.create_database("example_all_type_stmt").await?;
|
||||
taos.use_database("example_all_type_stmt").await?;
|
||||
taos.exec(
|
||||
r#"
|
||||
CREATE STABLE IF NOT EXISTS stb_json (
|
||||
ts TIMESTAMP,
|
||||
int_col INT)
|
||||
TAGS (
|
||||
json_tag JSON)
|
||||
"#,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut stmt = Stmt::init(&taos).await?;
|
||||
stmt.prepare("INSERT INTO ? using stb_json tags(?) VALUES (?,?)")
|
||||
.await?;
|
||||
|
||||
const NUM_TABLES: usize = 1;
|
||||
const NUM_ROWS: usize = 1;
|
||||
for i in 0..NUM_TABLES {
|
||||
let table_name = format!("d_bind_{}", i);
|
||||
let json_value: serde_json::Value = json!({
|
||||
"name": "value"
|
||||
});
|
||||
|
||||
dbg!(json_value.to_string());
|
||||
|
||||
let tags = vec![Value::Json(json_value)];
|
||||
|
||||
// set table name and tags for the prepared statement.
|
||||
match stmt.set_tbname_tags(&table_name, &tags).await {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
eprintln!(
|
||||
"Failed to set table name and tags, table_name:{}, tags:{:?}, ErrMessage: {}",
|
||||
table_name, tags, err
|
||||
);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
for j in 0..NUM_ROWS {
|
||||
let values = vec![
|
||||
ColumnView::from_millis_timestamp(vec![1648432611249 + j as i64]),
|
||||
ColumnView::from_ints(vec![j as i32]),
|
||||
];
|
||||
// bind values to the prepared statement.
|
||||
match stmt.bind(&values).await {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
eprintln!(
|
||||
"Failed to bind values, values:{:?}, ErrMessage: {}",
|
||||
values, err
|
||||
);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match stmt.add_batch().await {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
eprintln!("Failed to add batch, ErrMessage: {}", err);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// execute.
|
||||
match stmt.execute().await {
|
||||
Ok(affected_rows) => println!(
|
||||
"Successfully inserted {} rows to example_all_type_stmt.stb_json.",
|
||||
affected_rows
|
||||
),
|
||||
Err(err) => {
|
||||
eprintln!(
|
||||
"Failed to insert to table stb_json using stmt, ErrMessage: {}",
|
||||
err
|
||||
);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -0,0 +1,121 @@
|
|||
use taos::*;
|
||||
use taos_query::util::hex::hex_string_to_bytes;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let dsn = "ws://";
|
||||
let taos = TaosBuilder::from_dsn(dsn)?.build().await?;
|
||||
|
||||
taos.exec("DROP DATABASE IF EXISTS example_all_type_stmt")
|
||||
.await?;
|
||||
taos.create_database("example_all_type_stmt").await?;
|
||||
taos.use_database("example_all_type_stmt").await?;
|
||||
taos.exec(
|
||||
r#"
|
||||
CREATE STABLE IF NOT EXISTS stb (
|
||||
ts TIMESTAMP,
|
||||
int_col INT,
|
||||
double_col DOUBLE,
|
||||
bool_col BOOL,
|
||||
binary_col BINARY(100),
|
||||
nchar_col NCHAR(100),
|
||||
varbinary_col VARBINARY(100),
|
||||
geometry_col GEOMETRY(100))
|
||||
TAGS (
|
||||
int_tag INT,
|
||||
double_tag DOUBLE,
|
||||
bool_tag BOOL,
|
||||
binary_tag BINARY(100),
|
||||
nchar_tag NCHAR(100))
|
||||
"#,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut stmt = Stmt::init(&taos).await?;
|
||||
stmt.prepare("INSERT INTO ? using stb tags(?,?,?,?,?) VALUES (?,?,?,?,?,?,?,?)")
|
||||
.await?;
|
||||
|
||||
const NUM_TABLES: usize = 10;
|
||||
const NUM_ROWS: usize = 10;
|
||||
for i in 0..NUM_TABLES {
|
||||
let table_name = format!("d_bind_{}", i);
|
||||
let tags = vec![
|
||||
Value::Int(i as i32),
|
||||
Value::Double(1.1),
|
||||
Value::Bool(true),
|
||||
Value::VarChar("binary_value".into()),
|
||||
Value::NChar("nchar_value".into()),
|
||||
// Value::VarBinary(vec![0x98, 0xf4, 0x6e].into()),
|
||||
// Value::Geometry(
|
||||
// vec![
|
||||
// 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40,
|
||||
// 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x40,
|
||||
// ]
|
||||
// .into(),
|
||||
// ),
|
||||
];
|
||||
|
||||
// set table name and tags for the prepared statement.
|
||||
match stmt.set_tbname_tags(&table_name, &tags).await {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
eprintln!(
|
||||
"Failed to set table name and tags, table_name:{}, tags:{:?}, ErrMessage: {}",
|
||||
table_name, tags, err
|
||||
);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
for j in 0..NUM_ROWS {
|
||||
let values = vec![
|
||||
ColumnView::from_millis_timestamp(vec![1648432611249 + j as i64]),
|
||||
ColumnView::from_ints(vec![j as i32]),
|
||||
ColumnView::from_doubles(vec![1.1]),
|
||||
ColumnView::from_bools(vec![true]),
|
||||
ColumnView::from_varchar(vec!["ABC"]),
|
||||
ColumnView::from_nchar(vec!["涛思数据"]),
|
||||
ColumnView::from_bytes(vec![hex_string_to_bytes("123456").to_vec()]),
|
||||
ColumnView::from_geobytes(vec![hex_string_to_bytes(
|
||||
"0101000000000000000000F03F0000000000000040",
|
||||
)
|
||||
.to_vec()]),
|
||||
];
|
||||
// bind values to the prepared statement.
|
||||
match stmt.bind(&values).await {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
eprintln!(
|
||||
"Failed to bind values, values:{:?}, ErrMessage: {}",
|
||||
values, err
|
||||
);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match stmt.add_batch().await {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
eprintln!("Failed to add batch, ErrMessage: {}", err);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// execute.
|
||||
match stmt.execute().await {
|
||||
Ok(affected_rows) => println!(
|
||||
"Successfully inserted {} rows to example_all_type_stmt.stb.",
|
||||
affected_rows
|
||||
),
|
||||
Err(err) => {
|
||||
eprintln!(
|
||||
"Failed to insert to table stb using stmt, ErrMessage: {}",
|
||||
err
|
||||
);
|
||||
return Err(err.into());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -8,7 +8,7 @@ TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-se
|
|||
|
||||
TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[快速入门](./basic)一章。
|
||||
|
||||
如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。
|
||||
如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。对 REST API、各种编程语言的连接器(Connector)想做更多详细了解的话,请看[连接器](./reference/connector)一章。
|
||||
|
||||
我们已经生活在大数据时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 Database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请仔细参考[运维管理](./operation)一章。
|
||||
|
||||
|
@ -16,8 +16,6 @@ TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移
|
|||
|
||||
如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。
|
||||
|
||||
如果你对 TDengine 的产品组件、REST API、各种编程语言的连接器(Connector)想做更多详细了解,请看[参考指南](./reference)一章。
|
||||
|
||||
最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
|
||||
|
||||
Together, we make a difference!
|
||||
|
|
|
@ -14,9 +14,9 @@ TDengine 是一个高性能、分布式的时序数据库。通过集成的缓
|
|||
|
||||
TDengine OSS 是一个开源的高性能时序数据库,与其他时序数据库相比,它的核心优势在于其集群开源、高性能和云原生架构。而且除了基础的写入、查询和存储功能外,TDengine OSS 还集成了缓存、流式计算和数据订阅等高级功能,这些功能显著简化了系统设计,降低了企业的研发和运营成本。
|
||||
|
||||
在 TDengine OSS 的基础上,企业版 TDengine Enterprise 提供了增强的辅助功能,包括数据的备份恢复、异地容灾、多级存储、视图、权限控制、安全加密、IP 白名单、支持 MQTT、OPC-UA、OPC-DA、PI、Wonderware、Kafka 等各种数据源。这些功能为企业提供了更为全面、安全、可靠和高效的时序数据管理解决方案。
|
||||
在 TDengine OSS 的基础上,企业版 TDengine Enterprise 提供了增强的辅助功能,包括数据的备份恢复、异地容灾、多级存储、视图、权限控制、安全加密、IP 白名单、支持 MQTT、OPC-UA、OPC-DA、PI、Wonderware、Kafka 等各种数据源。这些功能为企业提供了更为全面、安全、可靠和高效的时序数据管理解决方案。更多的细节请看 [TDengine Enterprise](https://www.taosdata.com/tdengine-pro)
|
||||
|
||||
此外,TDengine Cloud 作为一种全托管的云服务,存储与计算分离,分开计费,为企业提供了企业级的工具和服务,彻底解决了运维难题,尤其适合中小规模的用户使用。
|
||||
此外,TDengine Cloud 作为一种全托管的云服务,存储与计算分离,分开计费,为企业提供了企业级的工具和服务,彻底解决了运维难题,尤其适合中小规模的用户使用。更多的细节请看[TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
|
||||
|
||||
## TDengine 主要功能与特性
|
||||
|
||||
|
@ -135,9 +135,3 @@ TDengine 经过特别优化,以适应时间序列数据的独特需求,引
|
|||
- [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
|
||||
|
||||
|
||||
## 主要产品
|
||||
|
||||
TDengine 有两个主要产品:TDengine Enterprise (即 TDengine 企业版)和 TDengine Cloud,关于它们的具体定义请参考
|
||||
- [TDengine 企业版](https://www.taosdata.com/tdengine-pro)
|
||||
- [TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
sidebar_label: Docker
|
||||
title: 通过 Docker 快速体验 TDengine
|
||||
sidebar_label: 用Docker快速体验
|
||||
title: 用 Docker 快速体验 TDengine
|
||||
description: 使用 Docker 快速体验 TDengine 的高效写入和查询
|
||||
---
|
||||
|
||||
|
@ -90,7 +90,7 @@ taosBenchmark 提供了丰富的选项,允许用户自定义测试参数,如
|
|||
taosBenchmark --help
|
||||
```
|
||||
|
||||
有关taosBenchmark 的详细使用方法,请参考[taosBenchmark 参考手册](../../reference/components/taosbenchmark)
|
||||
有关taosBenchmark 的详细使用方法,请参考[taosBenchmark 参考手册](../../reference/tools/taosbenchmark)
|
||||
|
||||
### 体验查询
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
sidebar_label: 安装包
|
||||
title: 使用安装包立即开始
|
||||
sidebar_label: 用安装包快速体验
|
||||
title: 使用安装包快速体验 TDengine
|
||||
description: 使用安装包快速体验 TDengine
|
||||
---
|
||||
|
||||
|
@ -263,7 +263,7 @@ SELECT * FROM t;
|
|||
Query OK, 2 row(s) in set (0.003128s)
|
||||
```
|
||||
|
||||
除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在机器上运行,更多细节请参考 [TDengine 命令行](../../reference/components/taos-cli/)。
|
||||
除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在机器上运行,更多细节请参考 [TDengine 命令行](../../reference/tools/taos-cli/)。
|
||||
|
||||
## 快速体验
|
||||
|
||||
|
@ -286,7 +286,7 @@ taosBenchmark 提供了丰富的选项,允许用户自定义测试参数,如
|
|||
taosBenchmark --help
|
||||
```
|
||||
|
||||
有关taosBenchmark 的详细使用方法,请参考[taosBenchmark 参考手册](../../reference/components/taosbenchmark)
|
||||
有关taosBenchmark 的详细使用方法,请参考[taosBenchmark 参考手册](../../reference/tools/taosbenchmark)
|
||||
|
||||
### 体验查询
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
sidebar_label: 云服务
|
||||
title: 通过 云服务 快速体验 TDengine
|
||||
sidebar_label: 用云服务快速体验
|
||||
title: 通过云服务 快速体验 TDengine
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
---
|
||||
title: 立即开始
|
||||
title: 快速体验
|
||||
description: '快速设置 TDengine 环境并体验其高效写入和查询'
|
||||
---
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: 快速入门
|
||||
description: 'TDengine 基本功能'
|
||||
title: 基础功能
|
||||
description: 'TDengine 基础功能'
|
||||
---
|
||||
|
||||
本章主要介绍 TDengine 的数据模型以及写入和查询功能。
|
||||
|
|
|
@ -72,15 +72,30 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T
|
|||
|
||||
taosX 可以使用 JSON 提取器解析数据,并允许用户在数据库中指定数据模型,包括,指定表名称和超级表名,设置普通列和标签列等。
|
||||
|
||||
|
||||
#### 6.1 解析
|
||||
|
||||
有三种获取示例数据的方法:
|
||||
|
||||
点击 **从服务器检索** 按钮,从 MQTT 获取示例数据。
|
||||
|
||||
点击 **文件上传** 按钮,上传 CSV 文件,获取示例数据。
|
||||
|
||||
在 **消息体** 中填写 MQTT 消息体中的示例数据,例如:`{"id": 1, "message": "hello-word"}{"id": 2, "message": "hello-word"}`。之后会使用这条示例数据来配置提取和过滤条件。
|
||||
在 **消息体** 中填写 MQTT 消息体中的示例数据。
|
||||
|
||||
json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解析一下数据:
|
||||
|
||||
``` json
|
||||
{"id": 1, "message": "hello-word"}
|
||||
{"id": 2, "message": "hello-word"}
|
||||
```
|
||||
|
||||
或者
|
||||
|
||||
``` json
|
||||
[{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}]
|
||||
```
|
||||
|
||||
解析结果如下所示:
|
||||
|
||||

|
||||
|
||||
|
|
|
@ -80,13 +80,29 @@ TDengine 可以高效地从 Kafka 读取数据并将其写入 TDengine,以实
|
|||
在 **Payload 解析** 区域填写 Payload 解析相关的配置参数。
|
||||
|
||||
#### 7.1 解析
|
||||
|
||||
有三种获取示例数据的方法:
|
||||
|
||||
点击 **从服务器检索** 按钮,从 Kafka 获取示例数据。
|
||||
|
||||
点击 **文件上传** 按钮,上传 CSV 文件,获取示例数据。
|
||||
|
||||
在 **消息体** 中填写 Kafka 消息体中的示例数据,例如:`{"id": 1, "message": "hello-word"}{"id": 2, "message": "hello-word"}`。之后会使用这条示例数据来配置提取和过滤条件。
|
||||
在 **消息体** 中填写 Kafka 消息体中的示例数据。
|
||||
|
||||
json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解析一下数据:
|
||||
|
||||
``` json
|
||||
{"id": 1, "message": "hello-word"}
|
||||
{"id": 2, "message": "hello-word"}
|
||||
```
|
||||
|
||||
或者
|
||||
|
||||
``` json
|
||||
[{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}]
|
||||
```
|
||||
|
||||
解析结果如下所示:
|
||||
|
||||

|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ InfluxDB 是一种流行的开源时间序列数据库,它针对处理大量
|
|||
|
||||
在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示:
|
||||
|
||||

|
||||

|
||||
|
||||
### 2. 配置基本信息
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ OpenTSDB 是一个架构在 HBase 系统之上的实时监控信息收集和展
|
|||
|
||||
在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示:
|
||||
|
||||

|
||||

|
||||
|
||||
### 2. 配置基本信息
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@ MySQL 是最流行的关系型数据库之一。很多系统都曾经或正在
|
|||
|
||||
在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示:
|
||||
|
||||

|
||||

|
||||
|
||||
### 2. 配置基本信息
|
||||
|
||||
|
@ -56,11 +56,19 @@ MySQL 是最流行的关系型数据库之一。很多系统都曾经或正在
|
|||
|
||||
### 6. 配置 SQL 查询
|
||||
|
||||
**子表字段** 用于拆分子表的字段,它是一条 select distinct 的 SQL 语句,查询指定字段组合的非重复项,通常与 transform 中的 tag 相对应:
|
||||
> 此项配置主要为了解决数据迁移乱序问题,需要结合**SQL 模板**共同使用,否则不能达到预期效果,使用示例如下:
|
||||
> 1. 子表字段填写语句 `select distinct col_name1, col_name2 from table`,它表示使用源表中的字段 col_name1 与 col_name2 拆分目标超级表的子表
|
||||
> 2. 在**SQL 模板**中添加子表字段占位符,例如 `select * from table where ts >= ${start} and ts < ${end} and ${col_name1} and ${col_name2}` 中的 `${col_name1} and ${col_name2}` 部分
|
||||
> 3. 在 **transform** 中配置 `col_name1` 与 `col_name2` 两个 tag 映射
|
||||
|
||||
**SQL 模板** 用于查询的 SQL 语句模板,SQL 语句中必须包含时间范围条件,且开始时间和结束时间必须成对出现。SQL 语句模板中定义的时间范围由源数据库中的某个代表时间的列和下面定义的占位符组成。
|
||||
> SQL使用不同的占位符表示不同的时间格式要求,具体有以下占位符格式:
|
||||
> 1. `${start}`、`${end}`:表示 RFC3339 格式时间戳,如: 2024-03-14T08:00:00+0800
|
||||
> 2. `${start_no_tz}`、`${end_no_tz}`: 表示不带时区的 RFC3339 字符串:2024-03-14T08:00:00
|
||||
> 3. `${start_date}`、`${end_date}`:表示仅日期,如:2024-03-14
|
||||
>
|
||||
> 为了解决迁移数据乱序的问题,应在查询语句中添加排序条件,例如 `order by ts asc`。
|
||||
|
||||
**起始时间** 迁移数据的起始时间,此项为必填字段。
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ TDengine 可以高效地从 PostgreSQL 读取数据并将其写入 TDengine,
|
|||
|
||||
在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示:
|
||||
|
||||

|
||||

|
||||
|
||||
### 2. 配置基本信息
|
||||
|
||||
|
@ -57,11 +57,19 @@ TDengine 可以高效地从 PostgreSQL 读取数据并将其写入 TDengine,
|
|||
|
||||
### 6. 配置 SQL 查询
|
||||
|
||||
**子表字段** 用于拆分子表的字段,它是一条 select distinct 的 SQL 语句,查询指定字段组合的非重复项,通常与 transform 中的 tag 相对应:
|
||||
> 此项配置主要为了解决数据迁移乱序问题,需要结合**SQL 模板**共同使用,否则不能达到预期效果,使用示例如下:
|
||||
> 1. 子表字段填写语句 `select distinct col_name1, col_name2 from table`,它表示使用源表中的字段 col_name1 与 col_name2 拆分目标超级表的子表
|
||||
> 2. 在**SQL 模板**中添加子表字段占位符,例如 `select * from table where ts >= ${start} and ts < ${end} and ${col_name1} and ${col_name2}` 中的 `${col_name1} and ${col_name2}` 部分
|
||||
> 3. 在 **transform** 中配置 `col_name1` 与 `col_name2` 两个 tag 映射
|
||||
|
||||
**SQL 模板** 用于查询的 SQL 语句模板,SQL 语句中必须包含时间范围条件,且开始时间和结束时间必须成对出现。SQL 语句模板中定义的时间范围由源数据库中的某个代表时间的列和下面定义的占位符组成。
|
||||
> SQL使用不同的占位符表示不同的时间格式要求,具体有以下占位符格式:
|
||||
> 1. `${start}`、`${end}`:表示 RFC3339 格式时间戳,如: 2024-03-14T08:00:00+0800
|
||||
> 2. `${start_no_tz}`、`${end_no_tz}`: 表示不带时区的 RFC3339 字符串:2024-03-14T08:00:00
|
||||
> 3. `${start_date}`、`${end_date}`:表示仅日期,如:2024-03-14
|
||||
>
|
||||
> 为了解决迁移数据乱序的问题,应在查询语句中添加排序条件,例如 `order by ts asc`。
|
||||
|
||||
**起始时间** 迁移数据的起始时间,此项为必填字段。
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ TDengine 可以高效地从 Oracle 读取数据并将其写入 TDengine,以实
|
|||
|
||||
在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示:
|
||||
|
||||

|
||||

|
||||
|
||||
### 2. 配置基本信息
|
||||
|
||||
|
@ -49,11 +49,19 @@ TDengine 可以高效地从 Oracle 读取数据并将其写入 TDengine,以实
|
|||
|
||||
### 5. 配置 SQL 查询
|
||||
|
||||
**子表字段** 用于拆分子表的字段,它是一条 select distinct 的 SQL 语句,查询指定字段组合的非重复项,通常与 transform 中的 tag 相对应:
|
||||
> 此项配置主要为了解决数据迁移乱序问题,需要结合**SQL 模板**共同使用,否则不能达到预期效果,使用示例如下:
|
||||
> 1. 子表字段填写语句 `select distinct col_name1, col_name2 from table`,它表示使用源表中的字段 col_name1 与 col_name2 拆分目标超级表的子表
|
||||
> 2. 在**SQL 模板**中添加子表字段占位符,例如 `select * from table where ts >= ${start} and ts < ${end} and ${col_name1} and ${col_name2}` 中的 `${col_name1} and ${col_name2}` 部分
|
||||
> 3. 在 **transform** 中配置 `col_name1` 与 `col_name2` 两个 tag 映射
|
||||
|
||||
**SQL 模板** 用于查询的 SQL 语句模板,SQL 语句中必须包含时间范围条件,且开始时间和结束时间必须成对出现。SQL 语句模板中定义的时间范围由源数据库中的某个代表时间的列和下面定义的占位符组成。
|
||||
> SQL使用不同的占位符表示不同的时间格式要求,具体有以下占位符格式:
|
||||
> 1. `${start}`、`${end}`:表示 RFC3339 格式时间戳,如: 2024-03-14T08:00:00+0800
|
||||
> 2. `${start_no_tz}`、`${end_no_tz}`: 表示不带时区的 RFC3339 字符串:2024-03-14T08:00:00
|
||||
> 3. `${start_date}`、`${end_date}`:表示仅日期,但 Oracle 中没有纯日期类型,所以它会带零时零分零秒,如:2024-03-14 00:00:00,所以使用 `date <= ${end_date}` 时需要注意,它不能包含 2024-03-14 当天数据
|
||||
>
|
||||
> 为了解决迁移数据乱序的问题,应在查询语句中添加排序条件,例如 `order by ts asc`。
|
||||
|
||||
**起始时间** 迁移数据的起始时间,此项为必填字段。
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@ Microsoft SQL Server 是最流行的关系型数据库之一。很多系统都
|
|||
|
||||
在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示:
|
||||
|
||||

|
||||

|
||||
|
||||
### 2. 配置基本信息
|
||||
|
||||
|
@ -61,6 +61,12 @@ Microsoft SQL Server 是最流行的关系型数据库之一。很多系统都
|
|||
|
||||
### 6. 配置 SQL 查询
|
||||
|
||||
**子表字段** 用于拆分子表的字段,它是一条 select distinct 的 SQL 语句,查询指定字段组合的非重复项,通常与 transform 中的 tag 相对应:
|
||||
> 此项配置主要为了解决数据迁移乱序问题,需要结合**SQL 模板**共同使用,否则不能达到预期效果,使用示例如下:
|
||||
> 1. 子表字段填写语句 `select distinct col_name1, col_name2 from table`,它表示使用源表中的字段 col_name1 与 col_name2 拆分目标超级表的子表
|
||||
> 2. 在**SQL 模板**中添加子表字段占位符,例如 `select * from table where ts >= ${start} and ts < ${end} and ${col_name1} and ${col_name2}` 中的 `${col_name1} and ${col_name2}` 部分
|
||||
> 3. 在 **transform** 中配置 `col_name1` 与 `col_name2` 两个 tag 映射
|
||||
|
||||
**SQL 模板** 用于查询的 SQL 语句模板,SQL 语句中必须包含时间范围条件,且开始时间和结束时间必须成对出现。SQL 语句模板中定义的时间范围由源数据库中的某个代表时间的列和下面定义的占位符组成。
|
||||
> SQL使用不同的占位符表示不同的时间格式要求,具体有以下占位符格式:
|
||||
> 1. `${start}`、`${end}`:表示 RFC3339 格式时间戳,如: 2024-03-14T08:00:00+0800
|
||||
|
@ -68,6 +74,8 @@ Microsoft SQL Server 是最流行的关系型数据库之一。很多系统都
|
|||
> 3. `${start_date}`、`${end_date}`:表示仅日期,如:2024-03-14
|
||||
>
|
||||
> 注意:只有 `datetime2` 与 `datetimeoffset` 支持使用 start/end 查询,`datetime` 与 `smalldatetime` 只能使用 start_no_tz/end_no_tz 查询,而 `timestamp` 不能用作查询条件。
|
||||
>
|
||||
> 为了解决迁移数据乱序的问题,应在查询语句中添加排序条件,例如 `order by ts asc`。
|
||||
|
||||
**起始时间** 迁移数据的起始时间,此项为必填字段。
|
||||
|
||||
|
|
|
@ -0,0 +1,135 @@
|
|||
---
|
||||
title: "MongoDB"
|
||||
sidebar_label: "MongoDB"
|
||||
---
|
||||
|
||||
本节讲述如何通过 Explorer 界面创建数据迁移任务,从 MongoDB 迁移数据到当前 TDengine 集群。
|
||||
|
||||
## 功能概述
|
||||
|
||||
MongoDB 是一个介于关系型数据库与非关系型数据库之间的产品,被广泛应用于内容管理系统、移动应用与物联网等众多领域。从 TDengine 企业版 3.3.3.0 开始,TDengine 可以高效地从 MongoDB 读取数据并将其写入 TDengine,以实现历史数据迁移或实时数据同步,解决业务面临的技术痛点。
|
||||
|
||||
## 创建任务
|
||||
|
||||
### 1. 新增数据源
|
||||
|
||||
在数据写入页面中点击左上角的 **+新增数据源** 按钮进入新增数据源页面,如下图所示:
|
||||
|
||||

|
||||
|
||||
### 2. 配置基本信息
|
||||
|
||||
在 **名称** 字段中输入任务名称,例如 `test_mongodb_01` 。
|
||||
|
||||
选择 **类型** 下拉框中的 `MongoDB` ,如下图所示(选择完成后页面中的字段会发生变化)。
|
||||
|
||||
**代理** 是非必填项,如有需要,可以在下拉框中选择指定的代理,也可以先点击右侧的 **+创建新的代理** 按钮创建一个新的代理。
|
||||
|
||||
**目标数据库** 是必填项,可以在下拉框中选择指定的数据库,也可以先点击右侧的 **+创建数据库** 按钮创建一个新的数据库。
|
||||
|
||||

|
||||
|
||||
### 3. 配置连接信息
|
||||
|
||||
在 **连接配置** 区域填写 *`源 MongoDB 数据库的连接信息`*,如下图所示:
|
||||
|
||||

|
||||
|
||||
### 4. 配置认证信息
|
||||
|
||||
**用户** 输入源 MongoDB 数据库的用户,该用户必须在 MongoDB 系统中拥有读取权限。
|
||||
|
||||
**密码** 输入源 MongoDB 数据库中上方用户的登录密码。
|
||||
|
||||
**认证数据库** MongoDB 中存储用户信息的数据库,默认为 admin。
|
||||
|
||||

|
||||
|
||||
### 5. 配置连接选项
|
||||
|
||||
**应用名称** 设置应用程序名称,用于标识连接的应用程序。
|
||||
|
||||
**SSL 证书** 设置是否使用加密连接,默认关闭,如果开启,需要上传以下两个文件:
|
||||
|
||||
  1. **CA 文件** 上传 SSL 加密的证书授权文件。
|
||||
|
||||
  2. **证书文件** 上传 SSL 加密的证书文件。
|
||||
|
||||

|
||||
|
||||
然后点击 **检查连通性** 按钮,用户可以点击此按钮检查上方填写的信息是否可以正常获取源 MongoDB 数据库的数据。
|
||||
|
||||
### 6. 配置数据查询
|
||||
|
||||
**数据库** MongoDB 中源数据库,可以使用占位符进行动态配置,例如 `database_${Y}`,可用占位符列表见下方表格。
|
||||
|
||||
**集合** MongoDB 中集合,可以使用占位符进行动态配置,例如 `collection_${md}`,可用占位符列表见下方表格。
|
||||
|
||||
|占位符|描述|示例数据|
|
||||
| :-----: | :------------: |:--------:|
|
||||
|Y|完整的公历年表示,零填充的 4 位整数|2024|
|
||||
|y|公历年除以 100,零填充的 2 位整数|24|
|
||||
|M|整数月份(1 - 12)|1|
|
||||
|m|整数月份(01 - 12)|01|
|
||||
|B|月份英文全拼|January|
|
||||
|b|月份英文的缩写(3 个字母)|Jan|
|
||||
|D|日期的数字表示(1 - 31)|1|
|
||||
|d|日期的数字表示(01 - 31)|01|
|
||||
|J|一年中的第几天(1 - 366)|1|
|
||||
|j|一年中的第几天(001 - 366)|001|
|
||||
|F|相当于 `${Y}-${m}-${d}`|2024-01-01|
|
||||
|
||||
**子表字段** 用于拆分子表的字段,通常与 transform 中的 tag 相对应,多个字段使用英文逗号分隔,例如 col_name1,col_name2。
|
||||
此项配置主要为了解决数据迁移乱序问题,需要结合**查询模板**共同使用,否则不能达到预期效果,使用示例如下:
|
||||
1. 配置两个子表字段 `col_name1,col_name2`
|
||||
2. 在**查询模板**中添加子表字段占位符,例如 `{"ddate":{"$gte":${start_datetime},"$lt":${end_datetime}}, ${col_name1}, ${col_name2}}` 中的 `${col_name1}, ${col_name2}` 部分
|
||||
3. 在 **transform** 中配置 `col_name1` 与 `col_name2` 两个 tag 映射
|
||||
|
||||
**查询模板** 用于查询数据的查询语句,JSON 格式,语句中必须包含时间范围条件,且开始时间和结束时间必须成对出现。模板中定义的时间范围由源数据库中的某个代表时间的列和下面定义的占位符组成。
|
||||
使用不同的占位符表示不同的时间格式要求,具体有以下占位符格式:
|
||||
1. `${start_datetime}`、`${end_datetime}`:对应后端 datetime 类型字段的筛选,如:`{"ddate":{"$gte":${start_datetime},"$lt":${end_datetime}}}` 将被转换为 `{"ddate":{"$gte":{"$date":"2024-06-01T00:00:00+00:00"},"$lt":{"$date":"2024-07-01T00:00:00+00:00"}}}`
|
||||
2. `${start_timestamp}`、`${end_timestamp}`: 对应后端 timestamp 类型字段的筛选,如:`{"ttime":{"$gte":${start_timestamp},"$lt":${end_timestamp}}}` 将被转换为 `{"ttime":{"$gte":{"$timestamp":{"t":123,"i":456}},"$lt":{"$timestamp":{"t":123,"i":456}}}}`
|
||||
|
||||
**查询排序** 执行查询时的排序条件,JSON 格式,它必须符合 MongoDB 排序条件的格式规范,使用示例如下:
|
||||
1. `{"createtime":1}`:MongoDB 查询结果按 createtime 正序返回。
|
||||
2. `{"createdate":1, "createtime":1}`:MongoDB 查询结果按 createdate 正序、createtime 正序返回。
|
||||
|
||||
**起始时间** 迁移数据的起始时间,此项为必填字段。
|
||||
|
||||
**结束时间** 迁移数据的结束时间,可留空。如果设置,则迁移任务执行到结束时间后,任务完成自动停止;如果留空,则持续同步实时数据,任务不会自动停止。
|
||||
|
||||
**查询间隔** 分段查询数据的时间间隔,默认 1 天。为了避免查询数据量过大,一次数据同步子任务会使用查询间隔分时间段查询数据。
|
||||
|
||||
**延迟时长** 实时同步数据场景中,为了避免延迟写入的数据丢失,每次同步任务会读取延迟时长之前的数据。
|
||||
|
||||

|
||||
|
||||
### 7. 配置数据映射
|
||||
|
||||
在 **Payload 转换** 区域填写数据映射相关的配置参数。
|
||||
|
||||
点击 **从服务器检索** 按钮,从 MongoDB 服务器获取示例数据。
|
||||
|
||||
在 **解析** 中选择 JSON/Regex/UDT 三种规则解析原始消息体,配置完成后点击右侧的 **预览** 按钮可以查看解析的结果。
|
||||
|
||||
在 **从列中提取或拆分** 中填写从消息体中提取或拆分的字段,例如:将 vValue 字段拆分成 `vValue_0` 和 `vValue_1` 这 2 个字段,选择 split 提取器,seperator 填写分隔符 `,`, number 填写 2,配置完成后点击右侧的 **预览** 按钮可以查看转换的结果。
|
||||
|
||||
在 **过滤** 中,填写过滤条件,例如:填写 `Value > 0`,则只有 Value 大于 0 的数据才会被写入 TDengine,配置完成后点击右侧的 **预览** 按钮可以查看过滤的结果。
|
||||
|
||||
在 **映射** 中,选择要映射到 TDengine 的超级表,以及映射到超级表的列,配置完成后点击右侧的 **预览** 按钮可以查看映射的结果。
|
||||
|
||||

|
||||
|
||||
### 8. 配置高级选项
|
||||
|
||||
**高级选项** 区域是默认折叠的,点击右侧 `>` 可以展开,如下图所示:
|
||||
|
||||
**最大读取并发数** 数据源连接数或读取线程数限制,当默认参数不满足需要或需要调整资源使用量时修改此参数。
|
||||
|
||||
**批次大小** 单次发送的最大消息数或行数。默认是 10000。
|
||||
|
||||

|
||||
|
||||
### 9. 创建完成
|
||||
|
||||
点击 **提交** 按钮,完成创建 MongoDB 到 TDengine 的数据同步任务,回到**数据源列表**页面可查看任务执行情况。
|
|
@ -38,7 +38,6 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosEx
|
|||
|
||||
下面详细讲解数据转换规则
|
||||
|
||||
|
||||
### 解析
|
||||
|
||||
仅非结构化的数据源需要这个步骤,目前 MQTT 和 Kafka 数据源会使用这个步骤提供的规则来解析非结构化数据,以初步获取结构化数据,即可以以字段描述的行列数据。在 explorer 中您需要提供示例数据和解析规则,来预览解析出以表格呈现的结构化数据。
|
||||
|
@ -53,13 +52,15 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosEx
|
|||
2. 点击右侧按钮 “从服务器检索” 则从配置的服务器获取示例数据,并追加到示例数据 textarea 中;
|
||||
3. 上传文件,将文件内容追加到示例数据 textarea 中。
|
||||
|
||||
每一条示例数据以回车符结尾。
|
||||
|
||||
#### 解析<a name="parse"></a>
|
||||
|
||||
解析就是通过解析规则,将非结构化字符串解析为结构化数据。消息体的解析规则目前支持 JSON、Regex 和 UDT。
|
||||
|
||||
##### JSON 解析
|
||||
|
||||
如下 JSON 示例数据,可自动解析出字段:`groupid`、`voltage`、`current`、`ts`、`inuse`、`location`。
|
||||
JSON 解析支持 JSONObject 或者 JSONArray。如下 JSON 示例数据,可自动解析出字段:`groupid`、`voltage`、`current`、`ts`、`inuse`、`location`。
|
||||
|
||||
``` json
|
||||
{"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"}
|
||||
|
@ -67,6 +68,16 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosEx
|
|||
{"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}
|
||||
```
|
||||
|
||||
或者
|
||||
|
||||
``` json
|
||||
[{"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"},
|
||||
{"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"},
|
||||
{"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}]
|
||||
```
|
||||
|
||||
后续示例仅以 JSONObject 为例说明。
|
||||
|
||||
如下嵌套结构的 JSON 数据,可自动解析出字段`groupid`、`data_voltage`、`data_current`、`ts`、`inuse`、`location_0_province`、`location_0_city`、`location_0_datun`,也可以选择要解析的字段,并设置解析的别名。
|
||||
|
||||
``` json
|
||||
|
|
After Width: | Height: | Size: 91 KiB |
Before Width: | Height: | Size: 32 KiB |
Before Width: | Height: | Size: 32 KiB |
Before Width: | Height: | Size: 32 KiB |
Before Width: | Height: | Size: 32 KiB |
After Width: | Height: | Size: 13 KiB |
After Width: | Height: | Size: 7.4 KiB |
After Width: | Height: | Size: 14 KiB |
After Width: | Height: | Size: 21 KiB |
After Width: | Height: | Size: 37 KiB |
After Width: | Height: | Size: 107 KiB |
After Width: | Height: | Size: 13 KiB |
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 30 KiB |
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 30 KiB |
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 30 KiB |
Before Width: | Height: | Size: 22 KiB After Width: | Height: | Size: 31 KiB |
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 14 KiB |
|
@ -89,7 +89,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.3.2</version>
|
||||
<version>3.3.3</version>
|
||||
</dependency>
|
||||
```
|
||||
|
Before Width: | Height: | Size: 210 KiB After Width: | Height: | Size: 210 KiB |
Before Width: | Height: | Size: 283 KiB After Width: | Height: | Size: 283 KiB |
Before Width: | Height: | Size: 302 KiB After Width: | Height: | Size: 302 KiB |
Before Width: | Height: | Size: 7.1 KiB After Width: | Height: | Size: 7.1 KiB |
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 18 KiB |
Before Width: | Height: | Size: 48 KiB After Width: | Height: | Size: 48 KiB |
Before Width: | Height: | Size: 15 KiB After Width: | Height: | Size: 15 KiB |
Before Width: | Height: | Size: 246 KiB |
Before Width: | Height: | Size: 161 KiB After Width: | Height: | Size: 161 KiB |
Before Width: | Height: | Size: 104 KiB After Width: | Height: | Size: 104 KiB |
Before Width: | Height: | Size: 186 KiB After Width: | Height: | Size: 186 KiB |