Merge branch '3.0' of https://github.com/taosdata/TDengine into fix/TD-31212

54liuyao 2024-08-06 08:40:29 +08:00
commit 9b438f3087
805 changed files with 23789 additions and 13839 deletions


@ -172,7 +172,7 @@ npm install @tdengine/rest
You only need to add a reference to [TDengine.Connector](https://www.nuget.org/packages/TDengine.Connector/) in the project configuration file.
```xml title=csharp.csproj
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>


@ -65,10 +65,16 @@ interp_clause:
RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
partition_by_clause:
PARTITION BY partition_by_expr [, partition_by_expr] ...
partition_by_expr:
{expr | position | c_alias}
group_by_clause:
GROUP BY group_by_expr [, group_by_expr] ... HAVING condition
group_by_expr:
{expr | position | c_alias}
order_by_clause:
ORDER BY order_expr [, order_expr] ...
@ -274,7 +280,13 @@ If you use a GROUP BY clause, the SELECT list can only include the following ite
The GROUP BY clause groups each row of data by the value of the expression following the clause and returns a combined result for each group.
In the GROUP BY clause, columns from a table or view can be grouped by specifying the column name. These columns do not need to be included in the SELECT list.
You can specify a position (an integer) in the GROUP BY clause to refer to an expression in the select list; for example, 1 refers to the first item in the select list.
You can also specify result-set column names (aliases) in the GROUP BY clause to refer to expressions in the select list.
When grouping by position or by result-set column name, the corresponding expression in the select list must not be an aggregate function.
The GROUP BY clause does not guarantee that the results are ordered. If you want to ensure that grouped data is ordered, use the ORDER BY clause.
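A minimal sketch of both new grouping forms, assuming the power.meters super table and the default connection parameters used by the C examples below (the table contents and host are assumptions, not part of this reference):

```c
// Sketch only: group by select-list position (GROUP BY 1) and by
// result-set column name (GROUP BY loc). In both forms the referenced
// select-list expression must not be an aggregate function.
#include <stdio.h>
#include "taos.h"

int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "power", 0);
  if (taos == NULL) return -1;
  // position 1 refers to the first select-list item (location)
  TAOS_RES *res = taos_query(taos, "SELECT location AS loc, AVG(voltage) FROM meters GROUP BY 1");
  if (taos_errno(res) != 0) printf("group by position failed: %s\n", taos_errstr(res));
  taos_free_result(res);
  // loc is the result-set column name of the same expression
  res = taos_query(taos, "SELECT location AS loc, AVG(voltage) FROM meters GROUP BY loc");
  if (taos_errno(res) != 0) printf("group by column name failed: %s\n", taos_errstr(res));
  taos_free_result(res);
  taos_close(taos);
  taos_cleanup();
  return 0;
}
```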


@ -13,12 +13,18 @@ int main() {
uint16_t port = 0; // 0 means use the default port
TAOS *taos = taos_connect(host, user, passwd, db, port);
if (taos == NULL) {
int errno = taos_errno(NULL);
const char *msg = taos_errstr(NULL);
printf("failed to connect to server %s, errno: %d, msg: %s\n", host, errno, msg);
taos_cleanup();
return -1;
}
printf("success to connect server %s\n", host);
/* put your code here for read and write */
// close & clean
taos_close(taos);
taos_cleanup();
}


@ -0,0 +1,79 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// TAOS standard API example. The same syntax as MySQL, but only a subset
// to compile: gcc -o create_db_demo create_db_demo.c -ltaos
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "taos.h"
static int DemoCreateDB() {
// ANCHOR: create_db_and_table
const char *ip = "localhost";
const char *user = "root";
const char *password = "taosdata";
// connect
TAOS *taos = taos_connect(ip, user, password, NULL, 0);
if (taos == NULL) {
printf("failed to connect to server %s, reason: %s\n", ip, taos_errstr(NULL));
taos_cleanup();
return -1;
}
printf("success to connect server %s\n", ip);
// create database
TAOS_RES *result = taos_query(taos, "CREATE DATABASE IF NOT EXISTS power");
int code = taos_errno(result);
if (code != 0) {
printf("failed to create database power, reason: %s\n", taos_errstr(result));
taos_close(taos);
taos_cleanup();
return -1;
}
taos_free_result(result);
printf("success to create database power\n");
// use database
result = taos_query(taos, "USE power");
taos_free_result(result);
// create table
const char* sql = "CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))";
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
printf("failed to create stable meters, reason: %s\n", taos_errstr(result));
taos_close(taos);
taos_cleanup();
return -1;
}
taos_free_result(result);
printf("success to create table meters\n");
// close & clean
taos_close(taos);
taos_cleanup();
return 0;
// ANCHOR_END: create_db_and_table
}
int main(int argc, char *argv[]) {
return DemoCreateDB();
}


@ -0,0 +1,77 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// TAOS standard API example. The same syntax as MySQL, but only a subset
// to compile: gcc -o insert_data_demo insert_data_demo.c -ltaos
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "taos.h"
static int DemoInsertData() {
// ANCHOR: insert_data
const char *ip = "localhost";
const char *user = "root";
const char *password = "taosdata";
// connect
TAOS *taos = taos_connect(ip, user, password, NULL, 0);
if (taos == NULL) {
printf("failed to connect to server %s, reason: %s\n", ip, taos_errstr(NULL));
taos_cleanup();
return -1;
}
printf("success to connect server %s\n", ip);
// use database
TAOS_RES *result = taos_query(taos, "USE power");
taos_free_result(result);
// insert data, please make sure the database and table are already created
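// NOW + 1a advances the timestamp by 1 millisecond ('a' is the millisecond
// duration unit in TDengine SQL), so the rows below get distinct timestamps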
const char* sql = "INSERT INTO " \
"power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " \
"VALUES " \
"(NOW + 1a, 10.30000, 219, 0.31000) " \
"(NOW + 2a, 12.60000, 218, 0.33000) " \
"(NOW + 3a, 12.30000, 221, 0.31000) " \
"power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " \
"VALUES " \
"(NOW + 1a, 10.30000, 218, 0.25000) ";
result = taos_query(taos, sql);
int code = taos_errno(result);
if (code != 0) {
printf("failed to insert data to power.meters, ip: %s, reason: %s\n", ip, taos_errstr(result));
taos_close(taos);
taos_cleanup();
return -1;
}
// you can check affectedRows here; read it before freeing the result
int rows = taos_affected_rows(result);
printf("success to insert %d rows data to power.meters\n", rows);
taos_free_result(result);
// close & clean
taos_close(taos);
taos_cleanup();
return 0;
// ANCHOR_END: insert_data
}
int main(int argc, char *argv[]) {
return DemoInsertData();
}


@ -0,0 +1,84 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// TAOS standard API example. The same syntax as MySQL, but only a subset
// to compile: gcc -o query_data_demo query_data_demo.c -ltaos
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "taos.h"
static int DemoQueryData() {
// ANCHOR: query_data
const char *ip = "localhost";
const char *user = "root";
const char *password = "taosdata";
// connect
TAOS *taos = taos_connect(ip, user, password, NULL, 0);
if (taos == NULL) {
printf("failed to connect to server %s, reason: %s\n", ip, taos_errstr(NULL));
taos_cleanup();
return -1;
}
printf("success to connect server %s\n", ip);
// use database
TAOS_RES *result = taos_query(taos, "USE power");
taos_free_result(result);
// query data, please make sure the database and table are already created
const char* sql = "SELECT ts, current, location FROM power.meters limit 100";
result = taos_query(taos, sql);
int code = taos_errno(result);
if (code != 0) {
printf("failed to query data from power.meters, ip: %s, reason: %s\n", ip, taos_errstr(result));
taos_close(taos);
taos_cleanup();
return -1;
}
TAOS_ROW row = NULL;
int rows = 0;
int num_fields = taos_field_count(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
printf("fields: %d\n", num_fields);
printf("sql: %s, result:\n", sql);
// fetch the records row by row
while ((row = taos_fetch_row(result))) {
char temp[1024] = {0};
rows++;
taos_print_row(temp, row, fields, num_fields);
printf("%s\n", temp);
}
printf("total rows: %d\n", rows);
taos_free_result(result);
printf("success to query data from power.meters\n");
// close & clean
taos_close(taos);
taos_cleanup();
return 0;
// ANCHOR_END: query_data
}
int main(int argc, char *argv[]) {
return DemoQueryData();
}


@ -0,0 +1,123 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// TAOS standard API example. The same syntax as MySQL, but only a subset
// to compile: gcc -o sml_insert_demo sml_insert_demo.c -ltaos
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "taos.h"
static int DemoSmlInsert() {
// ANCHOR: schemaless
const char *ip = "localhost";
const char *user = "root";
const char *password = "taosdata";
// connect
TAOS *taos = taos_connect(ip, user, password, NULL, 0);
if (taos == NULL) {
printf("failed to connect to server %s, reason: %s\n", ip, taos_errstr(NULL));
taos_cleanup();
return -1;
}
printf("success to connect server %s\n", ip);
// create database
TAOS_RES *result = taos_query(taos, "CREATE DATABASE IF NOT EXISTS power");
int code = taos_errno(result);
if (code != 0) {
printf("failed to create database power, reason: %s\n", taos_errstr(result));
taos_close(taos);
taos_cleanup();
return -1;
}
taos_free_result(result);
printf("success to create database power\n");
// use database
result = taos_query(taos, "USE power");
taos_free_result(result);
// schemaless demo data
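// line_demo uses the InfluxDB line protocol: measurement,tag_set field_set timestamp
// telnet_demo uses the OpenTSDB telnet format: metric timestamp value tag=value ...
// json_demo uses the OpenTSDB JSON format: {metric, timestamp, value, tags}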
char * line_demo = "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639";
char * telnet_demo = "metric_telnet 1707095283260 4 host=host0 interface=eth0";
char * json_demo = "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
// influxdb line protocol
char *lines[] = {line_demo};
result = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
if (taos_errno(result) != 0) {
printf("failed to insert schemaless line data, reason: %s\n", taos_errstr(result));
taos_close(taos);
taos_cleanup();
return -1;
}
int rows = taos_affected_rows(result);
printf("success to insert %d rows of schemaless line data\n", rows);
taos_free_result(result);
// opentsdb telnet protocol
char *telnets[] = {telnet_demo};
result = taos_schemaless_insert(taos, telnets, 1, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
if (taos_errno(result) != 0) {
printf("failed to insert schemaless telnet data, reason: %s\n", taos_errstr(result));
taos_close(taos);
taos_cleanup();
return -1;
}
rows = taos_affected_rows(result);
printf("success to insert %d rows of schemaless telnet data\n", rows);
taos_free_result(result);
// opentsdb json protocol
char *jsons[1] = {0};
// allocate memory for json data. can not use static memory.
jsons[0] = malloc(1024);
if (jsons[0] == NULL) {
printf("failed to allocate memory\n");
taos_close(taos);
taos_cleanup();
return -1;
}
(void)strncpy(jsons[0], json_demo, 1023);
result = taos_schemaless_insert(taos, jsons, 1, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
if (taos_errno(result) != 0) {
free(jsons[0]);
printf("failed to insert schemaless json data, reason: %s\n", taos_errstr(result));
taos_close(taos);
taos_cleanup();
return -1;
}
free(jsons[0]);
rows = taos_affected_rows(result);
printf("success to insert %d rows of schemaless json data\n", rows);
taos_free_result(result);
// close & clean
taos_close(taos);
taos_cleanup();
return 0;
// ANCHOR_END: schemaless
}
int main(int argc, char *argv[]) {
return DemoSmlInsert();
}


@ -0,0 +1,175 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// TAOS standard API example. The same syntax as MySQL, but only a subset
// to compile: gcc -o stmt_insert_demo stmt_insert_demo.c -ltaos
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "taos.h"
/**
* @brief execute sql only.
*
* @param taos
* @param sql
*/
void executeSQL(TAOS *taos, const char *sql) {
TAOS_RES *res = taos_query(taos, sql);
int code = taos_errno(res);
if (code != 0) {
printf("%s\n", taos_errstr(res));
taos_free_result(res);
taos_close(taos);
exit(EXIT_FAILURE);
}
taos_free_result(res);
}
/**
* @brief check return status and exit program when error occur.
*
* @param stmt
* @param code
* @param msg
*/
void checkErrorCode(TAOS_STMT *stmt, int code, const char *msg) {
if (code != 0) {
printf("%s. code: %d, error: %s\n", msg,code,taos_stmt_errstr(stmt));
taos_stmt_close(stmt);
exit(EXIT_FAILURE);
}
}
typedef struct {
int64_t ts;
float current;
int voltage;
float phase;
} Row;
int num_of_sub_table = 10;
int num_of_row = 10;
/**
* @brief insert data using stmt API
*
* @param taos
*/
void insertData(TAOS *taos) {
// init
TAOS_STMT *stmt = taos_stmt_init(taos);
// prepare
const char *sql = "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)";
int code = taos_stmt_prepare(stmt, sql, 0);
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
for (int i = 1; i <= num_of_sub_table; i++) {
char table_name[20];
sprintf(table_name, "d_bind_%d", i);
char location[20];
sprintf(location, "location_%d", i);
// set table name and tags
TAOS_MULTI_BIND tags[2];
// groupId
tags[0].buffer_type = TSDB_DATA_TYPE_INT;
tags[0].buffer_length = sizeof(int);
tags[0].length = (int32_t *)&tags[0].buffer_length;
tags[0].buffer = &i;
tags[0].is_null = NULL;
tags[0].num = 1;
// location
tags[1].buffer_type = TSDB_DATA_TYPE_BINARY;
tags[1].buffer_length = strlen(location);
tags[1].length =(int32_t *) &tags[1].buffer_length;
tags[1].buffer = location;
tags[1].is_null = NULL;
tags[1].num = 1;
code = taos_stmt_set_tbname_tags(stmt, table_name, tags);
checkErrorCode(stmt, code, "failed to set table name and tags\n");
// insert rows
TAOS_MULTI_BIND params[4];
// ts
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
params[0].buffer_length = sizeof(int64_t);
params[0].length = (int32_t *)&params[0].buffer_length;
params[0].is_null = NULL;
params[0].num = 1;
// current
params[1].buffer_type = TSDB_DATA_TYPE_FLOAT;
params[1].buffer_length = sizeof(float);
params[1].length = (int32_t *)&params[1].buffer_length;
params[1].is_null = NULL;
params[1].num = 1;
// voltage
params[2].buffer_type = TSDB_DATA_TYPE_INT;
params[2].buffer_length = sizeof(int);
params[2].length = (int32_t *)&params[2].buffer_length;
params[2].is_null = NULL;
params[2].num = 1;
// phase
params[3].buffer_type = TSDB_DATA_TYPE_FLOAT;
params[3].buffer_length = sizeof(float);
params[3].length = (int32_t *)&params[3].buffer_length;
params[3].is_null = NULL;
params[3].num = 1;
for (int j = 0; j < num_of_row; j++) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long milliseconds = tv.tv_sec * 1000LL + tv.tv_usec / 1000; // current timestamp in milliseconds
int64_t ts = milliseconds + j;
float current = (float)rand() / RAND_MAX * 30;
int voltage = rand() % 300;
float phase = (float)rand() / RAND_MAX;
params[0].buffer = &ts;
params[1].buffer = &current;
params[2].buffer = &voltage;
params[3].buffer = &phase;
// bind param
code = taos_stmt_bind_param(stmt, params);
checkErrorCode(stmt, code, "failed to bind param");
}
// add batch
code = taos_stmt_add_batch(stmt);
checkErrorCode(stmt, code, "failed to add batch");
// execute batch
code = taos_stmt_execute(stmt);
checkErrorCode(stmt, code, "failed to exec stmt");
// get affected rows
int affected = taos_stmt_affected_rows_once(stmt);
printf("table %s insert %d rows.\n", table_name, affected);
}
taos_stmt_close(stmt);
}
int main() {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
if (taos == NULL) {
printf("failed to connect to server\n");
exit(EXIT_FAILURE);
}
// create database and table
executeSQL(taos, "CREATE DATABASE IF NOT EXISTS power");
executeSQL(taos, "USE power");
executeSQL(taos,
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
"(groupId INT, location BINARY(24))");
insertData(taos);
taos_close(taos);
taos_cleanup();
}

docs/examples/c/tmq_demo.c (new file, 412 lines)

@ -0,0 +1,412 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include "taos.h"
volatile int thread_stop = 0;
static int running = 1;
const char* topic_name = "topic_meters";
void* prepare_data(void* arg) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return NULL;
}
TAOS_RES* pRes;
int i = 1;
while (!thread_stop) {
char buf[200] = {0};
i++;
snprintf(
buf, sizeof(buf),
"INSERT INTO power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') VALUES (NOW + %da, 10.30000, "
"219, 0.31000)",
i);
pRes = taos_query(pConn, buf);
if (taos_errno(pRes) != 0) {
printf("error in insert data to power.meters, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
sleep(1);
}
printf("prepare data thread exit\n");
return NULL;
}
// ANCHOR: msg_process
static int32_t msg_process(TAOS_RES* msg) {
char buf[1024]; // buf to store the row content
int32_t rows = 0;
const char* topicName = tmq_get_topic_name(msg);
const char* dbName = tmq_get_db_name(msg);
int32_t vgroupId = tmq_get_vgroup_id(msg);
printf("topic: %s\n", topicName);
printf("db: %s\n", dbName);
printf("vgroup id: %d\n", vgroupId);
while (1) {
// get one row data from message
TAOS_ROW row = taos_fetch_row(msg);
if (row == NULL) break;
// get the field information
TAOS_FIELD* fields = taos_fetch_fields(msg);
// get the number of fields
int32_t numOfFields = taos_field_count(msg);
// get the precision of the result
int32_t precision = taos_result_precision(msg);
rows++;
// print the row content
if (taos_print_row(buf, row, fields, numOfFields) < 0) {
printf("failed to print row\n");
break;
}
// print the precision and row content to the console
printf("precision: %d, row content: %s\n", precision, buf);
}
return rows;
}
// ANCHOR_END: msg_process
static int32_t init_env() {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return -1;
}
TAOS_RES* pRes;
// drop the topic and database if they exist, then re-create them
printf("drop and create database\n");
pRes = taos_query(pConn, "drop topic if exists topic_meters");
if (taos_errno(pRes) != 0) {
printf("error in drop topic_meters, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
pRes = taos_query(pConn, "drop database if exists power");
if (taos_errno(pRes) != 0) {
printf("error in drop power, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
// create database
pRes = taos_query(pConn, "create database power precision 'ms' WAL_RETENTION_PERIOD 3600");
if (taos_errno(pRes) != 0) {
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
goto END;
}
taos_free_result(pRes);
// create super table
printf("create super table\n");
pRes = taos_query(
pConn,
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
"(groupId INT, location BINARY(24))");
if (taos_errno(pRes) != 0) {
printf("failed to create super table meters, reason:%s\n", taos_errstr(pRes));
goto END;
}
taos_free_result(pRes);
taos_close(pConn);
return 0;
END:
taos_free_result(pRes);
taos_close(pConn);
return -1;
}
int32_t create_topic() {
printf("create topic\n");
TAOS_RES* pRes;
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return -1;
}
pRes = taos_query(pConn, "use power");
if (taos_errno(pRes) != 0) {
printf("error in use tmqdb, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(
pConn,
"CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM meters");
if (taos_errno(pRes) != 0) {
printf("failed to create topic topic_meters, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
taos_close(pConn);
return 0;
}
void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
printf("tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param);
}
// ANCHOR: create_consumer_1
tmq_t* build_consumer() {
tmq_conf_res_t code;
tmq_t* tmq = NULL;
// create a configuration object
tmq_conf_t* conf = tmq_conf_new();
// set the configuration parameters
code = tmq_conf_set(conf, "enable.auto.commit", "true");
if (TMQ_CONF_OK != code) {
tmq_conf_destroy(conf);
return NULL;
}
code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
if (TMQ_CONF_OK != code) {
tmq_conf_destroy(conf);
return NULL;
}
code = tmq_conf_set(conf, "group.id", "group1");
if (TMQ_CONF_OK != code) {
tmq_conf_destroy(conf);
return NULL;
}
code = tmq_conf_set(conf, "client.id", "client1");
if (TMQ_CONF_OK != code) {
tmq_conf_destroy(conf);
return NULL;
}
code = tmq_conf_set(conf, "td.connect.user", "root");
if (TMQ_CONF_OK != code) {
tmq_conf_destroy(conf);
return NULL;
}
code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
if (TMQ_CONF_OK != code) {
tmq_conf_destroy(conf);
return NULL;
}
code = tmq_conf_set(conf, "auto.offset.reset", "latest");
if (TMQ_CONF_OK != code) {
tmq_conf_destroy(conf);
return NULL;
}
// set the callback function for auto commit
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
// create a consumer object
tmq = tmq_consumer_new(conf, NULL, 0);
// destroy the configuration object
tmq_conf_destroy(conf);
return tmq;
}
// ANCHOR_END: create_consumer_1
// ANCHOR: build_topic_list
// build a topic list used to subscribe
tmq_list_t* build_topic_list() {
// create an empty topic list
tmq_list_t* topicList = tmq_list_new();
const char* topic_name = "topic_meters";
// append topic name to the list
int32_t code = tmq_list_append(topicList, topic_name);
if (code) {
// if failed, destroy the list and return NULL
tmq_list_destroy(topicList);
return NULL;
}
// if success, return the list
return topicList;
}
// ANCHOR_END: build_topic_list
// ANCHOR: basic_consume_loop
void basic_consume_loop(tmq_t* tmq) {
int32_t totalRows = 0; // total rows consumed
int32_t msgCnt = 0; // total messages consumed
int32_t timeout = 5000; // poll timeout
while (running) {
// poll message from TDengine
TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout);
if (tmqmsg) {
msgCnt++;
// process the message
totalRows += msg_process(tmqmsg);
// free the message
taos_free_result(tmqmsg);
}
if (msgCnt > 50) {
// consume 50 messages and break
break;
}
}
// print the result: total messages and total rows consumed
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
}
// ANCHOR_END: basic_consume_loop
// ANCHOR: consume_repeatly
void consume_repeatly(tmq_t* tmq) {
int32_t numOfAssignment = 0;
tmq_topic_assignment* pAssign = NULL;
// get the topic assignment
int32_t code = tmq_get_topic_assignment(tmq, topic_name, &pAssign, &numOfAssignment);
if (code != 0 || pAssign == NULL || numOfAssignment == 0) {
fprintf(stderr, "failed to get assignment, reason:%s", tmq_err2str(code));
return;
}
// seek to the earliest offset
for (int32_t i = 0; i < numOfAssignment; ++i) {
tmq_topic_assignment* p = &pAssign[i];
code = tmq_offset_seek(tmq, topic_name, p->vgId, p->begin);
if (code != 0) {
fprintf(stderr, "failed to seek to %d, reason:%s", (int)p->begin, tmq_err2str(code));
}
}
// free the assignment array
tmq_free_assignment(pAssign);
// let's consume the messages again
basic_consume_loop(tmq);
}
// ANCHOR_END: consume_repeatly
// ANCHOR: manual_commit
void manual_commit(tmq_t* tmq) {
int32_t totalRows = 0; // total rows consumed
int32_t msgCnt = 0; // total messages consumed
int32_t timeout = 5000; // poll timeout
while (running) {
// poll message from TDengine
TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout);
if (tmqmsg) {
msgCnt++;
// process the message
totalRows += msg_process(tmqmsg);
// commit the message
int32_t code = tmq_commit_sync(tmq, tmqmsg);
if (code) {
fprintf(stderr, "Failed to commit message: %s\n", tmq_err2str(code));
// free the message
taos_free_result(tmqmsg);
break;
}
// free the message
taos_free_result(tmqmsg);
}
if (msgCnt > 50) {
// consume 50 messages and break
break;
}
}
// print the result: total messages and total rows consumed
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
}
// ANCHOR_END: manual_commit
int main(int argc, char* argv[]) {
int32_t code;
pthread_t thread_id;
if (init_env() < 0) {
return -1;
}
if (create_topic() < 0) {
return -1;
}
if (pthread_create(&thread_id, NULL, &prepare_data, NULL)) {
fprintf(stderr, "create thread failed\n");
return 1;
}
// ANCHOR: create_consumer_2
tmq_t* tmq = build_consumer();
if (NULL == tmq) {
fprintf(stderr, "build consumer to localhost fail!\n");
return -1;
}
printf("build consumer to localhost successfully \n");
// ANCHOR_END: create_consumer_2
// ANCHOR: subscribe_3
tmq_list_t* topic_list = build_topic_list();
if (NULL == topic_list) {
return -1;
}
if ((code = tmq_subscribe(tmq, topic_list))) {
fprintf(stderr, "Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
}
tmq_list_destroy(topic_list);
basic_consume_loop(tmq);
// ANCHOR_END: subscribe_3
consume_repeatly(tmq);
manual_commit(tmq);
// ANCHOR: unsubscribe_and_close
// unsubscribe the topic
code = tmq_unsubscribe(tmq);
if (code) {
fprintf(stderr, "Failed to tmq_unsubscribe(): %s\n", tmq_err2str(code));
}
fprintf(stderr, "Unsubscribed consumer successfully.\n");
// close the consumer
code = tmq_consumer_close(tmq);
if (code) {
fprintf(stderr, "Failed to close consumer: %s\n", tmq_err2str(code));
} else {
fprintf(stderr, "Consumer closed successfully.\n");
}
// ANCHOR_END: unsubscribe_and_close
thread_stop = 1;
pthread_join(thread_id, NULL);
return 0;
}


@ -0,0 +1,77 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// TAOS standard API example. The same syntax as MySQL, but only a subset
// to compile: gcc -o with_reqid_demo with_reqid_demo.c -ltaos
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "taos.h"
static int DemoWithReqId() {
// ANCHOR: with_reqid
const char *ip = "localhost";
const char *user = "root";
const char *password = "taosdata";
// connect
TAOS *taos = taos_connect(ip, user, password, NULL, 0);
if (taos == NULL) {
printf("failed to connect to server %s, reason: %s\n", ip, taos_errstr(NULL));
taos_cleanup();
return -1;
}
printf("success to connect server %s\n", ip);
const char *sql = "SELECT ts, current, location FROM power.meters limit 1";
// query data with reqid
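// the request id (3 here) is an arbitrary caller-chosen value; it can be
// used to correlate this query with server-side logs when troubleshooting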
TAOS_RES *result = taos_query_with_reqid(taos, sql, 3L);
int code = taos_errno(result);
if (code != 0) {
printf("failed to query data from power.meters, ip: %s, reason: %s\n", ip, taos_errstr(result));
taos_close(taos);
taos_cleanup();
return -1;
}
TAOS_ROW row = NULL;
int rows = 0;
int num_fields = taos_field_count(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
printf("fields: %d\n", num_fields);
printf("sql: %s, result:\n", sql);
// fetch the records row by row
while ((row = taos_fetch_row(result))) {
char temp[1024] = {0};
rows++;
taos_print_row(temp, row, fields, num_fields);
printf("%s\n", temp);
}
printf("total rows: %d\n", rows);
taos_free_result(result);
printf("success to query data from power.meters\n");
// close & clean
taos_close(taos);
taos_cleanup();
return 0;
// ANCHOR_END: with_reqid
}
int main(int argc, char *argv[]) { return DemoWithReqId(); }


@ -3,6 +3,7 @@
connect/bin
influxdbLine/bin
optsJSON/bin
nativesml/bin
optsTelnet/bin
query/bin
sqlInsert/bin
@ -11,9 +12,12 @@ subscribe/bin
wsConnect/bin
wsInsert/bin
wsQuery/bin
wssml/bin
wsStmt/bin
wssubscribe/bin
connect/obj
influxdbLine/obj
nativesml/obj
optsJSON/obj
optsTelnet/obj
query/obj
@ -23,4 +27,6 @@ subscribe/obj
wsConnect/obj
wsInsert/obj
wsQuery/obj
wssml/obj
wsStmt/obj
wssubscribe/obj


@ -3,16 +3,35 @@ using TDengine.Driver.Client;
namespace TDengineExample
{
internal class ConnectExample
{
// ANCHOR: main
static void Main(String[] args)
{
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
var connectionString = "host=127.0.0.1;port=6030;username=root;password=taosdata";
try
{
Console.WriteLine("connected");
// Connect to TDengine server using Native
var builder = new ConnectionStringBuilder(connectionString);
// Open connection with using block, it will close the connection automatically
using (var client = DbDriver.Open(builder))
{
Console.WriteLine("Connected to " + connectionString + " successfully.");
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to connect to " + connectionString + "; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to connect to " + connectionString + "; Err:" + e.Message);
throw;
}
}
// ANCHOR_END: main
}
}


@ -27,6 +27,12 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsStmt", "wsStmt\wsStmt.csp
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "sqlinsert", "sqlInsert\sqlinsert.csproj", "{CD24BD12-8550-4627-A11D-707B446F48C3}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "nativesml", "nativesml\nativesml.csproj", "{18ADDBE8-B266-4A66-8CC5-CFF80B530EFD}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wssml", "wssml\wssml.csproj", "{C3E62FDB-CCBC-4F72-ACF6-D3B2C39630E3}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wssubscribe", "wssubscribe\wssubscribe.csproj", "{CB4BCBA5-C758-433F-8B90-7389F59E46BD}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@ -84,5 +90,17 @@ Global
{CD24BD12-8550-4627-A11D-707B446F48C3}.Debug|Any CPU.Build.0 = Debug|Any CPU
{CD24BD12-8550-4627-A11D-707B446F48C3}.Release|Any CPU.ActiveCfg = Release|Any CPU
{CD24BD12-8550-4627-A11D-707B446F48C3}.Release|Any CPU.Build.0 = Release|Any CPU
{18ADDBE8-B266-4A66-8CC5-CFF80B530EFD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{18ADDBE8-B266-4A66-8CC5-CFF80B530EFD}.Debug|Any CPU.Build.0 = Debug|Any CPU
{18ADDBE8-B266-4A66-8CC5-CFF80B530EFD}.Release|Any CPU.ActiveCfg = Release|Any CPU
{18ADDBE8-B266-4A66-8CC5-CFF80B530EFD}.Release|Any CPU.Build.0 = Release|Any CPU
{C3E62FDB-CCBC-4F72-ACF6-D3B2C39630E3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{C3E62FDB-CCBC-4F72-ACF6-D3B2C39630E3}.Debug|Any CPU.Build.0 = Debug|Any CPU
{C3E62FDB-CCBC-4F72-ACF6-D3B2C39630E3}.Release|Any CPU.ActiveCfg = Release|Any CPU
{C3E62FDB-CCBC-4F72-ACF6-D3B2C39630E3}.Release|Any CPU.Build.0 = Release|Any CPU
{CB4BCBA5-C758-433F-8B90-7389F59E46BD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{CB4BCBA5-C758-433F-8B90-7389F59E46BD}.Debug|Any CPU.Build.0 = Debug|Any CPU
{CB4BCBA5-C758-433F-8B90-7389F59E46BD}.Release|Any CPU.ActiveCfg = Release|Any CPU
{CB4BCBA5-C758-433F-8B90-7389F59E46BD}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
EndGlobal


@ -0,0 +1,57 @@
using TDengine.Driver;
using TDengine.Driver.Client;
namespace TDengineExample
{
internal class NativeSMLExample
{
// ANCHOR: main
public static void Main(string[] args)
{
var host = "127.0.0.1";
var lineDemo =
"meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639";
var telnetDemo = "metric_telnet 1707095283260 4 host=host0 interface=eth0";
var jsonDemo =
"{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
try
{
var builder =
new ConnectionStringBuilder(
$"host={host};port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
// create database
client.Exec("CREATE DATABASE IF NOT EXISTS power");
// use database
client.Exec("USE power");
// insert influx line protocol data
client.SchemalessInsert(new[]{lineDemo}, TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
// insert opentsdb telnet protocol data
client.SchemalessInsert(new[]{telnetDemo}, TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
// insert json data
client.SchemalessInsert(new []{jsonDemo}, TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED, 0, ReqId.GetReqId());
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to insert data with schemaless; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to insert data with schemaless; Err:" + e.Message);
throw;
}
}
// ANCHOR_END: main
}
}


@ -0,0 +1,13 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net6.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.1.*" />
</ItemGroup>
</Project>


@ -7,22 +7,51 @@ namespace TDengineExample
{
public static void Main(string[] args)
{
// ANCHOR: main
var host = "127.0.0.1";
var lineDemo =
"meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639";
var telnetDemo = "metric_telnet 1707095283260 4 host=host0 interface=eth0";
var jsonDemo =
"{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
try
{
client.Exec("CREATE DATABASE test WAL_RETENTION_PERIOD 3600");
client.Exec("use test");
string[] lines =
var builder =
new ConnectionStringBuilder(
$"protocol=WebSocket;host={host};port=6041;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
"[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " +
"{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]"
};
client.SchemalessInsert(lines, TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
// create database
client.Exec("CREATE DATABASE IF NOT EXISTS power");
// use database
client.Exec("USE power");
// insert influx line protocol data
client.SchemalessInsert(new[]{lineDemo}, TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
// insert opentsdb telnet protocol data
client.SchemalessInsert(new[]{telnetDemo}, TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
// insert json data
client.SchemalessInsert(new []{jsonDemo}, TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED, 0, ReqId.GetReqId());
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to insert data with schemaless; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to insert data with schemaless; Err:" + e.Message);
throw;
}
// ANCHOR_END: main
}
}
}


@ -13,8 +13,7 @@ namespace TDengineExample
{
try
{
client.Exec("use power");
string query = "SELECT * FROM meters";
string query = "SELECT * FROM power.meters";
using (var rows = client.Query(query))
{
while (rows.Read())


@ -6,42 +6,162 @@ namespace TDengineExample
{
internal class SQLInsertExample
{
public static void Main(string[] args)
{
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
try
{
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
client.Exec("create database power");
client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
string insertQuery =
"INSERT INTO " +
"power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " +
"VALUES " +
"('2023-10-03 14:38:05.000', 10.30000, 219, 0.31000) " +
"('2023-10-03 14:38:15.000', 12.60000, 218, 0.33000) " +
"('2023-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
"power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " +
"VALUES " +
"('2023-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
"power.d1003 USING power.meters TAGS(2,'California.LosAngeles') " +
"VALUES " +
"('2023-10-03 14:38:05.500', 11.80000, 221, 0.28000) " +
"('2023-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
"power.d1004 USING power.meters TAGS(3,'California.LosAngeles') " +
"VALUES " +
"('2023-10-03 14:38:05.000', 10.80000, 223, 0.29000) " +
"('2023-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
client.Exec(insertQuery);
}
catch (Exception e)
{
Console.WriteLine(e.ToString());
throw;
CreateDatabaseAndTable(client);
InsertData(client);
QueryData(client);
QueryWithReqId(client);
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine(e.Message);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine(e.Message);
throw;
}
}
private static void CreateDatabaseAndTable(ITDengineClient client)
{
// ANCHOR: create_db_and_table
try
{
// create database
var affected = client.Exec("CREATE DATABASE IF NOT EXISTS power");
Console.WriteLine($"Create database power, affected rows: {affected}");
// create table
affected = client.Exec(
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
Console.WriteLine($"Create table meters, affected rows: {affected}");
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to create db and table; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to create db and table; Err:" + e.Message);
throw;
}
// ANCHOR_END: create_db_and_table
}
private static void InsertData(ITDengineClient client)
{
// ANCHOR: insert_data
try
{
// insert data
var insertQuery = "INSERT INTO " +
"power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " +
"VALUES " +
"(NOW + 1a, 10.30000, 219, 0.31000) " +
"(NOW + 2a, 12.60000, 218, 0.33000) " +
"(NOW + 3a, 12.30000, 221, 0.31000) " +
"power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " +
"VALUES " +
"(NOW + 1a, 10.30000, 218, 0.25000) ";
var affectedRows = client.Exec(insertQuery);
Console.WriteLine("insert " + affectedRows + " rows to power.meters successfully.");
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to insert data to power.meters; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to insert data to power.meters; Err:" + e.Message);
throw;
}
// ANCHOR_END: insert_data
}
private static void QueryData(ITDengineClient client)
{
// ANCHOR: select_data
try
{
// query data
var query = "SELECT ts, current, location FROM power.meters limit 100";
using (var rows = client.Query(query))
{
while (rows.Read())
{
var ts = (DateTime)rows.GetValue(0);
var current = (float)rows.GetValue(1);
var location = Encoding.UTF8.GetString((byte[])rows.GetValue(2));
Console.WriteLine(
$"ts: {ts:yyyy-MM-dd HH:mm:ss.fff}, current: {current}, location: {location}");
}
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to query data from power.meters; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to query data from power.meters; Err:" + e.Message);
throw;
}
// ANCHOR_END: select_data
}
private static void QueryWithReqId(ITDengineClient client)
{
// ANCHOR: query_id
try
{
// query data
var query = "SELECT ts, current, location FROM power.meters limit 1";
// query with request id 3
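// the request id is caller-chosen and optional; it can help correlate this
// query with server-side logs when tracing problems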
using (var rows = client.Query(query,3))
{
while (rows.Read())
{
var ts = (DateTime)rows.GetValue(0);
var current = (float)rows.GetValue(1);
var location = Encoding.UTF8.GetString((byte[])rows.GetValue(2));
Console.WriteLine(
$"ts: {ts:yyyy-MM-dd HH:mm:ss.fff}, current: {current}, location: {location}");
}
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to execute sql with reqId; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to execute sql with reqId; Err:" + e.Message);
throw;
}
// ANCHOR_END: query_id
}
}
}
}


@ -5,34 +5,72 @@ namespace TDengineExample
{
internal class StmtInsertExample
{
// ANCHOR: main
public static void Main(string[] args)
{
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
var host = "127.0.0.1";
var numOfSubTable = 10;
var numOfRow = 10;
var random = new Random();
try
{
var builder = new ConnectionStringBuilder($"host={host};port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
client.Exec($"create database power");
// create database
client.Exec("CREATE DATABASE IF NOT EXISTS power");
// use database
client.Exec("USE power");
// create table
client.Exec(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
"CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
using (var stmt = client.StmtInit())
{
String sql = "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)";
stmt.Prepare(sql);
for (int i = 1; i <= numOfSubTable; i++)
{
var tableName = $"d_bind_{i}";
// set table name
stmt.SetTableName(tableName);
// set tags
stmt.SetTags(new object[] { i, $"location_{i}" });
var current = DateTime.Now;
// bind rows
for (int j = 0; j < numOfRow; j++)
{
stmt.BindRow(new object[]
{
current.Add(TimeSpan.FromMilliseconds(j)),
random.NextSingle() * 30,
random.Next(300),
random.NextSingle()
});
}
// add batch
stmt.AddBatch();
// execute
stmt.Exec();
// get affected rows
var affectedRows = stmt.Affected();
Console.WriteLine($"table {tableName} insert {affectedRows} rows.");
}
}
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to insert to table meters using stmt; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to insert to table meters using stmt; Err:" + e.Message);
throw;
}
}
// ANCHOR_END: main
}
}


@ -1,5 +1,4 @@

using TDengine.Driver;
using TDengine.Driver.Client;
using TDengine.TMQ;
@ -9,65 +8,242 @@ namespace TMQExample
{
public static void Main(string[] args)
{
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
try
{
var builder = new ConnectionStringBuilder("host=127.0.0.1;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
client.Exec("CREATE DATABASE power");
client.Exec("CREATE DATABASE IF NOT EXISTS power");
client.Exec("USE power");
client.Exec(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
client.Exec("CREATE TOPIC topic_meters as SELECT * from power.meters");
var cfg = new Dictionary<string, string>()
{
{ "group.id", "group1" },
{ "auto.offset.reset", "latest" },
{ "td.connect.ip", "127.0.0.1" },
{ "td.connect.user", "root" },
{ "td.connect.pass", "taosdata" },
{ "td.connect.port", "6030" },
{ "client.id", "tmq_example" },
{ "enable.auto.commit", "true" },
{ "msg.with.table.name", "false" },
};
var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
consumer.Subscribe(new List<string>() { "topic_meters" });
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
client.Exec("CREATE TOPIC IF NOT EXISTS topic_meters as SELECT * from power.meters");
var consumer = CreateConsumer();
// insert data
Task.Run(InsertData);
// consume message
Consume(consumer);
// seek
Seek(consumer);
// commit
CommitOffset(consumer);
// close
Close(consumer);
Console.WriteLine("Done");
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine(e.Message);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine(e.Message);
throw;
}
}
static void InsertData()
{
var builder = new ConnectionStringBuilder("host=localhost;port=6030;username=root;password=taosdata");
var builder = new ConnectionStringBuilder("host=127.0.0.1;port=6030;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
while (true)
{
client.Exec("INSERT into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(now,11.5,219,0.30)");
client.Exec(
"INSERT into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(now,11.5,219,0.30)");
Task.Delay(1000).Wait();
}
}
}
static IConsumer<Dictionary<string, object>> CreateConsumer()
{
// ANCHOR: create_consumer
// consumer config
var cfg = new Dictionary<string, string>()
{
{ "td.connect.port", "6030" },
{ "auto.offset.reset", "latest" },
{ "msg.with.table.name", "true" },
{ "enable.auto.commit", "true" },
{ "auto.commit.interval.ms", "1000" },
{ "group.id", "group1" },
{ "client.id", "client1" },
{ "td.connect.ip", "127.0.0.1" },
{ "td.connect.user", "root" },
{ "td.connect.pass", "taosdata" },
};
IConsumer<Dictionary<string, object>> consumer = null!;
try
{
// create consumer
consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to create consumer; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to create consumer; Err:" + e.Message);
throw;
}
// ANCHOR_END: create_consumer
return consumer;
}
static void Consume(IConsumer<Dictionary<string, object>> consumer)
{
// ANCHOR: subscribe
try
{
// subscribe
consumer.Subscribe(new List<string>() { "topic_meters" });
for (int i = 0; i < 50; i++)
{
// consume message with using block to ensure the result is disposed
using (var cr = consumer.Consume(100))
{
if (cr == null) continue;
foreach (var message in cr.Message)
{
// handle message
Console.WriteLine(
$"data {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " +
$"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}");
}
}
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to poll data; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to poll data; Err:" + e.Message);
throw;
}
// ANCHOR_END: subscribe
}
static void Seek(IConsumer<Dictionary<string, object>> consumer)
{
// ANCHOR: seek
try
{
// get assignment
var assignment = consumer.Assignment;
// seek to the beginning
foreach (var topicPartition in assignment)
{
consumer.Seek(new TopicPartitionOffset(topicPartition.Topic, topicPartition.Partition, 0));
}
Console.WriteLine("assignment seek to beginning successfully");
// poll data again
for (int i = 0; i < 50; i++)
{
// consume message with using block to ensure the result is disposed
using (var cr = consumer.Consume(100))
{
if (cr == null) continue;
foreach (var message in cr.Message)
{
// handle message
Console.WriteLine(
$"second data polled: {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " +
$"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}");
}
break;
}
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to seek; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to seek; Err:" + e.Message);
throw;
}
// ANCHOR_END: seek
}
static void CommitOffset(IConsumer<Dictionary<string, object>> consumer)
{
// ANCHOR: commit_offset
for (int i = 0; i < 5; i++)
{
try
{
// consume message with using block to ensure the result is disposed
using (var cr = consumer.Consume(100))
{
if (cr == null) continue;
// commit offset
consumer.Commit(new List<TopicPartitionOffset>
{
cr.TopicPartitionOffset,
});
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to commit offset; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to commit offset; Err:" + e.Message);
throw;
}
}
// ANCHOR_END: commit_offset
}
static void Close(IConsumer<Dictionary<string, object>> consumer)
{
// ANCHOR: close
try
{
// unsubscribe
consumer.Unsubscribe();
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to unsubscribe consumer; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to unsubscribe consumer; Err:" + e.Message);
throw;
}
finally
{
// close consumer
consumer.Close();
}
// ANCHOR_END: close
}
}
}


@ -6,13 +6,35 @@ namespace Examples
{
public class WSConnExample
{
// ANCHOR: main
static void Main(string[] args)
{
var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
var connectionString =
"protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata";
try
{
Console.WriteLine("connected");
// Connect to TDengine server using WebSocket
var builder = new ConnectionStringBuilder(connectionString);
// Open connection with using block, it will close the connection automatically
using (var client = DbDriver.Open(builder))
{
Console.WriteLine("Connected to " + connectionString + " successfully.");
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to connect to " + connectionString + "; ErrCode:" + e.Code +
"; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to connect to " + connectionString + "; Err:" + e.Message);
throw;
}
}
// ANCHOR_END: main
}
}


@ -1,4 +1,5 @@
using System;
using System.Text;
using TDengine.Driver;
using TDengine.Driver.Client;
@ -8,39 +9,159 @@ namespace Examples
{
public static void Main(string[] args)
{
var builder = new ConnectionStringBuilder("protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
try
{
var builder = new ConnectionStringBuilder("protocol=WebSocket;host=127.0.0.1;port=6041;useSSL=false;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
client.Exec("create database power");
client.Exec("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
string insertQuery =
"INSERT INTO " +
"power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " +
"VALUES " +
"('2023-10-03 14:38:05.000', 10.30000, 219, 0.31000) " +
"('2023-10-03 14:38:15.000', 12.60000, 218, 0.33000) " +
"('2023-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
"power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " +
"VALUES " +
"('2023-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
"power.d1003 USING power.meters TAGS(2,'California.LosAngeles') " +
"VALUES " +
"('2023-10-03 14:38:05.500', 11.80000, 221, 0.28000) " +
"('2023-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
"power.d1004 USING power.meters TAGS(3,'California.LosAngeles') " +
"VALUES " +
"('2023-10-03 14:38:05.000', 10.80000, 223, 0.29000) " +
"('2023-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
client.Exec(insertQuery);
}
catch (Exception e)
{
Console.WriteLine(e.ToString());
throw;
CreateDatabaseAndTable(client);
InsertData(client);
QueryData(client);
QueryWithReqId(client);
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine(e.Message);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine(e.Message);
throw;
}
}
private static void CreateDatabaseAndTable(ITDengineClient client)
{
// ANCHOR: create_db_and_table
try
{
// create database
var affected = client.Exec("CREATE DATABASE IF NOT EXISTS power");
Console.WriteLine($"Create database power, affected rows: {affected}");
// create table
affected = client.Exec(
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
Console.WriteLine($"Create table meters, affected rows: {affected}");
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to create db and table; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to create db and table; Err:" + e.Message);
throw;
}
// ANCHOR_END: create_db_and_table
}
private static void InsertData(ITDengineClient client)
{
// ANCHOR: insert_data
try
{
// insert data
var insertQuery = "INSERT INTO " +
"power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " +
"VALUES " +
"(NOW + 1a, 10.30000, 219, 0.31000) " +
"(NOW + 2a, 12.60000, 218, 0.33000) " +
"(NOW + 3a, 12.30000, 221, 0.31000) " +
"power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " +
"VALUES " +
"(NOW + 1a, 10.30000, 218, 0.25000) ";
var affectedRows = client.Exec(insertQuery);
Console.WriteLine("insert " + affectedRows + " rows to power.meters successfully.");
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to insert data to power.meters; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to insert data to power.meters; Err:" + e.Message);
throw;
}
// ANCHOR_END: insert_data
}
private static void QueryData(ITDengineClient client)
{
// ANCHOR: select_data
try
{
// query data
var query = "SELECT ts, current, location FROM power.meters limit 100";
using (var rows = client.Query(query))
{
while (rows.Read())
{
var ts = (DateTime)rows.GetValue(0);
var current = (float)rows.GetValue(1);
var location = Encoding.UTF8.GetString((byte[])rows.GetValue(2));
Console.WriteLine(
$"ts: {ts:yyyy-MM-dd HH:mm:ss.fff}, current: {current}, location: {location}");
}
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to query data from power.meters; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to query data from power.meters; Err:" + e.Message);
throw;
}
// ANCHOR_END: select_data
}
private static void QueryWithReqId(ITDengineClient client)
{
// ANCHOR: query_id
try
{
// query data
var query = "SELECT ts, current, location FROM power.meters limit 1";
// query with request id 3
using (var rows = client.Query(query, 3))
{
while (rows.Read())
{
var ts = (DateTime)rows.GetValue(0);
var current = (float)rows.GetValue(1);
var location = Encoding.UTF8.GetString((byte[])rows.GetValue(2));
Console.WriteLine(
$"ts: {ts:yyyy-MM-dd HH:mm:ss.fff}, current: {current}, location: {location}");
}
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to execute sql with reqId; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to execute sql with reqId; Err:" + e.Message);
throw;
}
// ANCHOR_END: query_id
}
}
}
}

View File

@ -14,8 +14,7 @@ namespace Examples
{
try
{
client.Exec("use power");
string query = "SELECT * FROM meters";
string query = "SELECT * FROM power.meters";
using (var rows = client.Query(query))
{
while (rows.Read())

View File

@ -6,36 +6,72 @@ namespace Examples
{
public class WSStmtExample
{
// ANCHOR: main
public static void Main(string[] args)
{
var builder =
new ConnectionStringBuilder(
"protocol=WebSocket;host=localhost;port=6041;useSSL=false;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
var host = "127.0.0.1";
var numOfSubTable = 10;
var numOfRow = 10;
var random = new Random();
try
{
try
var builder = new ConnectionStringBuilder($"protocol=WebSocket;host={host};port=6041;useSSL=false;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
client.Exec($"create database power");
// create database
client.Exec("CREATE DATABASE IF NOT EXISTS power");
// use database
client.Exec("USE power");
// create table
client.Exec(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
"CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
using (var stmt = client.StmtInit())
{
stmt.Prepare(
"Insert into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(?,?,?,?)");
var ts = new DateTime(2023, 10, 03, 14, 38, 05, 000);
stmt.BindRow(new object[] { ts, (float)10.30000, (int)219, (float)0.31000 });
stmt.AddBatch();
stmt.Exec();
var affected = stmt.Affected();
Console.WriteLine($"affected rows: {affected}");
var sql = "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)";
stmt.Prepare(sql);
for (int i = 1; i <= numOfSubTable; i++)
{
var tableName = $"d_bind_{i}";
// set table name
stmt.SetTableName(tableName);
// set tags
stmt.SetTags(new object[] { i, $"location_{i}" });
var current = DateTime.Now;
// bind rows
for (int j = 0; j < numOfRow; j++)
{
stmt.BindRow(new object[]
{
current.Add(TimeSpan.FromMilliseconds(j)),
random.NextSingle() * 30,
random.Next(300),
random.NextSingle()
});
}
// add batch
stmt.AddBatch();
// execute
stmt.Exec();
// get affected rows
var affectedRows = stmt.Affected();
Console.WriteLine($"table {tableName} insert {affectedRows} rows.");
}
}
}
catch (Exception e)
{
Console.WriteLine(e);
throw;
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to insert to table meters using stmt; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to insert to table meters using stmt; Err:" + e.Message);
throw;
}
}
// ANCHOR_END: main
}
}

View File

@ -0,0 +1,57 @@
using TDengine.Driver;
using TDengine.Driver.Client;
namespace TDengineExample
{
internal class WssmlExample
{
// ANCHOR: main
public static void Main(string[] args)
{
var host = "127.0.0.1";
var lineDemo =
"meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639";
var telnetDemo = "metric_telnet 1707095283260 4 host=host0 interface=eth0";
var jsonDemo =
"{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}";
try
{
var builder =
new ConnectionStringBuilder(
$"protocol=WebSocket;host={host};port=6041;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
// create database
client.Exec("CREATE DATABASE IF NOT EXISTS power");
// use database
client.Exec("USE power");
// insert influx line protocol data
client.SchemalessInsert(new[]{lineDemo}, TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
// insert opentsdb telnet protocol data
client.SchemalessInsert(new[]{telnetDemo}, TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS, 0, ReqId.GetReqId());
// insert json data
client.SchemalessInsert(new[]{jsonDemo}, TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL,
TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED, 0, ReqId.GetReqId());
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to insert data with schemaless; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to insert data with schemaless; Err:" + e.Message);
throw;
}
}
// ANCHOR_END: main
}
}

View File

@ -0,0 +1,12 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net6.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.1.*" />
</ItemGroup>
</Project>

View File

@ -0,0 +1,250 @@
using TDengine.Driver;
using TDengine.Driver.Client;
using TDengine.TMQ;
namespace TMQExample
{
internal class SubscribeDemo
{
public static void Main(string[] args)
{
try
{
var builder = new ConnectionStringBuilder("protocol=WebSocket;host=127.0.0.1;port=6041;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
client.Exec("CREATE DATABASE IF NOT EXISTS power");
client.Exec("USE power");
client.Exec(
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))");
client.Exec("CREATE TOPIC IF NOT EXISTS topic_meters as SELECT * from power.meters");
var consumer = CreateConsumer();
// insert data
Task.Run(InsertData);
// consume message
Consume(consumer);
// seek
Seek(consumer);
// commit
CommitOffset(consumer);
// close
Close(consumer);
Console.WriteLine("Done");
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine(e.Message);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine(e.Message);
throw;
}
}
static void InsertData()
{
var builder = new ConnectionStringBuilder("protocol=WebSocket;host=127.0.0.1;port=6041;username=root;password=taosdata");
using (var client = DbDriver.Open(builder))
{
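// insert one row per second so the consumer always has fresh data to poll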
while (true)
{
client.Exec(
"INSERT into power.d1001 using power.meters tags(2,'California.SanFrancisco') values(now,11.5,219,0.30)");
Task.Delay(1000).Wait();
}
}
}
static IConsumer<Dictionary<string, object>> CreateConsumer()
{
// ANCHOR: create_consumer
// consumer config
var cfg = new Dictionary<string, string>()
{
{"td.connect.type", "WebSocket"},
{ "td.connect.port", "6041" },
{ "auto.offset.reset", "latest" },
{ "msg.with.table.name", "true" },
{ "enable.auto.commit", "true" },
{ "auto.commit.interval.ms", "1000" },
{ "group.id", "group1" },
{ "client.id", "client1" },
{ "td.connect.ip", "127.0.0.1" },
{ "td.connect.user", "root" },
{ "td.connect.pass", "taosdata" },
};
IConsumer<Dictionary<string, object>> consumer = null!;
try
{
// create consumer
consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to create consumer; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to create consumer; Err:" + e.Message);
throw;
}
// ANCHOR_END: create_consumer
return consumer;
}
static void Consume(IConsumer<Dictionary<string, object>> consumer)
{
// ANCHOR: subscribe
try
{
// subscribe
consumer.Subscribe(new List<string>() { "topic_meters" });
for (int i = 0; i < 50; i++)
{
// consume message with using block to ensure the result is disposed
using (var cr = consumer.Consume(100))
{
if (cr == null) continue;
foreach (var message in cr.Message)
{
// handle message
Console.WriteLine(
$"data {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " +
$"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}");
}
}
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to poll data; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to poll data; Err:" + e.Message);
throw;
}
// ANCHOR_END: subscribe
}
static void Seek(IConsumer<Dictionary<string, object>> consumer)
{
// ANCHOR: seek
try
{
// get assignment
var assignment = consumer.Assignment;
// seek to the beginning
foreach (var topicPartition in assignment)
{
consumer.Seek(new TopicPartitionOffset(topicPartition.Topic, topicPartition.Partition, 0));
}
Console.WriteLine("assignment seek to beginning successfully");
// poll data again
for (int i = 0; i < 50; i++)
{
// consume message with using block to ensure the result is disposed
using (var cr = consumer.Consume(100))
{
if (cr == null) continue;
foreach (var message in cr.Message)
{
// handle message
Console.WriteLine(
$"second data polled: {{{((DateTime)message.Value["ts"]).ToString("yyyy-MM-dd HH:mm:ss.fff")}, " +
$"{message.Value["current"]}, {message.Value["voltage"]}, {message.Value["phase"]}}}");
}
break;
}
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to seek; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to seek; Err:" + e.Message);
throw;
}
// ANCHOR_END: seek
}
static void CommitOffset(IConsumer<Dictionary<string, object>> consumer)
{
// ANCHOR: commit_offset
for (int i = 0; i < 5; i++)
{
try
{
// consume message with using block to ensure the result is disposed
using (var cr = consumer.Consume(100))
{
if (cr == null) continue;
// commit offset
consumer.Commit(new List<TopicPartitionOffset>
{
cr.TopicPartitionOffset,
});
}
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to commit offset; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to commit offset; Err:" + e.Message);
throw;
}
}
// ANCHOR_END: commit_offset
}
static void Close(IConsumer<Dictionary<string, object>> consumer)
{
// ANCHOR: close
try
{
// unsubscribe
consumer.Unsubscribe();
}
catch (TDengineError e)
{
// handle TDengine error
Console.WriteLine("Failed to unsubscribe consumer; ErrCode:" + e.Code + "; ErrMessage: " + e.Error);
throw;
}
catch (Exception e)
{
// handle other exceptions
Console.WriteLine("Failed to unsubscribe consumer; Err:" + e.Message);
throw;
}
finally
{
// close consumer
consumer.Close();
}
// ANCHOR_END: close
}
}
}

View File

@ -0,0 +1,12 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>net6.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.1.3" GeneratePathProperty="true" />
</ItemGroup>
</Project>

View File

@ -13,6 +13,6 @@ func main() {
if err != nil {
log.Fatalln("failed to connect, err:", err)
} else {
fmt.Println("connected")
fmt.Println("Connected")
}
}

View File

@ -9,16 +9,14 @@ import (
)
func main() {
// use
// var taosDSN = "root:taosdata@tcp(localhost:6030)/dbName"
// if you want to connect a specified database named "dbName".
var taosDSN = "root:taosdata@tcp(localhost:6030)/"
taos, err := sql.Open("taosSql", taosDSN)
if err != nil {
log.Fatalln("failed to connect TDengine, err:", err)
return
}
fmt.Println("Connected")
fmt.Println("Connected to " + taosDSN + " successfully.")
defer taos.Close()
}
// use
// var taosDSN = "root:taosdata@tcp(localhost:6030)/dbName"
// if you want to connect a specified database named "dbName".

View File

@ -0,0 +1,32 @@
package main
import (
"database/sql"
"fmt"
"log"
_ "github.com/taosdata/driver-go/v3/taosSql"
)
func main() {
// use
// var taosDSN = "root:taosdata@tcp(localhost:6030)/dbName"
// if you want to connect a specified database named "dbName".
var taosDSN = "root:taosdata@tcp(localhost:6030)/"
taos, err := sql.Open("taosSql", taosDSN)
if err != nil {
log.Fatalln("failed to connect TDengine, err:", err)
}
fmt.Println("Connected")
defer taos.Close()
// ANCHOR: pool
// SetMaxOpenConns sets the maximum number of open connections to the database. 0 means unlimited.
taos.SetMaxOpenConns(0)
// SetMaxIdleConns sets the maximum number of connections in the idle connection pool.
taos.SetMaxIdleConns(2)
// SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
taos.SetConnMaxLifetime(0)
// SetConnMaxIdleTime sets the maximum amount of time a connection may be idle.
taos.SetConnMaxIdleTime(0)
// ANCHOR_END: pool
}

View File

@ -9,16 +9,14 @@ import (
)
func main() {
// use
// var taosDSN = "root:taosdata@http(localhost:6041)/dbName"
// if you want to connect a specified database named "dbName".
var taosDSN = "root:taosdata@http(localhost:6041)/"
taos, err := sql.Open("taosRestful", taosDSN)
if err != nil {
log.Fatalln("failed to connect TDengine, err:", err)
return
}
fmt.Println("Connected")
fmt.Println("Connected to " + taosDSN + " successfully.")
defer taos.Close()
}
// use
// var taosDSN = "root:taosdata@http(localhost:6041)/dbName"
// if you want to connect a specified database named "dbName".

View File

@ -0,0 +1,22 @@
package main
import (
"database/sql"
"fmt"
"log"
_ "github.com/taosdata/driver-go/v3/taosWS"
)
func main() {
// use
// var taosDSN = "root:taosdata@ws(localhost:6041)/dbName"
// if you want to connect a specified database named "dbName".
var taosDSN = "root:taosdata@ws(localhost:6041)/"
taos, err := sql.Open("taosWS", taosDSN)
if err != nil {
log.Fatalln("failed to connect TDengine, err:", err)
}
fmt.Println("Connected to " + taosDSN + " successfully.")
defer taos.Close()
}

View File

@ -2,5 +2,12 @@ module goexample
go 1.17
require github.com/taosdata/driver-go/v3 v3.1.0
require github.com/taosdata/driver-go/v3 v3.5.6
require (
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
)

View File

@ -1,15 +1,25 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/taosdata/driver-go/v3 v3.1.0/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
github.com/taosdata/driver-go/v3 v3.5.6 h1:LDVtMyT3B9p2VREsd5KKM91D4Y7P4kSdh2SQumXi8bk=
github.com/taosdata/driver-go/v3 v3.5.6/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -0,0 +1,56 @@
package main
import (
"context"
"database/sql"
"fmt"
"log"
"time"
_ "github.com/taosdata/driver-go/v3/taosSql"
)
func main() {
db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/")
if err != nil {
log.Fatal("Open database error: ", err)
}
defer db.Close()
initEnv(db)
// ANCHOR: query_id
// use context to set request id
ctx := context.WithValue(context.Background(), "taos_req_id", int64(3))
// execute query with context
rows, err := db.QueryContext(ctx, "SELECT ts, current, location FROM power.meters limit 1")
if err != nil {
log.Fatal("Query error: ", err)
}
for rows.Next() {
var (
ts time.Time
current float32
location string
)
err = rows.Scan(&ts, &current, &location)
if err != nil {
log.Fatal("Scan error: ", err)
}
fmt.Printf("ts: %s, current: %f, location: %s\n", ts, current, location)
}
// ANCHOR_END: query_id
}
func initEnv(conn *sql.DB) {
_, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
log.Fatal("Create database error: ", err)
}
_, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
if err != nil {
log.Fatal("Create table error: ", err)
}
_, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)")
if err != nil {
log.Fatal("Insert data error: ", err)
}
}

View File

@ -0,0 +1,43 @@
package main
import (
"log"
"github.com/taosdata/driver-go/v3/af"
)
func main() {
host := "127.0.0.1"
lineDemo := "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639"
telnetDemo := "metric_telnet 1707095283260 4 host=host0 interface=eth0"
jsonDemo := "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"
conn, err := af.Open(host, "root", "taosdata", "", 0)
if err != nil {
log.Fatal("failed to connect TDengine, err:", err)
}
defer conn.Close()
_, err = conn.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
log.Fatal("failed to create database, err:", err)
}
_, err = conn.Exec("USE power")
if err != nil {
log.Fatal("failed to use database, err:", err)
}
// insert influxdb line protocol
err = conn.InfluxDBInsertLines([]string{lineDemo}, "ms")
if err != nil {
log.Fatal("failed to insert influxdb line protocol, err:", err)
}
// insert opentsdb telnet protocol
err = conn.OpenTSDBInsertTelnetLines([]string{telnetDemo})
if err != nil {
log.Fatal("failed to insert opentsdb telnet line protocol, err:", err)
}
// insert opentsdb json protocol
err = conn.OpenTSDBInsertJsonPayload(jsonDemo)
if err != nil {
log.Fatal("failed to insert opentsdb json format protocol, err:", err)
}
}

View File

@ -0,0 +1,54 @@
package main
import (
"database/sql"
"fmt"
"log"
"time"
"github.com/taosdata/driver-go/v3/common"
_ "github.com/taosdata/driver-go/v3/taosWS"
"github.com/taosdata/driver-go/v3/ws/schemaless"
)
func main() {
host := "127.0.0.1"
lineDemo := "meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639"
telnetDemo := "metric_telnet 1707095283260 4 host=host0 interface=eth0"
jsonDemo := "{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"
db, err := sql.Open("taosWS", fmt.Sprintf("root:taosdata@ws(%s:6041)/", host))
if err != nil {
log.Fatal("failed to connect TDengine, err:", err)
}
defer db.Close()
_, err = db.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
log.Fatal("failed to create database, err:", err)
}
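// create a schemaless writer over WebSocket; rows are written into the power database created above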
s, err := schemaless.NewSchemaless(schemaless.NewConfig("ws://localhost:6041", 1,
schemaless.SetDb("power"),
schemaless.SetReadTimeout(10*time.Second),
schemaless.SetWriteTimeout(10*time.Second),
schemaless.SetUser("root"),
schemaless.SetPassword("taosdata"),
))
if err != nil {
log.Fatal("failed to create schemaless connection, err:", err)
}
// insert influxdb line protocol
err = s.Insert(lineDemo, schemaless.InfluxDBLineProtocol, "ms", 0, common.GetReqID())
if err != nil {
log.Fatal("failed to insert influxdb line protocol, err:", err)
}
// insert opentsdb telnet line protocol
err = s.Insert(telnetDemo, schemaless.OpenTSDBTelnetLineProtocol, "ms", 0, common.GetReqID())
if err != nil {
log.Fatal("failed to insert opentsdb telnet line protocol, err:", err)
}
// insert opentsdb json format protocol
err = s.Insert(jsonDemo, schemaless.OpenTSDBJsonFormatProtocol, "s", 0, common.GetReqID())
if err != nil {
log.Fatal("failed to insert opentsdb json format protocol, err:", err)
}
}

View File

@ -0,0 +1,89 @@
package main
import (
"database/sql"
"fmt"
"log"
"time"
_ "github.com/taosdata/driver-go/v3/taosSql"
)
func main() {
db, err := sql.Open("taosSql", "root:taosdata@tcp(localhost:6030)/")
if err != nil {
log.Fatal("open database failed:", err)
}
defer db.Close()
// ANCHOR: create_db_and_table
// create database
res, err := db.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
log.Fatal("create database failed:", err)
}
affected, err := res.RowsAffected()
if err != nil {
log.Fatal("get affected rows failed:", err)
}
fmt.Println("create database affected:", affected)
// use database
res, err = db.Exec("USE power")
if err != nil {
log.Fatal("use database failed:", err)
}
affected, err = res.RowsAffected()
if err != nil {
log.Fatal("get affected rows failed:", err)
}
fmt.Println("use database affected:", affected)
// create table
res, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
affected, err = res.RowsAffected()
if err != nil {
log.Fatal("create table failed:", err)
}
fmt.Println("create table affected:", affected)
// ANCHOR_END: create_db_and_table
// ANCHOR: insert_data
// insert data, please make sure the database and table are created before
insertQuery := "INSERT INTO " +
"power.d1001 USING power.meters TAGS(2,'California.SanFrancisco') " +
"VALUES " +
"(NOW + 1a, 10.30000, 219, 0.31000) " +
"(NOW + 2a, 12.60000, 218, 0.33000) " +
"(NOW + 3a, 12.30000, 221, 0.31000) " +
"power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco') " +
"VALUES " +
"(NOW + 1a, 10.30000, 218, 0.25000) "
res, err = db.Exec(insertQuery)
if err != nil {
log.Fatal("insert data failed:", err)
}
affected, err = res.RowsAffected()
if err != nil {
log.Fatal("get affected rows failed:", err)
}
// you can check affectedRows here
fmt.Println("insert data affected:", affected)
// ANCHOR_END: insert_data
// ANCHOR: select_data
// query data, make sure the database and table are created before
rows, err := db.Query("SELECT ts, current, location FROM power.meters limit 100")
if err != nil {
log.Fatal("query data failed:", err)
}
for rows.Next() {
var (
ts time.Time
current float32
location string
)
err = rows.Scan(&ts, &current, &location)
if err != nil {
log.Fatal("scan data failed:", err)
}
// you can check data here
fmt.Printf("ts: %s, current: %f, location: %s\n", ts, current, location)
}
// ANCHOR_END: select_data
}

View File

@ -0,0 +1,83 @@
package main
import (
"fmt"
"log"
"math/rand"
"time"
"github.com/taosdata/driver-go/v3/af"
"github.com/taosdata/driver-go/v3/common"
"github.com/taosdata/driver-go/v3/common/param"
)
func main() {
host := "127.0.0.1"
numOfSubTable := 10
numOfRow := 10
db, err := af.Open(host, "root", "taosdata", "", 0)
if err != nil {
log.Fatal("failed to connect TDengine, err:", err)
}
defer db.Close()
// prepare database and table
_, err = db.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
log.Fatal("failed to create database, err:", err)
}
_, err = db.Exec("USE power")
if err != nil {
log.Fatal("failed to use database, err:", err)
}
_, err = db.Exec("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
if err != nil {
log.Fatal("failed to create table, err:", err)
}
// prepare statement
sql := "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)"
stmt := db.Stmt()
err = stmt.Prepare(sql)
if err != nil {
log.Fatal("failed to prepare statement, err:", err)
}
for i := 1; i <= numOfSubTable; i++ {
tableName := fmt.Sprintf("d_bind_%d", i)
tags := param.NewParam(2).AddInt(i).AddBinary([]byte(fmt.Sprintf("location_%d", i)))
// set tableName and tags
err = stmt.SetTableNameWithTags(tableName, tags)
if err != nil {
log.Fatal("failed to set table name and tags, err:", err)
}
// bind column data
current := time.Now()
for j := 0; j < numOfRow; j++ {
row := param.NewParam(4).
AddTimestamp(current.Add(time.Millisecond*time.Duration(j)), common.PrecisionMilliSecond).
AddFloat(rand.Float32() * 30).
AddInt(rand.Intn(300)).
AddFloat(rand.Float32())
err = stmt.BindRow(row)
if err != nil {
log.Fatal("failed to bind row, err:", err)
}
}
// add batch
err = stmt.AddBatch()
if err != nil {
log.Fatal("failed to add batch, err:", err)
}
// execute batch
err = stmt.Execute()
if err != nil {
log.Fatal("failed to execute batch, err:", err)
}
// get affected rows
affected := stmt.GetAffectedRows()
// you can check exeResult here
fmt.Printf("table %s insert %d rows.\n", tableName, affected)
}
err = stmt.Close()
if err != nil {
log.Fatal("failed to close statement, err:", err)
}
}

View File

@ -0,0 +1,103 @@
package main
import (
"database/sql"
"fmt"
"log"
"math/rand"
"time"
"github.com/taosdata/driver-go/v3/common"
"github.com/taosdata/driver-go/v3/common/param"
_ "github.com/taosdata/driver-go/v3/taosRestful"
"github.com/taosdata/driver-go/v3/ws/stmt"
)
func main() {
host := "127.0.0.1"
numOfSubTable := 10
numOfRow := 10
db, err := sql.Open("taosRestful", fmt.Sprintf("root:taosdata@http(%s:6041)/", host))
if err != nil {
log.Fatal("failed to connect TDengine, err:", err)
}
defer db.Close()
// prepare database and table
_, err = db.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
log.Fatal("failed to create database, err:", err)
}
_, err = db.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
if err != nil {
log.Fatal("failed to create table, err:", err)
}
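// configure the WebSocket stmt connector and bind it to the power database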
config := stmt.NewConfig(fmt.Sprintf("ws://%s:6041", host), 0)
config.SetConnectUser("root")
config.SetConnectPass("taosdata")
config.SetConnectDB("power")
config.SetMessageTimeout(common.DefaultMessageTimeout)
config.SetWriteWait(common.DefaultWriteWait)
connector, err := stmt.NewConnector(config)
if err != nil {
log.Fatal("failed to create stmt connector, err:", err)
}
// prepare statement
sql := "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)"
stmt, err := connector.Init()
if err != nil {
log.Fatal("failed to init stmt, err:", err)
}
err = stmt.Prepare(sql)
if err != nil {
log.Fatal("failed to prepare stmt, err:", err)
}
for i := 1; i <= numOfSubTable; i++ {
tableName := fmt.Sprintf("d_bind_%d", i)
tags := param.NewParam(2).AddInt(i).AddBinary([]byte(fmt.Sprintf("location_%d", i)))
tagsType := param.NewColumnType(2).AddInt().AddBinary(24)
columnType := param.NewColumnType(4).AddTimestamp().AddFloat().AddInt().AddFloat()
// set tableName
err = stmt.SetTableName(tableName)
if err != nil {
log.Fatal("failed to set table name, err:", err)
}
// set tags
err = stmt.SetTags(tags, tagsType)
if err != nil {
log.Fatal("failed to set tags, err:", err)
}
// bind column data
current := time.Now()
for j := 0; j < numOfRow; j++ {
columnData := make([]*param.Param, 4)
columnData[0] = param.NewParam(1).AddTimestamp(current.Add(time.Millisecond*time.Duration(j)), common.PrecisionMilliSecond)
columnData[1] = param.NewParam(1).AddFloat(rand.Float32() * 30)
columnData[2] = param.NewParam(1).AddInt(rand.Intn(300))
columnData[3] = param.NewParam(1).AddFloat(rand.Float32())
err = stmt.BindParam(columnData, columnType)
if err != nil {
log.Fatal("failed to bind param, err:", err)
}
}
// add batch
err = stmt.AddBatch()
if err != nil {
log.Fatal("failed to add batch, err:", err)
}
// execute batch
err = stmt.Exec()
if err != nil {
log.Fatal("failed to exec stmt, err:", err)
}
// get affected rows
affected := stmt.GetAffectedRows()
// you can check exeResult here
fmt.Printf("table %s insert %d rows.\n", tableName, affected)
}
err = stmt.Close()
if err != nil {
log.Fatal("failed to close stmt, err:", err)
}
}

View File

@ -0,0 +1,153 @@
package main
import (
"database/sql"
"fmt"
"log"
"time"
"github.com/taosdata/driver-go/v3/af/tmq"
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
_ "github.com/taosdata/driver-go/v3/taosSql"
)
var done = make(chan struct{})
func main() {
// init env
conn, err := sql.Open("taosSql", "root:taosdata@tcp(127.0.0.1:6030)/")
if err != nil {
log.Fatal("failed to connect TDengine, err:", err)
}
defer func() {
conn.Close()
}()
initEnv(conn)
// ANCHOR: create_consumer
// create consumer
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"auto.offset.reset": "latest",
"msg.with.table.name": "true",
"enable.auto.commit": "true",
"auto.commit.interval.ms": "1000",
"group.id": "group1",
"client.id": "client1",
})
if err != nil {
log.Fatal("failed to create consumer, err:", err)
}
// ANCHOR_END: create_consumer
// ANCHOR: subscribe
err = consumer.Subscribe("topic_meters", nil)
if err != nil {
log.Fatal("failed to subscribe, err:", err)
}
for i := 0; i < 50; i++ {
ev := consumer.Poll(100)
if ev != nil {
switch e := ev.(type) {
case *tmqcommon.DataMessage:
// process your data here
fmt.Printf("data:%v\n", e)
// ANCHOR: commit_offset
// commit offset
topicPartition, err := consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition})
if err != nil {
log.Fatal("failed to commit offset, err:", err)
}
fmt.Println(topicPartition)
// ANCHOR_END: commit_offset
case tmqcommon.Error:
fmt.Printf("%% Error: %v: %v\n", e.Code(), e)
log.Fatal("failed to get message, err:", e)
}
// commit all offsets
topicPartition, err := consumer.Commit()
if err != nil {
log.Fatal("failed to commit, err:", err)
}
fmt.Println(topicPartition)
}
}
// ANCHOR_END: subscribe
// ANCHOR: seek
// get assignment
partitions, err := consumer.Assignment()
if err != nil {
log.Fatal("failed to get assignment, err:", err)
}
for i := 0; i < len(partitions); i++ {
fmt.Println(partitions[i])
// seek to the beginning
err = consumer.Seek(tmqcommon.TopicPartition{
Topic: partitions[i].Topic,
Partition: partitions[i].Partition,
Offset: 0,
}, 0)
if err != nil {
log.Fatal("failed to seek, err:", err)
}
}
fmt.Println("assignment seek to beginning successfully")
// poll data again
gotData := false
for i := 0; i < 50; i++ {
if gotData {
break
}
ev := consumer.Poll(100)
if ev != nil {
switch e := ev.(type) {
case *tmqcommon.DataMessage:
// process your data here
fmt.Printf("second data polled:%v\n", e)
gotData = true
case tmqcommon.Error:
fmt.Printf("%% Error: %v: %v\n", e.Code(), e)
log.Fatal("failed to get message, err:", e)
}
}
}
// ANCHOR_END: seek
// ANCHOR: close
// unsubscribe
err = consumer.Unsubscribe()
if err != nil {
log.Fatal("failed to unsubscribe, err:", err)
}
// close consumer
err = consumer.Close()
if err != nil {
log.Fatal("failed to close consumer, err:", err)
}
// ANCHOR_END: close
<-done
}
func initEnv(conn *sql.DB) {
_, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
log.Fatal("failed to create database, err:", err)
}
_, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
if err != nil {
log.Fatal("failed to create stable, err:", err)
}
_, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters")
if err != nil {
log.Fatal("failed to create topic, err:", err)
}
go func() {
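// insert one row per second in the background for ten seconds, then signal completion on done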
for i := 0; i < 10; i++ {
time.Sleep(time.Second)
_, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)")
if err != nil {
log.Fatal("failed to insert data, err:", err)
}
}
done <- struct{}{}
}()
}

View File

@ -0,0 +1,158 @@
package main
import (
"database/sql"
"fmt"
"log"
"time"
"github.com/taosdata/driver-go/v3/common"
tmqcommon "github.com/taosdata/driver-go/v3/common/tmq"
_ "github.com/taosdata/driver-go/v3/taosWS"
"github.com/taosdata/driver-go/v3/ws/tmq"
)
var done = make(chan struct{})
func main() {
// init env
conn, err := sql.Open("taosWS", "root:taosdata@ws(127.0.0.1:6041)/")
if err != nil {
log.Fatal("failed to connect TDengine, err:", err)
}
defer func() {
conn.Close()
}()
initEnv(conn)
// ANCHOR: create_consumer
// create consumer
consumer, err := tmq.NewConsumer(&tmqcommon.ConfigMap{
"ws.url": "ws://127.0.0.1:6041",
"ws.message.channelLen": uint(0),
"ws.message.timeout": common.DefaultMessageTimeout,
"ws.message.writeWait": common.DefaultWriteWait,
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"auto.offset.reset": "latest",
"msg.with.table.name": "true",
"enable.auto.commit": "true",
"auto.commit.interval.ms": "1000",
"group.id": "group1",
"client.id": "client1",
})
if err != nil {
log.Fatal("failed to create consumer, err:", err)
}
// ANCHOR_END: create_consumer
// ANCHOR: subscribe
err = consumer.Subscribe("topic_meters", nil)
if err != nil {
log.Fatal("failed to subscribe, err:", err)
}
for i := 0; i < 50; i++ {
ev := consumer.Poll(100)
if ev != nil {
switch e := ev.(type) {
case *tmqcommon.DataMessage:
// process your data here
fmt.Printf("data:%v\n", e)
// ANCHOR: commit_offset
// commit offset
topicPartition, err := consumer.CommitOffsets([]tmqcommon.TopicPartition{e.TopicPartition})
if err != nil {
log.Fatal("failed to commit offset, err:", err)
}
fmt.Println(topicPartition)
// ANCHOR_END: commit_offset
case tmqcommon.Error:
fmt.Printf("%% Error: %v: %v\n", e.Code(), e)
log.Fatal("failed to get message, err:", e)
}
// commit all offsets
topicPartition, err := consumer.Commit()
if err != nil {
log.Fatal("failed to commit, err:", err)
}
fmt.Println(topicPartition)
}
}
// ANCHOR_END: subscribe
// ANCHOR: seek
// get assignment
partitions, err := consumer.Assignment()
if err != nil {
log.Fatal("failed to get assignment, err:", err)
}
for i := 0; i < len(partitions); i++ {
fmt.Println(partitions[i])
// seek to the beginning
err = consumer.Seek(tmqcommon.TopicPartition{
Topic: partitions[i].Topic,
Partition: partitions[i].Partition,
Offset: 0,
}, 0)
if err != nil {
log.Fatal("failed to seek, err:", err)
}
}
fmt.Println("assignment seek to beginning successfully")
// poll data again
gotData := false
for i := 0; i < 50; i++ {
if gotData {
break
}
ev := consumer.Poll(100)
if ev != nil {
switch e := ev.(type) {
case *tmqcommon.DataMessage:
// process your data here
fmt.Printf("second data polled:%v\n", e)
gotData = true
case tmqcommon.Error:
fmt.Printf("%% Error: %v: %v\n", e.Code(), e)
log.Fatal("failed to get message, err:", e)
}
}
}
// ANCHOR_END: seek
// ANCHOR: close
// unsubscribe
err = consumer.Unsubscribe()
if err != nil {
log.Fatal("failed to unsubscribe, err:", err)
}
// close consumer
err = consumer.Close()
if err != nil {
log.Fatal("failed to close consumer, err:", err)
}
// ANCHOR_END: close
<-done
}
func initEnv(conn *sql.DB) {
_, err := conn.Exec("CREATE DATABASE IF NOT EXISTS power")
if err != nil {
log.Fatal("failed to create database, err:", err)
}
_, err = conn.Exec("CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
if err != nil {
log.Fatal("failed to create stable, err:", err)
}
_, err = conn.Exec("CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters")
if err != nil {
log.Fatal("failed to create topic, err:", err)
}
go func() {
for i := 0; i < 10; i++ {
time.Sleep(time.Second)
_, err = conn.Exec("INSERT INTO power.d1001 USING power.meters TAGS (2, 'California.SanFrancisco') VALUES (NOW , 10.2, 219, 0.32)")
if err != nil {
log.Fatal("failed to insert data, err:", err)
}
}
done <- struct{}{}
}()
}

View File

@ -8,18 +8,31 @@ import java.util.Properties;
import com.taosdata.jdbc.TSDBDriver;
public class JNIConnectExample {
public static void main(String[] args) throws SQLException {
String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
System.out.println("Connected");
conn.close();
// ANCHOR: main
public static void main(String[] args) throws SQLException {
// use
// String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata";
// if you want to connect a specified database named "dbName".
String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) {
System.out.println("Connected to " + jdbcUrl + " successfully.");
// you can use the connection to execute SQL here
} catch (SQLException ex) {
// handle any errors; please refer to the JDBC specification for detailed exception info
System.out.println("Failed to connect to " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
throw ex;
} catch (Exception ex){
System.out.println("Failed to connect to " + jdbcUrl + "; ErrMessage: " + ex.getMessage());
throw ex;
}
}
// ANCHOR_END: main
}
// use
// String jdbcUrl = "jdbc:TAOS://localhost:6030/dbName?user=root&password=taosdata";
// if you want to connect a specified database named "dbName".

View File

@ -5,12 +5,22 @@ import java.sql.DriverManager;
import java.sql.SQLException;
public class RESTConnectExample {
// ANCHOR: main
public static void main(String[] args) throws SQLException {
String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata";
Connection conn = DriverManager.getConnection(jdbcUrl);
System.out.println("Connected");
conn.close();
// ANCHOR: main
public static void main(String[] args) throws SQLException {
String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata";
try (Connection conn = DriverManager.getConnection(jdbcUrl)){
System.out.println("Connected to " + jdbcUrl + " successfully.");
// you can use the connection to execute SQL here
} catch (SQLException ex) {
// handle any errors; please refer to the JDBC specification for detailed exception info
System.out.println("Failed to connect to " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
throw ex;
} catch (Exception ex){
System.out.println("Failed to connect to " + jdbcUrl + "; ErrMessage: " + ex.getMessage());
throw ex;
}
// ANCHOR_END: main
}
// ANCHOR_END: main
}

View File

@ -8,14 +8,31 @@ import java.sql.SQLException;
import java.util.Properties;
public class WSConnectExample {
// ANCHOR: main
public static void main(String[] args) throws SQLException {
String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
System.out.println("Connected");
conn.close();
// ANCHOR: main
public static void main(String[] args) throws SQLException {
// use
// String jdbcUrl = "jdbc:TAOS-RS://localhost:6041/dbName?user=root&password=taosdata";
// if you want to connect a specified database named "dbName".
String jdbcUrl = "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata";
Properties connProps = new Properties();
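// enable batch fetch, automatic reconnect, UTF-8 charset and the UTC-8 time zone for this connection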
connProps.setProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD, "true");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)){
System.out.println("Connected to " + jdbcUrl + " successfully.");
// you can use the connection to execute SQL here
} catch (SQLException ex) {
// handle any errors; please refer to the JDBC specification for detailed exception info
System.out.println("Failed to connect to " + jdbcUrl + "; ErrCode:" + ex.getErrorCode() + "; ErrMessage: " + ex.getMessage());
throw ex;
} catch (Exception ex){
System.out.println("Failed to connect to " + jdbcUrl + "; ErrMessage: " + ex.getMessage());
throw ex;
}
// ANCHOR_END: main
}
// ANCHOR_END: main
}

View File

@ -1,16 +1,8 @@
const taos = require("@tdengine/websocket");
let influxdbData = ["meters,location=California.LosAngeles,groupId=2 current=11.8,voltage=221,phase=0.28 1648432611249",
"meters,location=California.LosAngeles,groupId=2 current=13.4,voltage=223,phase=0.29 1648432611250",
"meters,location=California.LosAngeles,groupId=3 current=10.8,voltage=223,phase=0.29 1648432611249"];
let jsonData = ["{\"metric\": \"meter_current\",\"timestamp\": 1626846402,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}",
"{\"metric\": \"meter_current\",\"timestamp\": 1626846403,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1002\"}}",
"{\"metric\": \"meter_current\",\"timestamp\": 1626846404,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1003\"}}"]
let telnetData = ["meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
"meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3"];
let influxdbData = ["meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639"];
let jsonData = ["{\"metric\": \"metric_json\",\"timestamp\": 1626846400,\"value\": 10.3, \"tags\": {\"groupid\": 2, \"location\": \"California.SanFrancisco\", \"id\": \"d1001\"}}"]
let telnetData = ["metric_telnet 1707095283260 4 host=host0 interface=eth0"];
async function createConnect() {
let dsn = 'ws://localhost:6041'
@ -30,11 +22,11 @@ async function test() {
try {
wsSql = await createConnect()
await wsSql.schemalessInsert(influxdbData, taos.SchemalessProto.InfluxDBLineProtocol, taos.Precision.MILLI_SECONDS, ttl);
await wsSql.schemalessInsert(jsonData, taos.SchemalessProto.OpenTSDBJsonFormatProtocol, taos.Precision.SECONDS, ttl);
await wsSql.schemalessInsert(telnetData, taos.SchemalessProto.OpenTSDBTelnetLineProtocol, taos.Precision.MILLI_SECONDS, ttl);
await wsSql.schemalessInsert(jsonData, taos.SchemalessProto.OpenTSDBJsonFormatProtocol, taos.Precision.SECONDS, ttl);
}
catch (err) {
console.error(err.code, err.message);
console.error("Failed to insert data with schemaless, ErrCode:" + err.code + "; ErrMessage: " + err.message);
}
finally {
if (wsRows) {

View File

@ -1,6 +1,6 @@
// ANCHOR: createConnect
const taos = require("@tdengine/websocket");
// ANCHOR: createConnect
async function createConnect() {
let dsn = 'ws://localhost:6041';
let conf = new taos.WSConfig(dsn);
@ -28,8 +28,7 @@ async function createDbAndTable(wsSql) {
taosResult = await wsSql.exec('describe meters');
console.log(taosResult);
} catch (err) {
console.error(err.code, err.message);
console.error("Failed to create db and table, ErrCode:" + err.code + "; ErrMessage: " + err.message);
} finally {
if (wsSql) {
await wsSql.close();
@ -56,7 +55,7 @@ async function insertData(wsSql) {
taosResult = await wsSql.exec(insertQuery);
console.log(taosResult);
} catch (err) {
console.error(err.code, err.message);
console.error("Failed to insert data to power.meters, ErrCode:" + err.code + "; ErrMessage: " + err.message);
} finally {
if (wsSql) {
await wsSql.close();
@ -71,7 +70,7 @@ async function queryData() {
let wsSql = null;
try {
wsSql = await createConnect();
wsRows = await wsSql.query('select * from meters');
wsRows = await wsSql.query('SELECT ts, current, location FROM power.meters limit 100');
let meta = wsRows.getMeta();
console.log("wsRow:meta:=>", meta);
while (await wsRows.next()) {
@ -80,7 +79,7 @@ async function queryData() {
}
}
catch (err) {
console.error(err.code, err.message);
console.error("Failed to query data from power.meters," + err.code + "; ErrMessage: " + err.message);
}
finally {
if (wsRows) {
@ -95,22 +94,12 @@ async function queryData() {
// ANCHOR: sqlWithReqid
async function sqlWithReqid(wsSql) {
let insertQuery = "INSERT INTO " +
"power.d1001 USING power.meters (location, groupId) TAGS('California.SanFrancisco', 2) " +
"VALUES " +
"(NOW + 1a, 10.30000, 219, 0.31000) " +
"(NOW + 2a, 12.60000, 218, 0.33000) " +
"(NOW + 3a, 12.30000, 221, 0.31000) " +
"power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) " +
"VALUES " +
"(NOW + 1a, 10.30000, 218, 0.25000) ";
let wsRows = null;
let wsSql = null;
try {
wsSql = await createConnect();
taosResult = await wsSql.exec(insertQuery, 1);
wsRows = await wsSql.query('select * from meters', 2);
wsRows = await wsSql.query('SELECT ts, current, location FROM power.meters limit 100', 1);
let meta = wsRows.getMeta();
console.log("wsRow:meta:=>", meta);
while (await wsRows.next()) {
@ -119,7 +108,7 @@ async function sqlWithReqid(wsSql) {
}
}
catch (err) {
console.error(err.code, err.message);
console.error("Failed to execute sql with reqId," + err.code + "; ErrMessage: " + err.message);
}
finally {
if (wsRows) {

View File

@ -2,13 +2,13 @@ const taos = require("@tdengine/websocket");
let db = 'power';
let stable = 'meters';
let tags = ['California.SanFrancisco', 3];
let values = [
[1706786044994, 1706786044995, 1706786044996],
[10.2, 10.3, 10.4],
[292, 293, 294],
[0.32, 0.33, 0.34],
];
let numOfSubTable = 10;
let numOfRow = 10;
function getRandomInt(min, max) {
min = Math.ceil(min);
max = Math.floor(max);
return Math.floor(Math.random() * (max - min + 1)) + min;
}
async function prepare() {
let dsn = 'ws://localhost:6041'
@ -29,24 +29,36 @@ async function prepare() {
connector = await prepare();
stmt = await connector.stmtInit();
await stmt.prepare(`INSERT INTO ? USING ${db}.${stable} (location, groupId) TAGS (?, ?) VALUES (?, ?, ?, ?)`);
await stmt.setTableName('d1001');
let tagParams = stmt.newStmtParam();
tagParams.setVarchar([tags[0]]);
tagParams.setInt([tags[1]]);
await stmt.setTags(tagParams);
let bindParams = stmt.newStmtParam();
bindParams.setTimestamp(values[0]);
bindParams.setFloat(values[1]);
bindParams.setInt(values[2]);
bindParams.setFloat(values[3]);
await stmt.bind(bindParams);
await stmt.batch();
await stmt.exec();
console.log(stmt.getLastAffected());
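// bind and write one batch of random rows for each sub table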
for (let i = 0; i < numOfSubTable; i++) {
await stmt.setTableName(`d_bind_${i}`);
let tagParams = stmt.newStmtParam();
tagParams.setVarchar([`location_${i}`]);
tagParams.setInt([i]);
await stmt.setTags(tagParams);
let timestampParams = [];
let currentParams = [];
let voltageParams = [];
let phaseParams = [];
const currentMillis = new Date().getTime();
for (let j = 0; j < numOfRow; j++) {
timestampParams.push(currentMillis + j);
currentParams.push(Math.random() * 30);
voltageParams.push(getRandomInt(100, 300));
phaseParams.push(Math.random());
}
let bindParams = stmt.newStmtParam();
bindParams.setTimestamp(timestampParams);
bindParams.setFloat(currentParams);
bindParams.setInt(voltageParams);
bindParams.setFloat(phaseParams);
await stmt.bind(bindParams);
await stmt.batch();
await stmt.exec();
console.log(`d_bind_${i} insert ` + stmt.getLastAffected() + " rows.");
}
}
catch (err) {
console.error(err.code, err.message);
console.error("Failed to insert to table meters using stmt, ErrCode:" + err.code + "; ErrMessage: " + err.message);
}
finally {
if (stmt) {

View File

@ -7,24 +7,30 @@ const topics = ['power_meters_topic'];
// ANCHOR: create_consumer
async function createConsumer() {
let configMap = new Map([
[taos.TMQConstants.GROUP_ID, "gId"],
[taos.TMQConstants.GROUP_ID, "group1"],
[taos.TMQConstants.CLIENT_ID, 'client1'],
[taos.TMQConstants.CONNECT_USER, "root"],
[taos.TMQConstants.CONNECT_PASS, "taosdata"],
[taos.TMQConstants.AUTO_OFFSET_RESET, "latest"],
[taos.TMQConstants.CLIENT_ID, 'test_tmq_client'],
[taos.TMQConstants.WS_URL, 'ws://localhost:6041'],
[taos.TMQConstants.ENABLE_AUTO_COMMIT, 'true'],
[taos.TMQConstants.AUTO_COMMIT_INTERVAL_MS, '1000']
]);
return await taos.tmqConnect(configMap);
try {
return await taos.tmqConnect(configMap);
} catch (err) {
console.log("Failed to create websocket consumer, ErrCode:" + err.code + "; ErrMessage: " + err.message);
throw err;
}
}
// ANCHOR_END: create_consumer
async function prepare() {
let conf = new taos.WSConfig('ws://localhost:6041');
conf.setUser('root')
conf.setPwd('taosdata')
conf.setDb('power')
conf.setUser('root');
conf.setPwd('taosdata');
conf.setDb('power');
const createDB = `CREATE DATABASE IF NOT EXISTS ${db} KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;`;
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
@ -32,10 +38,9 @@ async function prepare() {
await wsSql.exec(createDB);
await wsSql.exec(createStable);
// ANCHOR: create_topic
let createTopic = `CREATE TOPIC IF NOT EXISTS ${topics[0]} AS SELECT * FROM ${db}.${stable}`;
await wsSql.exec(createTopic);
// ANCHOR_END: create_topic
let createTopic = `CREATE TOPIC IF NOT EXISTS ${topics[0]} AS SELECT * FROM ${db}.${stable}`;
await wsSql.exec(createTopic);
for (let i = 0; i < 10; i++) {
await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`);
@ -43,41 +48,35 @@ await wsSql.exec(createTopic);
wsSql.Close();
}
// ANCHOR: subscribe
async function subscribe(consumer) {
await consumer.subscribe(topics);
for (let i = 0; i < 5; i++) {
let res = await consumer.poll(500);
for (let [key, value] of res) {
console.log(key, value);
}
if (res.size == 0) {
break;
}
await consumer.commit();
// ANCHOR: commit
try {
await consumer.subscribe(['topic_meters']);
for (let i = 0; i < 50; i++) {
let res = await consumer.poll(100);
for (let [key, value] of res) {
console.log(key, value);
}
await consumer.commit();
}
} catch (err) {
console.error("Failed to poll data; err.code, ErrCode:" + err.code + "; ErrMessage: " + err.message);
throw err;
}
// ANCHOR_END: commit
}
// ANCHOR_END: subscribe
async function test() {
// ANCHOR: unsubscribe
let consumer = null;
try {
await prepare();
consumer = await createConsumer();
await subscribe(consumer)
// ANCHOR: assignment
let assignment = await consumer.assignment();
console.log(assignment);
assignment = await consumer.seekToBeginning(assignment);
for(let i in assignment) {
console.log("seek after:", assignment[i])
}
// ANCHOR_END: assignment
await subscribe(consumer)
await consumer.unsubscribe();
}
catch (err) {
console.error(err.code, err.message);
console.error("Failed to unsubscribe consume, ErrCode:" + err.code + "; ErrMessage: " + err.message);
}
finally {
if (consumer) {
@ -85,6 +84,7 @@ async function test() {
}
taos.destroy();
}
// ANCHOR_END: unsubscribe
}
test()

View File

@ -0,0 +1,104 @@
const taos = require("@tdengine/websocket");
const db = 'power';
const stable = 'meters';
const topics = ['power_meters_topic'];
// ANCHOR: create_consumer
async function createConsumer() {
let configMap = new Map([
[taos.TMQConstants.GROUP_ID, "group1"],
[taos.TMQConstants.CLIENT_ID, 'client1'],
[taos.TMQConstants.CONNECT_USER, "root"],
[taos.TMQConstants.CONNECT_PASS, "taosdata"],
[taos.TMQConstants.AUTO_OFFSET_RESET, "latest"],
[taos.TMQConstants.WS_URL, 'ws://localhost:6041'],
[taos.TMQConstants.ENABLE_AUTO_COMMIT, 'true'],
[taos.TMQConstants.AUTO_COMMIT_INTERVAL_MS, '1000']
]);
try {
return await taos.tmqConnect(configMap);
} catch (err) {
console.log(err);
throw err;
}
}
// ANCHOR_END: create_consumer
async function prepare() {
let conf = new taos.WSConfig('ws://localhost:6041');
conf.setUser('root');
conf.setPwd('taosdata');
conf.setDb('power');
const createDB = `CREATE DATABASE IF NOT EXISTS ${db} KEEP 3650 DURATION 10 BUFFER 16 WAL_LEVEL 1;`;
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
let wsSql = await taos.sqlConnect(conf);
await wsSql.exec(createDB);
await wsSql.exec(createStable);
let createTopic = `CREATE TOPIC IF NOT EXISTS ${topics[0]} AS SELECT * FROM ${db}.${stable}`;
await wsSql.exec(createTopic);
for (let i = 0; i < 10; i++) {
await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`);
}
wsSql.Close();
}
// ANCHOR: subscribe
async function subscribe(consumer) {
try {
await consumer.subscribe(['topic_meters']);
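// poll up to 50 times; each poll waits up to 100 ms and returns an empty result when no new data arrived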
for (let i = 0; i < 50; i++) {
let res = await consumer.poll(100);
for (let [key, value] of res) {
console.log(key, value);
}
}
} catch (err) {
console.error("Failed to poll data, ErrCode:" + err.code + "; ErrMessage: " + err.message);
throw err;
}
}
// ANCHOR_END: subscribe
// ANCHOR: offset
async function test() {
let consumer = null;
try {
await prepare();
consumer = await createConsumer();
await consumer.subscribe(['topic_meters']);
let res = new Map();
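// poll until at least one message arrives so that partition assignments are available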
while (res.size == 0) {
res = await consumer.poll(100);
}
let assignment = await consumer.assignment();
for (let i in assignment) {
console.log("seek before:", assignment[i]);
}
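// seekToBeginning rewinds every assigned partition to its earliest available offset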
await consumer.seekToBeginning(assignment);
assignment = await consumer.assignment();
for (let i in assignment) {
console.log("seek after:", assignment[i]);
}
await consumer.unsubscribe();
}
catch (err) {
console.error("seek example failed, ErrCode:" + err.code + "; ErrMessage: " + err.message);
}
finally {
if (consumer) {
await consumer.close();
}
taos.destroy();
}
}
// ANCHOR_END: offset
test()


@ -1,19 +1,24 @@
import taos
def create_connection():
conn = None
host = "localhost"
port = 6030
try:
conn = taos.connect(
user="root",
password="taosdata",
host=host,
port=port,
)
print(f"Connected to {host}:{port} successfully.");
except Exception as err:
print(f"Failed to connect to {host}:{port} ; Err:{err}")
finally:
if conn:
conn.close()
if __name__ == "__main__":
create_connection()


@ -0,0 +1,23 @@
# ANCHOR: connect
import taosrest
def create_connection():
conn = None
url="http://localhost:6041"
try:
conn = taosrest.connect(url=url,
user="root",
password="taosdata",
timeout=30)
print(f"Connected to {url} successfully.");
except Exception as err:
print(f"Failed to connect to {url} ; Err:{err}")
finally:
if conn:
conn.close()
# ANCHOR_END: connect
if __name__ == "__main__":
create_connection()


@ -1,11 +1,12 @@
# ANCHOR: connect
from taosrest import connect, TaosRestConnection, TaosRestCursor
conn = connect(url="http://localhost:6041",
user="root",
password="taosdata",
timeout=30)
conn = connect(url="http://localhost:6041",
user="root",
password="taosdata",
timeout=30)
# ANCHOR_END: connect
# ANCHOR: basic
# create STable


@ -1,29 +1,69 @@
# ANCHOR: connect
import taosws
conn = taosws.connect("taosws://root:taosdata@localhost:6041")
# ANCHOR_END: connect
def create_connection():
conn = None
host = "localhost"
port = 6041
try:
conn = taosws.connect(
user="root",
password="taosdata",
host=host,
port=port,
)
print(f"Connected to {host}:{port} successfully.");
except Exception as err:
print(f"Failed to connect to {host}:{port} ; Err:{err}")
return conn
# ANCHOR_END: connect
def create_db_table(conn):
# ANCHOR: create_db
try:
conn.execute("CREATE DATABASE IF NOT EXISTS power")
conn.execute("USE power")
conn.execute("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
conn.execute("CREATE TABLE IF NOT EXISTS `d0` USING `meters` (groupId, location) TAGS(0, 'Los Angles')")
except Exception as err:
print(f'Exception {err}')
# ANCHOR_END: create_db
r = conn.execute("select * from stb")
result = conn.query("select * from stb")
num_of_fields = result.field_count
print(num_of_fields)
def insert(conn):
# ANCHOR: insert
sql = """
INSERT INTO
power.d1001 USING power.meters TAGS('California.SanFrancisco', 2)
VALUES (NOW + 1a, 10.30000, 219, 0.31000)
(NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000)
power.d1002 USING power.meters TAGS('California.SanFrancisco', 3)
VALUES (NOW + 1a, 10.30000, 218, 0.25000)
"""
try:
inserted = conn.execute(sql)
assert inserted == 4
except Exception as err:
print(f'Exception {err}')
# ANCHOR_END: insert
def query(conn):
# ANCHOR: query
try:
result = conn.query("select * from meters")
num_of_fields = result.field_count
print(num_of_fields)
# output:
# 3
# ('2023-02-28 15:56:13.329 +08:00', 1, 1)
# ('2023-02-28 15:56:13.333 +08:00', 2, 1)
# ('2023-02-28 15:56:13.337 +08:00', 3, 1)
for row in result:
print(row)
except Exception as err:
print(f'Exception {err}')
# ANCHOR_END: query
if __name__ == "__main__":
conn = create_connection()
create_db_table(conn)
insert(conn)
query(conn)
if conn:
conn.close()


@ -14,7 +14,6 @@ conn.execute("insert into tb1 values (now, 1)", req_id=6)
conn.execute("insert into tb1 values (now, 2)", req_id=7)
conn.execute("insert into tb1 values (now, 3)", req_id=8)
r = conn.execute("select * from stb", req_id=9)
result = conn.query("select * from stb", req_id=10)
num_of_fields = result.field_count
print(num_of_fields)


@ -1,26 +1,34 @@
import taos
conn = None
host = "localhost"
port = 6030
try:
conn = taos.connect(host=host,
port=port,
user="root",
password="taosdata")
db = "power"
db = "power"
# create database
rowsAffected = conn.execute(f"CREATE DATABASE IF NOT EXISTS {db}")
assert rowsAffected == 0
conn.execute(f"DROP DATABASE IF EXISTS {db}")
conn.execute(f"CREATE DATABASE {db}")
# change database. same as execute "USE db"
conn.select_db(db)
# create super table
rowsAffected = conn.execute(
"CREATE TABLE IF NOT EXISTS `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"
)
assert rowsAffected == 0
# change database. same as execute "USE db"
conn.select_db(db)
# create table
rowsAffected = conn.execute("CREATE TABLE IF NOT EXISTS `d0` USING `meters` (groupid, location) TAGS(0, 'Los Angles')")
assert rowsAffected == 0
except Exception as err:
print(f"Failed to create db and table, db addrr:{host}:{port} err:{err}")
finally:
if conn:
conn.close()


@ -1,18 +1,29 @@
import taosrest
conn = taosrest.connect(url="http://localhost:6041")
conn = None
url = "http://localhost:6041"
try:
conn = taosrest.connect(url=url,
user="root",
password="taosdata",
timeout=30)
db = "power"
db = "power"
# create database
rowsAffected = conn.execute(f"CREATE DATABASE IF NOT EXISTS {db}")
assert rowsAffected == 0
conn.execute(f"DROP DATABASE IF EXISTS {db}")
conn.execute(f"CREATE DATABASE {db}")
# create super table
rowsAffected = conn.execute(
f"CREATE TABLE IF NOT EXISTS `{db}`.`meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"
)
assert rowsAffected == 0
# create table
rowsAffected = conn.execute(f"CREATE TABLE IF NOT EXISTS `{db}`.`d0` USING `{db}`.`meters` (groupid, location) TAGS(0, 'Los Angles')")
assert rowsAffected == 0
except Exception as err:
print(f"Failed to create db and table, url:{url} err:{err}")
finally:
if conn:
conn.close()


@ -1,22 +1,35 @@
import taosws
dsn = "taosws://root:taosdata@localhost:6041"
conn = taosws.connect(dsn)
conn = None
host = "localhost"
port = 6041
try:
conn = taosws.connect(user="root",
password="taosdata",
host=host,
port=port)
db = "power"
db = "power"
# create database
rowsAffected = conn.execute(f"CREATE DATABASE IF NOT EXISTS {db}")
assert rowsAffected == 0
conn.execute(f"DROP DATABASE IF EXISTS {db}")
conn.execute(f"CREATE DATABASE {db}")
# change database.
rowsAffected = conn.execute(f"USE {db}")
assert rowsAffected == 0
# create super table
rowsAffected = conn.execute(
"CREATE TABLE IF NOT EXISTS `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"
)
assert rowsAffected == 0
# create table
rowsAffected = conn.execute("CREATE TABLE IF NOT EXISTS `d0` USING `meters` (groupid, location) TAGS(0, 'Los Angles')")
assert rowsAffected == 0
except Exception as err:
print(f"Failed to create db and table, db addrr:{host}:{port} err:{err}")
finally:
if conn:
conn.close()


@ -1,76 +1,26 @@
import taos
conn = None
db = "power"
try:
conn = taos.connect(user="root",
password="taosdata",
host="localhost",
port=6030)
conn.execute(f"DROP DATABASE IF EXISTS {db}")
conn.execute(f"CREATE DATABASE {db}")
sql = """
INSERT INTO
power.d1001 USING power.meters (groupid, location) TAGS(2, 'California.SanFrancisco')
VALUES (NOW + 1a, 10.30000, 219, 0.31000)
(NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000)
power.d1002 USING power.meters (groupid, location) TAGS(3, 'California.SanFrancisco')
VALUES (NOW + 1a, 10.30000, 218, 0.25000)
"""
affectedRows = conn.execute(sql)
print(f"inserted into {affectedRows} rows to power.meters successfully.")
# change database. same as execute "USE db"
conn.select_db(db)
# create super table
conn.execute(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"
)
# ANCHOR: insert
# insert data
sql = """
INSERT INTO
power.d1001 USING power.meters TAGS('California.SanFrancisco', 2)
VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000)
('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
power.d1002 USING power.meters TAGS('California.SanFrancisco', 3)
VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
power.d1003 USING power.meters TAGS('California.LosAngeles', 2)
VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
power.d1004 USING power.meters TAGS('California.LosAngeles', 3)
VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)
"""
inserted = conn.execute(sql)
assert inserted == 8
# ANCHOR_END: insert
# ANCHOR: query
# Execute a sql and get its result set. It's useful for SELECT statement
result = conn.query("SELECT * from meters")
# Get fields from result
fields = result.fields
for field in fields:
print(field)
"""
output:
{name: ts, type: 9, bytes: 8}
{name: current, type: 6, bytes: 4}
{name: voltage, type: 4, bytes: 4}
{name: phase, type: 6, bytes: 4}
{name: location, type: 8, bytes: 64}
{name: groupid, type: 4, bytes: 4}
"""
# Get data from result as list of tuple
data = result.fetch_all()
for row in data:
print(row)
"""
output:
(datetime.datetime(2018, 10, 3, 14, 38, 16, 650000), 10.300000190734863, 218, 0.25, 'California.SanFrancisco', 3)
...
"""
# ANCHOR_END: query
# ANCHOR: req_id
result = conn.query("SELECT * from meters", req_id=1)
# ANCHOR_END: req_id
conn.close()
except Exception as err:
print(err)
finally:
if conn:
conn.close()


@ -1,48 +1,26 @@
import taosrest
conn = taosrest.connect(url="http://localhost:6041")
conn = None
db = "power"
try:
conn = taosrest.connect(url="http://localhost:6041",
user="root",
password="taosdata",
timeout=30)
conn.execute(f"DROP DATABASE IF EXISTS {db}")
conn.execute(f"CREATE DATABASE {db}")
sql = """
INSERT INTO
power.d1001 USING power.meters (groupid, location) TAGS(2, 'California.SanFrancisco')
VALUES (NOW + 1a, 10.30000, 219, 0.31000)
(NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000)
power.d1002 USING power.meters (groupid, location) TAGS(3, 'California.SanFrancisco')
VALUES (NOW + 1a, 10.30000, 218, 0.25000)
"""
affectedRows = conn.execute(sql)
print(f"inserted into {affectedRows} rows to power.meters successfully.")
except Exception as err:
print(err)
finally:
if conn:
conn.close()


@ -1,71 +1,26 @@
import taosws
dsn = "taosws://root:taosdata@localhost:6041"
conn = taosws.connect(dsn)
conn = None
db = "power"
try:
conn = taosws.connect(user="root",
password="taosdata",
host="localhost",
port=6041)
conn.execute(f"DROP DATABASE IF EXISTS {db}")
conn.execute(f"CREATE DATABASE {db}")
sql = """
INSERT INTO
power.d1001 USING power.meters (groupid, location) TAGS(2, 'California.SanFrancisco')
VALUES (NOW + 1a, 10.30000, 219, 0.31000)
(NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000)
power.d1002 USING power.meters (groupid, location) TAGS(3, 'California.SanFrancisco')
VALUES (NOW + 1a, 10.30000, 218, 0.25000)
"""
affectedRows = conn.execute(sql)
print(f"inserted into {affectedRows} rows to power.meters successfully.")
except Exception as err:
print(err)
finally:
if conn:
conn.close()


@ -0,0 +1,26 @@
import taos
conn = None
try:
conn = taos.connect(host="localhost",
port=6030,
user="root",
password="taosdata")
result = conn.query("SELECT ts, current, location FROM power.meters limit 100")
print(result)
# Get fields from result
fields = result.fields
for field in fields:
print(field)
# Get data from result as list of tuple
data = result.fetch_all()
for row in data:
print(row)
except Exception as err:
print(f"Failed to query data from power.meters, err:{err}")
finally:
if conn:
conn.close()


@ -0,0 +1,15 @@
import taosrest
client = None
try:
client = taosrest.RestClient(url="http://localhost:6041",
user="root",
password="taosdata",
timeout=30)
result = client.sql(f"SELECT ts, current, location FROM power.meters limit 100", 1)
print(result)
except Exception as err:
print(f"Failed to query data from power.meters, err:{err}")


@ -0,0 +1,22 @@
import taosws
conn = None
try:
conn = taosws.connect(user="root",
password="taosdata",
host="localhost",
port=6041)
result = conn.query("SELECT ts, current, location FROM power.meters limit 100")
num_of_fields = result.field_count
print(num_of_fields)
for row in result:
print(row)
except Exception as err:
print(f"Failed to query data from power.meters, err:{err}")
finally:
if conn:
conn.close()


@ -0,0 +1,25 @@
import taos
conn = None
try:
conn = taos.connect(host="localhost",
port=6030,
user="root",
password="taosdata")
result = conn.query("SELECT ts, current, location FROM power.meters limit 100", 1)
# Get fields from result
fields = result.fields
for field in fields:
print(field)
# Get data from result as list of tuple
data = result.fetch_all()
for row in data:
print(row)
except Exception as err:
print(f"Failed to execute sql with reqId, err:{err}")
finally:
if conn:
conn.close()


@ -0,0 +1,15 @@
import taosrest
client = None
try:
client = taosrest.RestClient(url="http://localhost:6041",
user="root",
password="taosdata",
timeout=30)
result = client.sql(f"SELECT ts, current, location FROM power.meters limit 100", 1)
print(result)
except Exception as err:
print(f"Failed to execute sql with reqId, err:{err}")


@ -0,0 +1,22 @@
import taosws
conn = None
try:
conn = taosws.connect(
user="root",
password="taosdata",
host="localhost",
port=6041,
)
result = conn.query_with_req_id("SELECT ts, current, location FROM power.meters limit 100", req_id=1)
for row in result:
print(row)
except Exception as err:
print(f"Failed to execute sql with reqId, err:{err}")
finally:
if conn:
conn.close()


@ -1,36 +1,39 @@
import taos
db = "power"
conn.execute(f"DROP DATABASE IF EXISTS {db}")
conn.execute(f"CREATE DATABASE {db}")
# change database. same as execute "USE db"
conn.select_db(db)
lineDemo = [
"meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639000000"
"meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639"
]
telnetDemo = ["stb0_0 1707095283260 4 host=host0 interface=eth0"]
telnetDemo = ["metric_telnet 1707095283260 4 host=host0 interface=eth0"]
jsonDemo = [
'{"metric": "meter_current","timestamp": 1626846400,"value": 10.3, "tags": {"groupid": 2, "location": "California.SanFrancisco", "id": "d1001"}}'
'{"metric": "metric_json","timestamp": 1626846400,"value": 10.3, "tags": {"groupid": 2, "location": "California.SanFrancisco", "id": "d1001"}}'
]
conn = None
try:
conn = taos.connect(
host="localhost",
user="root",
password="taosdata",
port=6030
)
conn.execute("CREATE DATABASE IF NOT EXISTS power")
# change database. same as execute "USE db"
conn.select_db("power")
conn.schemaless_insert(
lineDemo, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.MILLI_SECONDS
)
conn.schemaless_insert(
telnetDemo, taos.SmlProtocol.TELNET_PROTOCOL, taos.SmlPrecision.MICRO_SECONDS
)
conn.schemaless_insert(
jsonDemo, taos.SmlProtocol.JSON_PROTOCOL, taos.SmlPrecision.MILLI_SECONDS
)
except Exception as err:
print(f"Failed to insert data with schemaless, err:{err}")
finally:
if conn:
conn.close()


@ -1,46 +1,72 @@
import taosws
dsn = "taosws://root:taosdata@localhost:6041"
conn = taosws.connect(dsn)
db = "power"
def prepare():
conn = None
try:
conn = taosws.connect(user="root",
password="taosdata",
host="localhost",
port=6041)
conn.execute(f"DROP DATABASE IF EXISTS {db}")
conn.execute(f"CREATE DATABASE {db}")
# create database
rowsAffected = conn.execute(f"CREATE DATABASE IF NOT EXISTS {db}")
assert rowsAffected == 0
except Exception as err:
print(f"Failed to create db and table, err:{err}")
finally:
if conn:
conn.close()
conn = None
lineDemo = [
"meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639"
]
telnetDemo = ["metric_telnet 1707095283260 4 host=host0 interface=eth0"]
jsonDemo = [
'{"metric": "metric_json","timestamp": 1626846400,"value": 10.3, "tags": {"groupid": 2, "location": "California.SanFrancisco", "id": "d1001"}}'
]
try:
conn = taosws.connect(user="root",
password="taosdata",
host="localhost",
port=6041,
database=db)
conn.schemaless_insert(
lines=lineDemo,
protocol=taosws.PySchemalessProtocol.Line,
precision=taosws.PySchemalessPrecision.Millisecond,
ttl=1,
req_id=1,
)
conn.schemaless_insert(
lines=telnetDemo,
protocol=taosws.PySchemalessProtocol.Telnet,
precision=taosws.PySchemalessPrecision.Microsecond,
ttl=1,
req_id=2,
)
conn.schemaless_insert(
lines=jsonDemo,
protocol=taosws.PySchemalessProtocol.Json,
precision=taosws.PySchemalessPrecision.Millisecond,
ttl=1,
req_id=3,
)
except Exception as err:
print(f"Failed to insert data with schemaless, err:{err}")
finally:
if conn:
conn.close()


@ -1,53 +1,63 @@
import taos
from datetime import datetime
import random
numOfSubTable = 10
numOfRow = 10
db = "power"
conn = None
stmt = None
conn.execute(f"DROP DATABASE IF EXISTS {db}")
conn.execute(f"CREATE DATABASE {db}")
try:
conn = taos.connect(
host="localhost",
user="root",
password="taosdata",
port=6030,
)
conn.execute("CREATE DATABASE IF NOT EXISTS power")
conn.execute("USE power")
conn.execute(
"CREATE TABLE IF NOT EXISTS `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"
)
# ANCHOR: stmt
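# the first ? is the child table name; the TAGS placeholders are bound once per table, the VALUES placeholders per batch of rows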
sql = "INSERT INTO ? USING meters (groupid, location) TAGS(?,?) VALUES (?,?,?,?)"
stmt = conn.statement(sql)
for i in range(numOfSubTable):
tbname = f"d_bind_{i}"
tags = taos.new_bind_params(2)
tags[0].int([i])
tags[1].binary([f"location_{i}"])
stmt.set_tbname_tags(tbname, tags)
tbname = "power.d1001"
current = int(datetime.now().timestamp() * 1000)
timestamps = []
currents = []
voltages = []
phases = []
for j in range (numOfRow):
timestamps.append(current + j)
currents.append(random.random() * 30)
voltages.append(random.randint(100, 300))
phases.append(random.random())
params = taos.new_bind_params(4)
params[0].timestamp(timestamps)
params[1].float(currents)
params[2].int(voltages)
params[3].float(phases)
stmt.bind_param_batch(params)
stmt.execute()
print(f"stmt insert successfully.")
# ANCHOR_END: stmt
result = conn.query("SELECT * from meters")
for row in result.fetch_all():
print(row)
conn.close()
except Exception as err:
print(f"Failed to insert to table meters using stmt, error: {err}")
finally:
if stmt:
stmt.close()
if conn:
conn.close()


@ -1,52 +1,67 @@
from datetime import datetime
import random
import taosws
dsn = "taosws://root:taosdata@localhost:6041"
conn = taosws.connect(dsn)
numOfSubTable = 10
db = "power"
numOfRow = 10
conn.execute(f"DROP DATABASE IF EXISTS {db}")
conn.execute(f"CREATE DATABASE {db}")
conn = None
stmt = None
try:
conn = taosws.connect(user="root",
password="taosdata",
host="localhost",
port=6041)
conn.execute("CREATE DATABASE IF NOT EXISTS power")
conn.execute("USE power")
conn.execute(
"CREATE TABLE IF NOT EXISTS `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"
)
# ANCHOR: stmt
sql = "INSERT INTO ? USING meters (groupid, location) TAGS(?,?) VALUES (?,?,?,?)"
stmt = conn.statement()
stmt.prepare(sql)
for i in range(numOfSubTable):
tbname = f"d_bind_{i}"
tbname = "power.d1001"
tags = [
taosws.int_to_tag(i),
taosws.varchar_to_tag(f"location_{i}"),
]
stmt.set_tbname_tags(tbname, tags)
current = int(datetime.now().timestamp() * 1000)
timestamps = []
currents = []
voltages = []
phases = []
for j in range (numOfRow):
timestamps.append(current + j)
currents.append(random.random() * 30)
voltages.append(random.randint(100, 300))
phases.append(random.random())
stmt.bind_param(
[
taosws.millis_timestamps_to_column(timestamps),
taosws.floats_to_column(currents),
taosws.ints_to_column(voltages),
taosws.floats_to_column(phases),
]
)
stmt.add_batch()
stmt.execute()
print("stmt insert successfully.")
# ANCHOR_END: stmt
except Exception as err:
print(f"Failed to insert to table meters using stmt, error: {err}")
finally:
if stmt:
stmt.close()
if conn:
conn.close()


@ -1,84 +1,163 @@
import taos
def prepareMeta():
conn = None
try:
conn = taos.connect(
host="localhost",
user="root",
password="taosdata",
port=6030,
)
db = "power"
topic = "topic_meters"
db = "power"
topic = "topic_meters"
conn.execute(f"CREATE DATABASE IF NOT EXISTS {db}")
conn.execute(f"DROP TOPIC IF EXISTS {topic}")
conn.execute(f"DROP DATABASE IF EXISTS {db}")
conn.execute(f"CREATE DATABASE {db}")
# change database. same as execute "USE db"
conn.select_db(db)
# change database. same as execute "USE db"
conn.select_db(db)
# create super table
conn.execute(
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)"
)
# ANCHOR: create_topic
# create topic
conn.execute(
f"CREATE TOPIC IF NOT EXISTS {topic} AS SELECT ts, current, voltage, phase, groupid, location FROM meters"
)
# ANCHOR_END: create_topic
sql = """
INSERT INTO
power.d1001 USING power.meters (groupid, location) TAGS(2, 'California.SanFrancisco')
VALUES (NOW + 1a, 10.30000, 219, 0.31000)
(NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000)
power.d1002 USING power.meters (groupid, location) TAGS(3, 'California.SanFrancisco')
VALUES (NOW + 1a, 10.30000, 218, 0.25000)
"""
affectedRows = conn.execute(sql)
print(f"inserted into {affectedRows} rows to power.meters successfully.")
except Exception as err:
print(f"prepare meta err:{err}")
raise err
finally:
if conn:
conn.close()
# ANCHOR: create_consumer
from taos.tmq import Consumer
def create_consumer():
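# group.id identifies the consumer group; consumption progress (offsets) is tracked per group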
try:
consumer = Consumer(
{
"group.id": "group1",
"client.id": "1",
"td.connect.user": "root",
"td.connect.pass": "taosdata",
"enable.auto.commit": "true",
"auto.commit.interval.ms": "1000",
"auto.offset.reset": "latest",
"td.connect.ip": "localhost",
"td.connect.port": "6030",
}
)
return consumer
except Exception as err:
print(f"Failed to poll data, err:{err}")
raise err
# ANCHOR_END: create_consumer
# ANCHOR: subscribe
def subscribe(consumer):
try:
# subscribe to the topics
consumer.subscribe(["topic_meters"])
print("subscribe topics successfully")
for i in range(50):
records = consumer.poll(1)
if records:
err = records.error()
if err is not None:
print(f"poll data error, {err}")
raise err
val = records.value()
if val:
for block in val:
print(block.fetchall())
except Exception as err:
print(f"Failed to poll data, err:{err}")
raise err
# ANCHOR_END: subscribe
def commit_offset(consumer):
# ANCHOR: commit_offset
try:
for i in range(50):
records = consumer.poll(1)
if records:
err = records.error()
if err is not None:
print(f"poll data error, {err}")
raise err
val = records.value()
if val:
for block in val:
print(block.fetchall())
# after processing the data, commit the offset manually
consumer.commit(records)
except Exception as err:
print(f"Failed to poll data, err:{err}")
raise err
# ANCHOR_END: commit_offset
def seek_offset(consumer):
# ANCHOR: assignment
try:
assignments = consumer.assignment()
if assignments:
for partition in assignments:
print(f"first data polled: {partition.offset}")
partition.offset = 0
consumer.seek(partition)
print(f"assignment seek to beginning successfully");
except Exception as err:
print(f"seek example failed; err:{err}")
raise err
# ANCHOR_END: assignment
# ANCHOR: unsubscribe
def unsubscribe(consumer):
try:
consumer.unsubscribe()
except Exception as err:
print(f"Failed to unsubscribe consumer. err:{err}")
# ANCHOR_END: unsubscribe
if __name__ == "__main__":
consumer = None
try:
prepareMeta()
consumer = create_consumer()
subscribe(consumer)
seek_offset(consumer)
commit_offset(consumer)
unsubscribe(consumer)
except Exception as err:
print(f"Failed to stmt consumer. err:{err}")
finally:
if consumer:
consumer.close()


@ -1,31 +1,163 @@
#!/usr/bin/python3
import taosws
topic = "topic_meters"
consumer.subscribe(["test"])
def prepareMeta():
conn = None
try:
conn = taosws.connect(user="root",
password="taosdata",
host="localhost",
port=6041)
db = "power"
# create database
rowsAffected = conn.execute(f"CREATE DATABASE IF NOT EXISTS {db}")
assert rowsAffected == 0
# change database.
rowsAffected = conn.execute(f"USE {db}")
assert rowsAffected == 0
# create super table
rowsAffected = conn.execute(
"CREATE TABLE IF NOT EXISTS `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"
)
assert rowsAffected == 0
# create table
rowsAffected = conn.execute(
"CREATE TABLE IF NOT EXISTS `d0` USING `meters` (groupid, location) TAGS(0, 'Los Angles')")
assert rowsAffected == 0
# ANCHOR: create_topic
# create topic
conn.execute(
f"CREATE TOPIC IF NOT EXISTS {topic} AS SELECT ts, current, voltage, phase, groupid, location FROM meters"
)
# ANCHOR_END: create_topic
sql = """
INSERT INTO
power.d1001 USING power.meters (groupid, location) TAGS(2, 'California.SanFrancisco')
VALUES (NOW + 1a, 10.30000, 219, 0.31000)
(NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000)
power.d1002 USING power.meters (groupid, location) TAGS(3, 'California.SanFrancisco')
VALUES (NOW + 1a, 10.30000, 218, 0.25000)
"""
affectedRows = conn.execute(sql)
print(f"inserted into {affectedRows} rows to power.meters successfully.")
except Exception as err:
print(f"Failed to prepareMeta {err}")
raise err
finally:
if conn:
conn.close()
# ANCHOR: create_consumer
def create_consumer():
try:
consumer = taosws.Consumer(conf={
"td.connect.websocket.scheme": "ws",
"group.id": "group1",
"client.id": "1",
"auto.offset.reset": "latest",
"td.connect.ip": "localhost",
"td.connect.port": "6041",
"enable.auto.commit": "true",
"auto.commit.interval.ms": "1000",
})
return consumer
except Exception as err:
print(f"Failed to create websocket consumer, err:{err}")
raise err
# ANCHOR_END: create_consumer
def seek_offset(consumer):
# ANCHOR: assignment
try:
assignments = consumer.assignment()
for assignment in assignments:
topic = assignment.topic()
print(f"topic: {topic}")
for assign in assignment.assignments():
print(
f"vg_id: {assign.vg_id()}, offset: {assign.offset()}, begin: {assign.begin()}, end: {assign.end()}")
consumer.seek(topic, assign.vg_id(), assign.begin())
print("assignment seek to beginning successfully");
except Exception as err:
print(f"seek example failed; err:{err}")
raise err
# ANCHOR_END: assignment
# ANCHOR: subscribe
def subscribe(consumer):
try:
consumer.subscribe([topic])
print("subscribe topics successfully")
for i in range(50):
records = consumer.poll(timeout=1.0)
if records:
for block in records:
for row in block:
print(row)
except Exception as err:
print(f"Failed to poll data, err:{err}")
raise err
# ANCHOR_END: subscribe
# ANCHOR: commit_offset
def commit_offset(consumer):
try:
for i in range(50):
records = consumer.poll(timeout=1.0)
if records:
for block in records:
for row in block:
print(row)
# after processing the data, commit the offset manually
consumer.commit(records)
except Exception as err:
print(f"Failed to poll data, err:{err}")
raise err
# ANCHOR_END: commit_offset
# ANCHOR: unsubscribe
def unsubscribe(consumer):
try:
consumer.unsubscribe()
except Exception as err:
print("Failed to unsubscribe consumer. err:{err}")
# ANCHOR_END: unsubscribe
if __name__ == "__main__":
consumer = None
try:
prepareMeta()
consumer = create_consumer()
subscribe(consumer)
seek_offset(consumer)
commit_offset(consumer)
unsubscribe(consumer)
except Exception as err:
print(f"Failed to stmt consumer. err:{err}")
finally:
if consumer:
consumer.close()


@ -12,4 +12,4 @@ tokio = { version = "1", features = ["rt", "macros", "rt-multi-thread"] }
log = "0.4"
pretty_env_logger = "0.5.0"
taos = { version = "0.11.8" }
taos = "*"


@ -3,7 +3,7 @@ use taos::*;
#[tokio::main]
async fn main() -> Result<(), Error> {
#[allow(unused_variables)]
let taos = TaosBuilder::from_dsn("taos://")?.build()?;
println!("Connected");
let taos = TaosBuilder::from_dsn("taos://localhost:6030")?.build()?;
println!("Connected to localhost with native connection successfully.");
Ok(())
}


@ -1,46 +1,50 @@
use taos::*;
use chrono::Local;
use chrono::DateTime;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let dsn = "taos://localhost:6030";
let builder = TaosBuilder::from_dsn(dsn)?;
let taos = builder.build().await?;
// ANCHOR: create_db_and_table
let db = "power";
// create database
taos.exec_many([
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("CREATE DATABASE IF NOT EXISTS `{db}`"),
format!("USE `{db}`"),
])
.await?;
println!("Create database power successfully.");
// create super table
taos.exec_many([
"CREATE STABLE IF NOT EXISTS `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
TAGS (`groupid` INT, `location` BINARY(24))",
]).await?;
println!("Create stable meters successfully.");
println!("inserted: {} rows", inserted);
// ANCHOR_END: create_db_and_table
// ANCHOR: insert_data
let inserted = taos.exec(r#"INSERT INTO
power.d1001 USING power.meters TAGS(2,'California.SanFrancisco')
VALUES
(NOW + 1a, 10.30000, 219, 0.31000)
(NOW + 2a, 12.60000, 218, 0.33000)
(NOW + 3a, 12.30000, 221, 0.31000)
power.d1002 USING power.meters TAGS(3, 'California.SanFrancisco')
VALUES
(NOW + 1a, 10.30000, 218, 0.25000) "#).await?;
println!("inserted: {} rows to power.meters successfully.", inserted);
// ANCHOR_END: insert_data
// ANCHOR: query_data
let mut result = taos.query("SELECT * FROM power.meters").await?;
// query data, make sure the database and table are created before
let mut result = taos.query("SELECT ts, current, location FROM power.meters limit 100").await?;
for field in result.fields() {
println!("got field: {}", field.name());
@ -59,8 +63,39 @@ async fn main() -> anyhow::Result<()> {
}
// ANCHOR_END: query_data
// ANCHOR: query_data_2
// query data, make sure the database and table are created before
#[derive(Debug, serde::Deserialize)]
#[allow(dead_code)]
struct Record {
// deserialize timestamp to chrono::DateTime<Local>
ts: DateTime<Local>,
// float to f32
current: Option<f32>,
// int to i32
voltage: Option<i32>,
phase: Option<f32>,
groupid: i32,
// binary/varchar to String
location: String,
}
let records: Vec<Record> = taos
.query("select ts, current, voltage, phase, groupid, location from power.meters limit 100")
.await?
.deserialize()
.try_collect()
.await?;
dbg!(records);
// ANCHOR_END: query_data_2
// ANCHOR: query_with_req_id
let result = taos.query_with_req_id("SELECT ts, current, location FROM power.meters limit 1", 1).await?;
for field in result.fields() {
println!("got field: {}", field.name());
}
println!("query with reqId successfully");
// ANCHOR_END: query_with_req_id
Ok(())
}


@ -2,23 +2,22 @@ use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use taos::AsyncQueryable;
use taos::AsyncTBuilder;
use taos::TaosBuilder;
use taos::taos_query;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn = "taos://localhost:6030".to_string();
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "power";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
@ -28,7 +27,7 @@ async fn put() -> anyhow::Result<()> {
// SchemalessProtocol::Line
let data = [
"meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639000000",
"meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639",
]
.map(String::from)
.to_vec();
@ -44,7 +43,7 @@ async fn put() -> anyhow::Result<()> {
// SchemalessProtocol::Telnet
let data = [
"meters.current 1648432611249 10.3 location=California.SanFrancisco group=2",
"metric_telnet 1707095283260 4 host=host0 interface=eth0",
]
.map(String::from)
.to_vec();
@ -60,7 +59,16 @@ async fn put() -> anyhow::Result<()> {
// SchemalessProtocol::Json
let data = [
r#"[{"metric": "meters.current", "timestamp": 1681345954000, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#
r#"[{
"metric": "metric_json",
"timestamp": 1626846400,
"value": 10.3,
"tags": {
"groupid": 2,
"location": "California.SanFrancisco",
"id": "d1001"
}
}]"#
]
.map(String::from)
.to_vec();
@ -74,7 +82,6 @@ async fn put() -> anyhow::Result<()> {
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
println!("Inserted data with schemaless successfully.");
Ok(())
}


@ -17,6 +17,8 @@ async fn main() -> anyhow::Result<()> {
for i in 0..NUM_TABLES {
let table_name = format!("d{}", i);
let tags = vec![Value::VarChar("California.SanFransico".into()), Value::Int(2)];
// set table name and tags for the prepared statement.
stmt.set_tbname_tags(&table_name, &tags).await?;
for j in 0..NUM_ROWS {
let values = vec![
@ -25,13 +27,17 @@ async fn main() -> anyhow::Result<()> {
ColumnView::from_ints(vec![219 + j as i32]),
ColumnView::from_floats(vec![0.31 + j as f32]),
];
// bind values to the prepared statement.
stmt.bind(&values).await?;
}
stmt.add_batch().await?;
}
// execute.
let rows = stmt.execute().await?;
assert_eq!(rows, NUM_TABLES * NUM_ROWS);
println!("execute stmt insert successfully");
Ok(())
}


@ -38,7 +38,7 @@ async fn main() -> anyhow::Result<()> {
let dsn = "taos://localhost:6030";
let builder = TaosBuilder::from_dsn(dsn)?;
let taos = builder.build().await?;
let db = "tmq";
// prepare database
@ -61,9 +61,10 @@ async fn main() -> anyhow::Result<()> {
// subscribe
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
let mut consumer = tmq.build().await?;
consumer.subscribe(["tmq_meters"]).await?;
// ANCHOR: consumer_commit_manually
consumer
.stream()
.try_for_each(|(offset, message)| async {
@ -78,11 +79,54 @@ async fn main() -> anyhow::Result<()> {
println!("** read {} records: {:#?}\n", records.len(), records);
}
}
// commit offset manually when you have processed the message.
consumer.commit(offset).await?;
Ok(())
})
.await?;
// ANCHOR_END: consumer_commit_manually
// ANCHOR: assignments
let assignments = consumer.assignments().await.unwrap();
log::info!("assignments: {:?}", assignments);
// ANCHOR_END: assignments
// seek offset
for topic_vec_assignment in assignments {
let topic = &topic_vec_assignment.0;
let vec_assignment = topic_vec_assignment.1;
for assignment in vec_assignment {
let vgroup_id = assignment.vgroup_id();
let current = assignment.current_offset();
let begin = assignment.begin();
let end = assignment.end();
log::debug!(
"topic: {}, vgroup_id: {}, current offset: {} begin {}, end: {}",
topic,
vgroup_id,
current,
begin,
end
);
// ANCHOR: seek_offset
let res = consumer.offset_seek(topic, vgroup_id, end).await;
if res.is_err() {
log::error!("seek offset error: {:?}", res);
let a = consumer.assignments().await.unwrap();
log::error!("assignments: {:?}", a);
}
// ANCHOR_END: seek_offset
}
let topic_assignment = consumer.topic_assignment(topic).await;
log::debug!("topic assignment: {:?}", topic_assignment);
}
// after seek offset
let assignments = consumer.assignments().await.unwrap();
log::info!("after seek offset assignments: {:?}", assignments);
consumer.unsubscribe().await;
task.await??;


@ -1,6 +1,7 @@
use std::time::Duration;
use std::str::FromStr;
use chrono::Local;
use chrono::DateTime;
use taos::*;
#[tokio::main]
@ -9,9 +10,11 @@ async fn main() -> anyhow::Result<()> {
.filter_level(log::LevelFilter::Info)
.init();
use taos_query::prelude::*;
// ANCHOR: create_consumer_dsn
let dsn = "taos://localhost:6030".to_string();
log::info!("dsn: {}", dsn);
let mut dsn = Dsn::from_str(&dsn)?;
// ANCHOR_END: create_consumer_dsn
let taos = TaosBuilder::from_dsn(&dsn)?.build().await?;
@ -21,18 +24,8 @@ async fn main() -> anyhow::Result<()> {
"drop database if exists power",
"create database if not exists power WAL_RETENTION_PERIOD 86400",
"use power",
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))",
"create table if not exists power.d001 using power.meters tags(1,'location')",
])
.await?;
@ -43,72 +36,80 @@ async fn main() -> anyhow::Result<()> {
.await?;
// ANCHOR_END: create_topic
// ANCHOR: create_consumer_ac
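// consumer parameters: start from the latest offset, keep table names in messages, and auto-commit offsets every second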
dsn.params.insert("auto.offset.reset".to_string(), "latest".to_string());
dsn.params.insert("msg.with.table.name".to_string(), "true".to_string());
dsn.params.insert("enable.auto.commit".to_string(), "true".to_string());
dsn.params.insert("auto.commit.interval.ms".to_string(), "1000".to_string());
dsn.params.insert("group.id".to_string(), "group1".to_string());
dsn.params.insert("client.id".to_string(), "client1".to_string());
let builder = TmqBuilder::from_dsn(&dsn)?;
let mut consumer = builder.build().await?;
// ANCHOR_END: create_consumer_ac
// ANCHOR: subscribe
consumer.subscribe(["topic_meters"]).await?;
// ANCHOR_END: subscribe
#[derive(Debug, serde::Deserialize)]
#[allow(dead_code)]
struct Record {
// deserialize timestamp to chrono::DateTime<Local>
ts: DateTime<Local>,
// float to f32
current: Option<f32>,
// int to i32
voltage: Option<i32>,
phase: Option<f32>,
groupid: i32,
// binary/varchar to String
location: String,
}
// ANCHOR: consume
consumer
.stream()
.try_for_each(|(offset, message)| async move {
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
let records: Vec<Record> = block.deserialize().try_collect()?;
println!("** read {} records: {:#?}\n", records.len(), records);
}
}
Ok(())
})
.await?;
// ANCHOR_END: consume
// ANCHOR: consumer_commit_manually
consumer
.stream()
.try_for_each(|(offset, message)| async {
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
let records: Vec<Record> = block.deserialize().try_collect()?;
println!("** read {} records: {:#?}\n", records.len(), records);
}
}
// commit offset manually when you have processed the message.
consumer.commit(offset).await?;
Ok(())
})
.await?;
// ANCHOR_END: consumer_commit_manually
// ANCHOR: assignments
let assignments = consumer.assignments().await.unwrap();
@ -157,7 +158,6 @@ async fn main() -> anyhow::Result<()> {
tokio::time::sleep(Duration::from_secs(1)).await;
taos.exec_many([
"drop database db2",
"drop topic topic_meters",
"drop database power",
])


@ -8,5 +8,7 @@ anyhow = "1"
chrono = "0.4"
serde = { version = "1", features = ["derive"] }
tokio = { version = "1", features = ["rt", "macros", "rt-multi-thread"] }
log = "0.4"
pretty_env_logger = "0.5.0"
taos = { version = "0.*" }
taos = "*"


@ -3,7 +3,7 @@ use taos::*;
#[tokio::main]
async fn main() -> Result<(), Error> {
#[allow(unused_variables)]
let taos = TaosBuilder::from_dsn("taos://")?.build()?;
println!("Connected");
let taos = TaosBuilder::from_dsn("taos+ws://localhost:6041")?.build()?;
println!("Connected to localhost with websocket connection successfully.");
Ok(())
}


@ -0,0 +1,78 @@
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use taos::AsyncQueryable;
use taos::AsyncTBuilder;
use taos::TaosBuilder;
use taos::taos_query;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn = "http://localhost:6041/power".to_string();
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
// SchemalessProtocol::Line
let data = [
"meters,groupid=2,location=California.SanFrancisco current=10.3000002f64,voltage=219i32,phase=0.31f64 1626006833639",
]
.map(String::from)
.to_vec();
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(100u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// SchemalessProtocol::Telnet
let data = [
"metric_telnet 1707095283260 4 host=host0 interface=eth0",
]
.map(String::from)
.to_vec();
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(200u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// SchemalessProtocol::Json
let data = [
r#"[{
"metric": "metric_json",
"timestamp": 1626846400,
"value": 10.3,
"tags": {
"groupid": 2,
"location": "California.SanFrancisco",
"id": "d1001"
}
}]"#
]
.map(String::from)
.to_vec();
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(300u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
println!("Inserted data with schemaless successfully.");
Ok(())
}


@ -0,0 +1,43 @@
use taos::*;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let taos = TaosBuilder::from_dsn("ws://")?.build().await?;
taos.exec("DROP DATABASE IF EXISTS power").await?;
taos.create_database("power").await?;
taos.use_database("power").await?;
taos.exec("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)").await?;
let mut stmt = Stmt::init(&taos).await?;
stmt.prepare("INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)").await?;
const NUM_TABLES: usize = 10;
const NUM_ROWS: usize = 10;
for i in 0..NUM_TABLES {
let table_name = format!("d{}", i);
let tags = vec![Value::VarChar("California.SanFrancisco".into()), Value::Int(2)];
// set table name and tags for the prepared statement.
stmt.set_tbname_tags(&table_name, &tags).await?;
for j in 0..NUM_ROWS {
let values = vec![
ColumnView::from_millis_timestamp(vec![1648432611249 + j as i64]),
ColumnView::from_floats(vec![10.3 + j as f32]),
ColumnView::from_ints(vec![219 + j as i32]),
ColumnView::from_floats(vec![0.31 + j as f32]),
];
// bind values to the prepared statement.
stmt.bind(&values).await?;
}
stmt.add_batch().await?;
}
// execute.
let rows = stmt.execute().await?;
assert_eq!(rows, NUM_TABLES * NUM_ROWS);
println!("execute stmt insert successfully");
Ok(())
}

View File

@ -0,0 +1,135 @@
use std::time::Duration;
use chrono::{DateTime, Local};
use taos::*;
// Query options 2, use deserialization with serde.
#[derive(Debug, serde::Deserialize)]
#[allow(dead_code)]
struct Record {
// deserialize timestamp to chrono::DateTime<Local>
ts: DateTime<Local>,
// float to f32
current: Option<f32>,
// int to i32
voltage: Option<i32>,
phase: Option<f32>,
}
async fn prepare(taos: Taos) -> anyhow::Result<()> {
let inserted = taos.exec_many([
// create child table
"CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')",
// insert into child table
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
// insert with NULL values
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
// insert and automatically create table with tags if not exists
"INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)",
// insert many records in a single sql
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
]).await?;
assert_eq!(inserted, 6);
Ok(())
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let dsn = "ws://localhost:6041";
let builder = TaosBuilder::from_dsn(dsn)?;
let taos = builder.build().await?;
let db = "tmq";
// prepare database
taos.exec_many([
format!("DROP TOPIC IF EXISTS tmq_meters"),
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}` WAL_RETENTION_PERIOD 3600"),
format!("USE `{db}`"),
// create super table
format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(24))"),
// create topic for subscription
format!("CREATE TOPIC tmq_meters AS SELECT * FROM `meters`")
])
.await?;
let task = tokio::spawn(prepare(taos));
tokio::time::sleep(Duration::from_secs(1)).await;
// subscribe
let tmq = TmqBuilder::from_dsn("ws://localhost:6041/?group.id=test")?;
let mut consumer = tmq.build().await?;
consumer.subscribe(["tmq_meters"]).await?;
// ANCHOR: consumer_commit_manually
consumer
.stream()
.try_for_each(|(offset, message)| async {
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
let records: Vec<Record> = block.deserialize().try_collect()?;
println!("** read {} records: {:#?}\n", records.len(), records);
}
}
// commit offset manually when you have processed the message.
consumer.commit(offset).await?;
Ok(())
})
.await?;
// ANCHOR_END: consumer_commit_manually
// ANCHOR: assignments
let assignments = consumer.assignments().await.unwrap();
log::info!("assignments: {:?}", assignments);
// ANCHOR_END: assignments
// seek offset
for topic_vec_assignment in assignments {
let topic = &topic_vec_assignment.0;
let vec_assignment = topic_vec_assignment.1;
for assignment in vec_assignment {
let vgroup_id = assignment.vgroup_id();
let current = assignment.current_offset();
let begin = assignment.begin();
let end = assignment.end();
log::debug!(
"topic: {}, vgroup_id: {}, current offset: {} begin {}, end: {}",
topic,
vgroup_id,
current,
begin,
end
);
// ANCHOR: seek_offset
let res = consumer.offset_seek(topic, vgroup_id, end).await;
if res.is_err() {
log::error!("seek offset error: {:?}", res);
let a = consumer.assignments().await.unwrap();
log::error!("assignments: {:?}", a);
}
// ANCHOR_END: seek_offset
}
let topic_assignment = consumer.topic_assignment(topic).await;
log::debug!("topic assignment: {:?}", topic_assignment);
}
// after seek offset
let assignments = consumer.assignments().await.unwrap();
log::info!("after seek offset assignments: {:?}", assignments);
consumer.unsubscribe().await;
task.await??;
Ok(())
}

View File

@ -0,0 +1,166 @@
use std::time::Duration;
use std::str::FromStr;
use chrono::Local;
use chrono::DateTime;
use taos::*;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
pretty_env_logger::formatted_timed_builder()
.filter_level(log::LevelFilter::Info)
.init();
use taos_query::prelude::*;
// ANCHOR: create_consumer_dsn
let dsn = "ws://localhost:6041".to_string();
log::info!("dsn: {}", dsn);
let mut dsn = Dsn::from_str(&dsn)?;
// ANCHOR_END: create_consumer_dsn
let taos = TaosBuilder::from_dsn(&dsn)?.build().await?;
// prepare database and table
taos.exec_many([
"drop topic if exists topic_meters",
"drop database if exists power",
"create database if not exists power WAL_RETENTION_PERIOD 86400",
"use power",
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))",
"create table if not exists power.d001 using power.meters tags(1,'location')",
])
.await?;
// ANCHOR: create_topic
taos.exec_many([
"CREATE TOPIC IF NOT EXISTS topic_meters AS SELECT ts, current, voltage, phase, groupid, location FROM power.meters",
])
.await?;
// ANCHOR_END: create_topic
// ANCHOR: create_consumer_ac
dsn.params.insert("auto.offset.reset".to_string(), "latest".to_string());
dsn.params.insert("msg.with.table.name".to_string(), "true".to_string());
dsn.params.insert("enable.auto.commit".to_string(), "true".to_string());
dsn.params.insert("auto.commit.interval.ms".to_string(), "1000".to_string());
dsn.params.insert("group.id".to_string(), "group1".to_string());
dsn.params.insert("client.id".to_string(), "client1".to_string());
let builder = TmqBuilder::from_dsn(&dsn)?;
let mut consumer = builder.build().await?;
// ANCHOR_END: create_consumer_ac
// ANCHOR: subscribe
consumer.subscribe(["topic_meters"]).await?;
// ANCHOR_END: subscribe
#[derive(Debug, serde::Deserialize)]
#[allow(dead_code)]
struct Record {
// deserialize timestamp to chrono::DateTime<Local>
ts: DateTime<Local>,
// float to f32
current: Option<f32>,
// int to i32
voltage: Option<i32>,
phase: Option<f32>,
groupid: i32,
// binary/varchar to String
location: String,
}
// ANCHOR: consume
consumer
.stream()
.try_for_each(|(offset, message)| async move {
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
let records: Vec<Record> = block.deserialize().try_collect()?;
println!("** read {} records: {:#?}\n", records.len(), records);
}
}
Ok(())
})
.await?;
// ANCHOR_END: consume
// ANCHOR: consumer_commit_manually
consumer
.stream()
.try_for_each(|(offset, message)| async {
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
let records: Vec<Record> = block.deserialize().try_collect()?;
println!("** read {} records: {:#?}\n", records.len(), records);
}
}
// commit offset manually when you have processed the message.
consumer.commit(offset).await?;
Ok(())
})
.await?;
// ANCHOR_END: consumer_commit_manually
// ANCHOR: assignments
let assignments = consumer.assignments().await.unwrap();
log::info!("assignments: {:?}", assignments);
// ANCHOR_END: assignments
// seek offset
for topic_vec_assignment in assignments {
let topic = &topic_vec_assignment.0;
let vec_assignment = topic_vec_assignment.1;
for assignment in vec_assignment {
let vgroup_id = assignment.vgroup_id();
let current = assignment.current_offset();
let begin = assignment.begin();
let end = assignment.end();
log::debug!(
"topic: {}, vgroup_id: {}, current offset: {} begin {}, end: {}",
topic,
vgroup_id,
current,
begin,
end
);
// ANCHOR: seek_offset
let res = consumer.offset_seek(topic, vgroup_id, end).await;
if res.is_err() {
log::error!("seek offset error: {:?}", res);
let a = consumer.assignments().await.unwrap();
log::error!("assignments: {:?}", a);
}
// ANCHOR_END: seek_offset
}
let topic_assignment = consumer.topic_assignment(topic).await;
log::debug!("topic assignment: {:?}", topic_assignment);
}
// after seek offset
let assignments = consumer.assignments().await.unwrap();
log::info!("after seek offset assignments: {:?}", assignments);
// ANCHOR: unsubscribe
consumer.unsubscribe().await;
// ANCHOR_END: unsubscribe
tokio::time::sleep(Duration::from_secs(1)).await;
taos.exec_many([
"drop topic topic_meters",
"drop database power",
])
.await?;
Ok(())
}

138
docs/zh/02-concept.md Normal file
View File

@ -0,0 +1,138 @@
---
sidebar_label: 时序数据基础
title: 时序数据基础
description: 时序数据基础概念
toc_max_heading_level: 4
---
## 什么是时序数据?
时序数据即时间序列数据Time-Series Data它们是一组按照时间发生先后顺序进行排列的序列数据。日常生活中设备、传感器采集的数据就是时序数据证券交易的记录也是时序数据。因此时序数据的处理并不陌生特别是在工业自动化以及证券金融行业专业的时序数据处理软件早已存在比如工业领域的 PI System 以及金融行业的 KDB。
这些时序数据是周期、准周期产生的,或事件触发产生的,有的采集频率高,有的采集频率低。一般被发送至服务器中进行汇总并进行实时分析和处理,对系统的运行做出实时监测或预警,对股市行情进行预测。这些数据也可以被长期保存下来,用以进行离线数据分析。比如统计时间区间内设备的运行节奏与产出,分析如何进一步优化配置来提升生产效率;统计一段时间内生产过程中的成本分布,分析如何降低生产成本;统计一段时间内的设备异常值,结合业务分析潜在的安全隐患,以降低故障时长等等。
过去的二十年,随着数据通讯成本的急剧下降,以及各种传感技术和智能设备的出现,特别是物联网与工业 4.0 的推动,工业、物联网企业为了监测设备、环境、生产线及整个系统的运行状态,在各个关键点都配有传感器,采集各种数据。从手环、共享出行、智能电表、环境监测设备到电梯、数控机床、挖掘机、工业生产线等都在源源不断的产生海量的实时数据,时序数据的体量正指数级的增长。以智能电表为例,智能电表每隔 15 分钟采集一次数据,每天会自动生成 96 条记录。现在全中国已经有超过 10 亿台智能电表,一天就产生 960 亿条时序数据。一台联网的汽车往往每隔 10 到 15 秒采集一次数据发到云端,那么一天下来就很容易产生 1000 条记录。假设中国有 2 亿车辆联网,它们每天将产生总计 2000 亿条甚至更多的时序数据。
由于数据量指数级的增长,而且对分析和实时计算的需求越来越多,特别是在人工智能的时代,传统的时序数据处理工具难以满足需求,对每天高达 10TB 级别的海量时序大数据如何进行实时的存储、分析和计算,成为一个技术挑战,因此海量时序大数据的高效处理在过去的十年获得全球工业界的高度关注。
## 时序数据的十大特征
相对于普通的互联网的应用数据,时序数据有着很多明显的特征。涛思数据的创始人陶建辉先生早在 2017 年,就对此进行了充分地归纳分析,总结了时序数据本身以及时序数据应用的十大特征:
1. 数据是时序的,一定带有时间戳:联网的设备按照设定的周期,或受外部的事件触发,源源不断地产生数据,每条记录都是在一个时间点产生的,其时间戳必须记录,否则记录的值没有任何意义。
2. 数据是结构化的:物联网、工业设备产生的数据以及证券交易数据往往是结构化的,而且绝大多数都是数值型的,比如智能电表采集的电流、电压就可以用 4 字节的标准的浮点数来表示。
3. 一个数据采集点就是一个数据流:一个设备采集的数据、以及一支股票的交易数据,与另外一个设备采集的数据或股票是完全独立的。一台设备的数据一定是这台设备产生的,不可能是人工或其他设备产生的。一台设备产生的数据或一支股票的交易数据只有一个生产者,也就是说数据源是唯一的。
4. 数据较少有更新删除操作:对于一个典型的信息化或互联网应用,记录可能是经常需要修改或删除的。但对于设备或交易产生的数据正常情况下不会去更新/删除。
5. 数据不依赖于事务:在设备产生的数据中,具体的单条数据价值相对不高,数据的完整性和一致性并不像传统关系型数据库那样严格,大家关心的是趋势,所以不需要引入复杂的事务机制。
6. 相对互联网应用,写多读少:对于互联网应用,一条数据记录,往往是一次写,很多次读。比如一条微博或一篇微信公共号文章,一次写,但有可能上百万人读。但工业、物联网设备产生的数据不一样,一般是计算、分析程序自动读,且次数不多,只有遇到事故、人们才会主动读取原始数据。
7. 用户关注的是一段时间的趋势:对于一条银行交易记录,或者一条微博、微信,对于它的用户而言,每一条都很重要。但对于物联网、工业时序数据,每个数据点与数据点的变化并不大,大家关心的更多是一段时间,比如过去五分钟、一小时数据变化的趋势,不会只针对一个时间点进行。
8. 数据是有保留期限的:采集的数据一般都有基于时长的保留策略,比如仅仅保留一天、一周、一个月、一年甚至更长时间,该类数据的价值往往是由时间段决定的,因此对于不在重要时间段内的数据,都是可以被视为过期数据整块删除的。
9. 需要实时分析计算操作:对于大部分互联网大数据应用,更多的是离线分析,即使有实时分析,但要求并不高。比如用户画像、可以积累一定的用户行为数据后进行,早一天晚一天画不会特别影响结果。但是对于工业、物联网的平台应用以及交易系统,对数据的实时计算要求就往往很高,因为需要根据计算结果进行实时报警、监控,从而避免事故的发生、决策时机的错过。
10. 流量平稳、可预测:给定工业、物联网设备数量、数据采集频次,就可以较为准确的估算出所需要的带宽、流量、存储等数字,以及每天新生成的数据大小。而不是像电商,在双 11 期间,淘宝、天猫、京东等流量是几十倍的涨幅。也不像 12306 网站,春节期间,网站流量是几十倍的增长。
上述的特征使时序数据的处理具有着独特的需求和挑战。但是反过来说,对于一个高效的时序数据处理平台,它也必然充分利用这十大特征来提升它的处理能力。
## 时序数据的典型应用场景
时序数据应用的细分场景有很多,这里简单列举一些
1. 电力能源领域:电力能源领域范围较大,不论是在发电、输电、配电、用电还是其他环节中,各种电力设备都会产生大量时序数据,以风力发电为例,风电机作为大型设备,拥有可能高达数百的数据采集点,因此每日所产生的时序数据量极其之大,对这些数据的监控分析是确保发电环节准确无误的必要工作。在用电环节,对智能电表实时采集回来的电流、电压等数据进行快速计算,实时了解最新的用电总量、尖、峰、平、谷用电量,判断设备是否正常工作。有些时候,电力系统可能需要拉取历史上某一年的全量数据,通过机器学习等技术分析用户的用电习惯、进行负荷预测、节能方案设计、帮助电力公司合理规划电力的供应。或者拉取上个月的尖峰平谷用电量,根据不同价位进行周期性的电费结算,以上都是时序数据在电力能源领域的典型应用。
2. 车联网/轨道交通领域:车辆的 GPS 、速度、油耗、故障信息等,都是典型的时序数据,通过对它们科学合理地数据分析,可以为车辆管理和优化提供强有力的支持。但是,不同车型采集的点位信息从数百点到数千点之间不一而同,随着联网的交通设备数量越来越多,这些海量的时序数据如何安全上传、数据存储、查询和分析,成为了一个亟待解决的行业问题。对于交通工具的本身,科学合理地处理时序数据可以实现车辆轨迹追踪、无人驾驶、故障预警等功能。对于交通工具的整体配套服务,也可以提供良好的支持。比如,在新一代的智能地铁管理系统中,通过地铁站中各种传感器的时序数据采集分析,可以在站中实时展示各个车厢的拥挤度、温度、舒适度等数据,让用户可以自行选择体验度最佳的出行方案,对于地铁运营商,也可以更好地实现乘客流量的调度管理。
3. 智能制造领域:过去的十几年间,许多传统工业企业的数字化得到了长足的发展,单个工厂从传统的几千个数据采集点,到如今数十万点、上百万点,部分远程运维场景面临上万设备、千万点的数据采集存储的需求,这些数据都属于典型的时序数据。就整个工业大数据系统而言,时序数据的处理是相当复杂的。以烟草行业的数据采集为例,设备的工业数据协议各式各样、数据采集单位随着设备类型的不同而不同。数据的实时处理能力随着数据采集点的持续增加而难以跟上,与此同时还要兼顾数据的高性能、高可用、可拓展性等等诸多特性。但从另一个角度来看,如果大数据平台能够解决以上困难,满足企业对于时序数据存储分析的需求,就可以帮助企业实现更加智能化、自动化的生产模式,从而完成质的飞升。
4. 智慧油田:智慧油田也称为数字油田或智能油田,是指利用先进的信息技术和装备,实现油气田层析图和动态生产数据实时更新,提升油气田的开发效率和经济效益的一种油田开发模式。在长期的建设和探索中,钻井、录井、测井、开发生产等勘探开发业务,产生了来自油井、水井、气井等数十种设备的大量时序数据。为了实现统一以油气生产指挥中心为核心的油气生产信息化智能管控模式,满足科学高效智能的油气生产管理需求。该系统需要确保油田几万口油气水井、阀组、加热炉等设备的实时数据处理,做到高效的写入和查询、节省存储空间、基于业务灵活水平扩展、系统简单易用、数据安全可靠。更有一些大型的智慧油田项目,还会把全国各个地区的油田的生产数据实时同步汇总到总部的云端平台,依靠“边云协同”的方式完成“数据入湖”的统一筹划管理模式。
5. IT 运维领域IT 领域中,基础设施(如服务器、网络设备、存储设备)、应用程序运行的过程中会产生大量的时序数据。通过对这些时序数据的监控,可以很快地发现基础设施/应用的运行状态和服务可用性,包括系统是否在线、服务是否正常响应等;也能看到具体到某一个具体的点位的性能指标:如 CPU 利用率、内存利用率、磁盘空间利用率、网络带宽利用率等; 还可以监控系统产生的错误日志和异常事件,包括入侵检测、安全事件日志、权限控制等,最终通过设置报警规则,及时通知管理员或运维人员具体的情况,从而及时发现问题、预防故障,并优化系统性能,确保系统稳定可靠地运行。
6. 金融领域:金融领域目前正经历着数据管理的一场革命,它们的行情数据属于典型的时序数据,由于保留行情数据的储存期限往往需长达 5 至 10 年,甚至超过 30 年,而且可能全世界各个国家/地区的主流金融市场的交易数据都需要全量保存,因此行情数据的总量数据体量庞大,会轻松达到 TB 级别,造成存储、查询等等各方面的瓶颈。在金融领域中,量化交易平台是最能凸显时序数据处理重要性的革命性应用之一:通过对大量时序行情数据的读取分析来及时响应市场变化,帮助交易者把握投资机会,同时规避不必要的风险,实现资产的稳健增长。可以实现包括但不限于:资产管理、情绪监控、股票回测、交易信号模拟、报表自动生成等等诸多功能。
## 处理时序数据所需要的工具
如果想要高效地处理时序数据,一个完整的时序数据处理平台一定要准备好以下几个核心模块。
1. 数据库Database数据库提供时序数据的高效存储和读取能力。在工业、物联网场景由设备所产生的时序数据量是十分惊人的。从存储数据的角度来说数据库需要把这些数据持久化到硬盘上并最大程度地压缩从而降低存储成本。从读取数据的角度来说数据库需要保证实时查询以及历史数据的查询效率。比较传统的存储方案是使用 MySql、Oracle 等关系型数据库,也有 Hadoop 体系的 HBase专用的时序数据库则有 InfluxDB、OpenTSDB、Prometheus 等。
2. 数据订阅Data Subscription很多时序数据应用都需要在第一时间订阅到业务所需的实时数据从而及时了解被监测对象的最新状态用 AI 或其他工具做实时的数据分析。同时,由于数据的隐私以及安全,你只能容许应用订阅其有权限访问的数据。因此,一个时序数据处理平台一定需要具备数据订阅的能力,帮助应用实时获取最新数据。
3. ETLExtract Transform Load在实际的物联网、工业场景中时序数据的采集需要特定的 ETL 工具进行数据的提取、清洗和转换操作,才能把数据写入数据库中,以保证数据的质量。因为不同数据采集系统往往使用不同的标准,比如采集的温度的物理单位不一致,有的用摄氏度,有的用华氏度;系统之间所在的时区不一致,要进行转换;时间分辨率也可能不统一,因此这些从不同系统汇聚来的数据需要进行转换才能写入数据库(单位换算的一个最小示意见本节列表后的代码)。
4. 流计算Stream computing物联网、工业、金融应用需要对时序数据流进行高效快速计算通过所得到的计算结果来满足实时业务的需求。比如面对实时采集到的每个智能电表的电流和电压数据需要立刻算出各个电表的有功功率、无功功率。因此时序数据处理平台通常会选择一些比如 Apache Spark、Apache Flink 等等的流处理框架。
5. 缓存Cache物联网、工业、金融应用需要实时展示一些设备或股票的最新状态因此平台需要缓存技术提供快速的数据访问。原因是由于时序数据体量极大如果不使用缓存技术而是进行常规的读取、筛选那么对于监控设备最新状态之类的计算是十分困难的将会导致很大的延迟从而失去“实时”的意义。因此缓存技术是时序数据处理平台不可缺少的一环 Redis 就是这样一种常用的缓存工具。
处理时序数据需要一系列模块的协同作业,从数据采集到存储、计算、分析与可视化,再到专用的时序数据算法库,每个环节都有相应的工具支持。这些工具的选择取决于具体的业务需求和数据特点,合理地选用和搭配它们才能做到高效地处理各种类型的时序数据,挖掘数据背后的价值。
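下面给出上文第 3 点“单位换算”这类清洗/转换逻辑的一个最小 Rust 示意(纯演示,数值与函数名均为示例假设,与任何具体 ETL 工具无关):
```rust
// 最小示意:把来自华氏度系统的温度读数统一换算为摄氏度,是典型的 ETL 转换步骤
fn fahrenheit_to_celsius(f: f64) -> f64 {
    (f - 32.0) * 5.0 / 9.0
}

fn main() {
    // 假设这批读数来自使用华氏度的采集系统
    let readings_f = [68.0, 75.2, 98.6];
    let readings_c: Vec<f64> = readings_f
        .iter()
        .map(|&f| fahrenheit_to_celsius(f))
        .collect();
    println!("{:?}", readings_c); // [20.0, 24.0, 37.0]
}
```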
## 专用时序数据处理工具的必要性
在时序数据的十大特征一节中提到,对于一个优秀的时序大数据处理平台来说,它必然需要具备处理时序数据十大特征的能力。在处理时序数据所需要的工具一节中介绍了时序大数据平台处理时序数据所需要的主要模块/组件。 结合这两节的内容与实际情况,可以发现:处理海量时序数据,其实是一个很庞大复杂的系统。
早些年,为处理日益增长的互联网数据,众多的工具开始出现,最流行的便是 Hadoop 体系。除使用大家所熟悉的 Hadoop 组件如 HDFS、MapReduce、HBase 和 Hive 外,通用的大数据处理平台往往还使用 Kafka 或其他消息队列工具Redis 或其他缓存软件Flink 或其他实时流式数据处理软件。存储上也有人选用 MongoDB、Cassandra 或其他 NoSQL 数据库。这样一个典型的大数据处理平台基本上能很好的处理互联网行业的引用,比如典型的用户画像、舆情分析等。
因此很自然,在工业、物联网大数据兴起后,大家仍然想到的是使用这套通用的大数据处理平台来处理时序数据。现在市场上流行的物联网、车联网等大数据平台几乎无一例外是这类架构,这套方法被证明完全可以工作,但效果仍然有很多不足:
1. 开发效率低:因为不是单一软件,需要集成至少 4 个以上模块,而且很多模块都不是标准的 POSIX 或 SQL 接口,都有自己的开发工具、开发语言、配置,需要一定的学习成本。而且由于数据从一个模块流动到另外一个模块,数据一致性容易受到破坏。同时,这些模块基本上都是开源软件,难免遇到各种 BUG即使有技术论坛、社区的支持一旦被一技术问题卡住总要耗费工程师不少时间。总的来讲需要搭建一个还不错的团队才能将这些模块顺利的组装起来因此需要耗费较大的人力资源。
2. 运行效率低:现有的这些开源软件主要用来处理互联网上的非结构化数据,比如文本、视频、图片数据等,但是通过物联网采集来的数据都是时序的、结构化的。用非结构化数据处理技术来处理结构化数据,无论是存储还是计算,消费的资源都大很多。
3. 运维成本高:每个模块,无论是 Kafka、HBase、HDFS 还是 Redis都有自己的管理后台都需要单独管理。在传统的信息系统中数据库管理员只要学会管理 MySQL 或是 Oracle 就可以了,但现在数据库管理员需要学会管理、配置、优化很多模块,工作量大了很多。由于模块数过多,定位一个问题就变得更为复杂。比如,用户发现有一条采集的数据丢失了,至于是 Kafka、HBase、Spark 丢失的,还是应用程序丢失的,则无法迅速定位,往往需要花很长时间,只有将各模块的日志关联起来才能找到原因。而且模块越多,系统整体的稳定性就越低。
4. 产品推出慢、利润低:由于源软件研发效率低,运维成本高,导致将产品推向市场的时间变长,让企业丧失商机。而且这些开源软件都在演化中,要同步使用最新的版本也需要耗费一定的人力。除互联网头部公司外,中小型公司在通用大数据平台上花费的人力资源成本一般都远超过专业公司的产品或服务费用。
5. 对于小数据量场景,私有化部署太重:在物联网、车联网场景中,因为涉及到生产经营数据的安全,很多还是采取私有化部署。而每个私有化部署,处理的数据量有很大的区别,从几百台联网设备到数千万台设备不等。对于数据量小的场景,通用的大数据解决方案就显得过于臃肿,投入产出不成正比。因此有的平台提供商往往有两套方案,一套针对大数据场景,使用通用的大数据平台,一套针对小数据规模场景,就使用 MySQL 或其他数据库来搞定一切,但是随着历史数据的累积,或接入设备量的增长,关系型数据库性能不足、运维复杂、扩展性差等缺点都会逐渐暴露出来,终究不是长久之计。
由于存在这些根本性的缺陷,导致高速增长的时序大数据市场一直没有一个简单好用而又高效的工具。于是,近些年一批专注时序数据处理的企业杀入了这个赛道,比如美国的 InfluxData其产品 InfluxDB 在 IT 运维监测方面有相当的市场占有率。开源社区也十分活跃,比如基于 HBase 开发的 OpenTSDB中国国内的阿里、百度、华为都有基于 OpenTSDB 的产品;涛思数据不依赖任何第三方,推出了自主研发而且开源的 TDengine。
由于数据量巨大且应用方式特殊,对时序数据的处理具有相当大的技术挑战,因此要使用专业的大数据平台。对实时时序数据的科学合理地高效处理能够帮助企业实时监控生产与经营过程,对历史时序数据的分析有助于对资源的使用和生产配置做出科学的决策。
## 选择时序数据处理工具的标准
毫无疑问,我们需要一个优秀的时序大数据平台来处理设备、交易产生的海量数据。那么,这个大数据平台需要具备哪些能力?与通用的大数据平台相比,它需要具备什么样的特征呢?
1. 必须是分布式系统:首先,由工业、物联网设备产生的海量数据,是任何一台单独的服务器都无能力处理的,因此处理系统必须是可分布式的、水平扩展的。这个平台在设计层面就必须能够高效地处理高基数难题:以智能电表为例,每个设备都有自己的设备 ID、城市 ID、厂商 ID 和模型 ID 等标签。几百个城市,百万级设备,再加上不同的厂商、模型。相乘之下,基数轻松超过百亿级。假如想找到某一个设备的数据,需要在百亿级的基数中筛选过滤,难度可想而知,这便是时序数据领域经典的“高基数”难题。即便是很多中小型项目,过亿的基数也是十分常见的。所以,对于时序数据工具的选型,一定要看它的架构模型能否撑得起你的业务基数。一个能够通过分布式的架构来处理“高基数”难题,才能让平台足以支撑业务的增长,才可以说是一个真正意义上的时序大数据平台。
2. 必须是高性能:“高性能”是一个相对的概念,它描述的是一款产品与其他产品相比而来的性能表现。不同大数据平台的硬件规模和需求都是不一致的,但是一个好的大数据平台绝对不应该依赖于“大硬件”,而是应该拥有强悍的单点工作能力,用更少的资源达到更好的性能,这样才是真正的做到“降本”和“增效”。如果专用的时序大数据处理平台不能在存储、读取、分析这些方面做到“高性能”,那么为什么不采用通用的大数据平台呢?
3. 必须是满足实时计算的系统:互联网大数据处理,大家所熟悉的场景是用户画像、推荐系统、舆情分析等等,这些场景并不需要什么实时性,批处理即可。但是对于物联网场景,需要基于采集的数据做实时预警、决策,延时要控制在秒级以内。如果计算没有实时性,物联网的商业价值就大打折扣。
4. 必须拥有运营商级别的高可靠服务:工业、物联网系统对接的往往是生产、经营系统,如果数据处理系统宕机,直接导致停产,产生经济有损失、导致对终端消费者的服务无法正常提供。比如智能电表,如果系统出问题,直接导致的是千家万户无法正常用电。因此工业、物联网大数据系统必须是高可靠的,必须支持数据实时备份,必须支持异地容灾,必须支持软件、硬件在线升级,必须支持在线 IDC 机房迁移,否则服务一定有被中断的可能。
5. 必须拥有高效的缓存功能:绝大部分场景,都需要能快速获取设备当前状态或其他信息,用以报警、大屏展示或其他。系统需要提供一高效机制,让用户可以获取全部、或符合过滤条件的部分设备的最新状态。
6. 必须拥有实时流式计算:各种实时预警或预测已经不是简单的基于某一个阈值进行,而是需要通过将一个或多个设备产生的数据流进行实时聚合计算,不只是基于一个时间点、而是基于一个时间窗口进行计算。不仅如此,计算的需求也相当复杂,因场景而异,应容许用户自定义函数进行计算。
7. 必须支持数据订阅:与通用大数据平台比较一致,同一组数据往往有很多应用都需要,因此系统应该提供订阅功能,只要有新的数据更新,就应该实时提醒应用。由于数据隐私和安全,而且这个订阅也应该是个性化的,只能订阅有权查看的数据,比如仅仅能订阅每小时的平均功率,而不能订阅原始的电流、电压值。
8. 必须保证数据能持续稳定写入:对于联网设备产生的数据,数据流量往往是平稳的,因此数据写入所需要的资源往往是可以估算的。但是变化的是查询、分析,特别是即席查询,有可能耗费很大的系统资源,不可控。因此系统必须保证分配足够的资源以确保数据能够写入系统而不被丢失。准确的说,系统必须是一个写优先系统。
9. 必须保证实时数据和历史数据的处理合二为一:实时数据在缓存里,历史数据在持久化存储介质里,而且可能依据时长,保留在不同存储介质里。系统应该隐藏背后的存储,给用户和应用呈现的是同一个接口和界面。无论是访问新采集的数据还是十年前的老数据,除输入的时间参数不同之外,其余应该是一样的。
10. 必须支持灵活的多维度分析:对于联网设备产生的数据,需要进行各种维度的统计分析,比如从设备所处的地域进行分析,从设备的型号、供应商进行分析,从设备所使用的人员进行分析等等。而且这些维度的分析是无法事先想好的,是在实际运营过程中,根据业务发展的需求定下来的。因此时序大数据系统需要一个灵活的机制增加某个维度的分析。
11. 需要支持即席分析和查询。为提高大数据分析师的工作效率,系统应该提供一命令行工具或容许用户通过其他工具,执行 SQL 查询,而不是非要通过编程接口。查询分析的结果可以很方便的导出,再制作成各种图表。
12. 必须支持数据降频、插值、特殊函数计算等操作。原始数据的采集频次可能很高,但具体分析往往不需要对原始数据执行,而是数据降频之后。系统需要提供高效的数据降频操作。设备是很难同步的,不同设备采集数据的时间点是很难对齐的,因此分析一个特定时间点的值,往往需要插值才能解决,系统需要提供线性插值、设置固定值等多种插值策略才行。工业互联网里,除通用的统计操作之外,往往还需要支持一些特殊函数,比如时间加权平均、累计求和、差值等。
13. 必须提供灵活的数据管理策略。一个大的系统,采集的数据种类繁多,而且除采集的原始数据外,还有大量的衍生数据。这些数据各自有不同的特点,有的采集频次高,有的要求保留时间长,有的需要多个副本以保证更高的安全性,有的需要能快速访问。因此物联网大数据平台必须提供多种策略,让用户可以根据特点进行选择和配置,而且各种策略并存。
14. 必须是开放的。系统需要支持业界流行的标准 SQL提供各种语言开发接口包括 C/C++、Java、Go、Python、RESTful 等接口,也需要支持 Spark、R、Matlab 等工具,方便集成各种机器学习、人工智能算法或其他应用,让大数据处理平台能够不断扩展,而不是成为一个孤岛。
15. 必须支持异构环境。大数据平台的搭建是一个长期的工作,每个批次采购的服务器和存储设备都会不一样,系统必须支持自己可以和各种档次、配置的服务器和存储设备并存。
16. 必须支持边云协同。要有一套灵活的机制将边缘计算节点的数据上传到云端,根据具体需要,可以将原始数据,或加工计算后的数据,或仅仅符合过滤条件的数据同步到云端,而且随时可以取消,更改策略。这样才能更好的汇聚数据,统筹业务,从而做出更好的业务决策。
17. 需要统一的后台管理系统。便于查看系统运行状态、管理集群、用户、各种系统资源等,而且系统能够与第三方 IT 运维监测平台无缝集成。
18. 需要支持私有化部署。因为很多企业出于安全以及各种因素的考虑,希望采用私有化部署。而传统的企业往往没有很强的 IT 运维团队,因此在安装、部署、运维等方面需要做到简单、快捷,可维护性强。
总之,时序大数据平台应具备高效、可扩展、实时、可靠、灵活、开放、简单、易维护等特点。近年来,众多企业纷纷将时序数据从传统大数据平台或关系型数据库迁移到专用时序大数据平台,以保障海量时序数据得到快速和有效处理,支撑相关业务的持续增长。

View File

@ -1,153 +0,0 @@
---
title: 产品简介
description: 简要介绍 TDengine 的主要功能
toc_max_heading_level: 2
---
TDengine 是一款专为物联网、工业互联网等场景设计并优化的大数据平台,它能安全高效地将大量设备、数据采集器每天产生的高达 TB 甚至 PB 级的数据进行汇聚、存储、分析和分发,对业务运行状态进行实时监测、预警,提供实时的商业洞察。其核心模块是高性能、集群开源、云原生、极简的时序数据库 TDengine OSS。
本节介绍 TDengine OSS 的主要产品和功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine OSS 有个整体了解
## 主要功能
TDengine OSS 的主要功能如下:
1. 写入数据,支持
- [SQL 写入](../develop/insert-data/sql-writing)
- [无模式Schemaless写入](../reference/schemaless/),支持多种标准写入协议
- [InfluxDB Line 协议](../develop/insert-data/influxdb-line)
- [OpenTSDB Telnet 协议](../develop/insert-data/opentsdb-telnet)
- [OpenTSDB JSON 协议](../develop/insert-data/opentsdb-json)
- 与多种第三方工具的无缝集成,它们都可以仅通过配置而无需任何代码即可将数据写入 TDengine
- [Telegraf](../third-party/telegraf)
- [Prometheus](../third-party/prometheus)
- [StatsD](../third-party/statsd)
- [collectd](../third-party/collectd)
- [Icinga2](../third-party/icinga2)
- [TCollector](../third-party/tcollector)
- [EMQX](../third-party/emq-broker)
- [HiveMQ](../third-party/hive-mq-broker)
2. 查询数据,支持
- [标准 SQL](../taos-sql),含嵌套查询
- [时序数据特色函数](../taos-sql/function/#time-series-extensions)
- [时序数据特色查询](../taos-sql/distinguished),例如降采样、插值、累加和、时间加权平均、状态窗口、会话窗口等
- [用户自定义函数UDF](../taos-sql/udf)
3. [缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis 就能对时序数据进行高效处理
4. [流式计算Stream Processing](../develop/stream)TDengine 不仅支持连续查询,还支持事件驱动的流式计算,这样在处理时序数据时就无需 Flink 或 Spark 这样流式计算组件
5. [数据订阅](../develop/tmq),应用程序可以订阅一张表或一组表的数据,提供与 Kafka 相同的 API而且可以指定过滤条件
6. 可视化
- 支持与 [Grafana](../third-party/grafana/) 的无缝集成
- 支持与 Google Data Studio 的无缝集成
7. 集群
- [集群部署](../deployment/),可以通过增加节点进行水平扩展以提升处理能力
- 可以通过 [Kubernetes 部署 TDengine](../deployment/k8s/)
- 通过多副本提供高可用能力
8. 管理
- [监控](../operation/monitor)运行中的 TDengine 实例
- 多种[数据导入](../operation/import)方式
- 多种[数据导出](../operation/export)方式
9. 工具
- 提供[交互式命令行程序CLI](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
- 提供压力测试工具 [taosBenchmark](../reference/taosbenchmark),用于测试 TDengine 的性能
10. 编程
- 提供各种语言的[连接器Connector](../connector): 如 [C/C++](../connector/cpp)、[Java](../connector/java)、[Go](../connector/go)、[Node.js](../connector/node)、[Rust](../connector/rust)、[Python](../connector/python)、[C#](../connector/csharp) 等
- 支持 [REST 接口](../connector/rest-api/)
更多细节功能,请阅读整个文档。
## 竞争优势
由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html)比如结构化、无需事务、很少删除或更新、写多读少等等因此与其他时序数据库相比TDengine 有以下特点:
- **[高性能](https://www.taosdata.com/tdengine/fast)**TDengine 是唯一一个解决了时序数据存储的高基数难题的时序数据库,支持上亿数据采集点,并在数据插入、查询和数据压缩上远胜其它时序数据库。
- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**TDengine 内建缓存、流式计算和数据订阅等功能,为时序数据的处理提供了极简的解决方案,从而大幅降低了业务系统的设计复杂度和运维成本。
- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**通过原生的分布式设计、数据分片和分区、存算分离、RAFT 协议、Kubernetes 部署和完整的可观测性TDengine 是一款云原生时序数据库并且能够部署在公有云、私有云和混合云上。
- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**对系统管理员来说TDengine 大幅降低了管理和维护的代价。对开发者来说, TDengine 提供了简单的接口、极简的解决方案和与第三方工具的无缝集成。对数据分析专家来说TDengine 提供了便捷的数据访问能力。
- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**通过超级表、存储计算分离、分区分片、预计算和其它技术TDengine 能够高效地浏览、格式化和访问数据。
- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**TDengine 的核心代码包括集群功能全部在开源协议下公开。全球超过 140k 个运行实例GitHub Star 20k且拥有一个活跃的开发者社区。
采用 TDengine可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面
1. 由于其超强性能,它能将系统所需的计算资源和存储资源大幅降低
2. 因为支持 SQL能与众多第三方软件无缝集成学习迁移成本大幅下降
3. 因为是一款极简的时序数据平台,系统复杂度、研发和运营成本大幅降低
## 技术生态
在整个时序大数据平台中TDengine 扮演的角色如下:
<figure>
![TDengine Database 技术生态图](eco_system.webp)
<center><figcaption>图 1. TDengine 技术生态图</figcaption></center>
</figure>
上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序CLI以及可视化管理工具。
## 典型适用场景
作为一个高性能、分布式、支持 SQL 的时序数据库Time-series DatabaseTDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具因其充分利用了时序大数据的特点它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。
### 数据源特点和需求
从数据源角度,设计人员可以从下面几个角度分析 TDengine 在目标应用系统里面的适用性。
| 数据源特点和需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
| ---------------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------------------------------- |
| 总体数据量巨大 | | | √ | TDengine 在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。 |
| 数据输入速度偶尔或者持续巨大 | | | √ | TDengine 的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。 |
| 数据源数目巨大 | | | √ | TDengine 设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。 |
### 系统架构要求
| 系统架构要求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
| ---------------------- | ------ | -------- | -------- | ----------------------------------------------------------------------------------------------------- |
| 要求简单可靠的系统架构 | | | √ | TDengine 的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。 |
| 要求容错和高可靠 | | | √ | TDengine 的集群功能,自动提供容错灾备等高可靠功能。 |
| 标准化规范 | | | √ | TDengine 使用标准的 SQL 语言提供主要功能,遵守标准化规范。 |
### 系统功能需求
| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
| -------------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------------------------- |
| 要求完整的内置数据处理算法 | | √ | | TDengine 实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有需求,因此特殊类型的处理需求还需要在应用层面解决。 |
| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据库处理,或者应该考虑 TDengine 和关系型数据库配合实现系统功能。 |
### 系统性能需求
| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
| ---------------------- | ------ | -------- | -------- | -------------------------------------------------------------------------------------------------- |
| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
| 要求高速处理数据 | | | √ | TDengine 专门为 IoT 优化的存储和数据处理设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
### 系统维护需求
| 系统维护需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
| ---------------------- | ------ | -------- | -------- | --------------------------------------------------------------------------------------------------------------------- |
| 要求系统可靠运行 | | | √ | TDengine 的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。 |
| 要求运维学习成本可控 | | | √ | 同上。 |
| 要求市场有大量人才储备 | √ | | | TDengine 作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。 |
## 与其他数据库的对比测试
- [用 InfluxDB 开源的性能测试工具对比 InfluxDB 和 TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html)
- [TDengine 与 OpenTSDB 对比测试](https://www.taosdata.com/blog/2019/08/21/621.html)
- [TDengine 与 Cassandra 对比测试](https://www.taosdata.com/blog/2019/08/14/573.html)
- [TDengine VS InfluxDB ,写入性能大 PK ](https://www.taosdata.com/2021/11/05/3248.html)
- [TDengine 和 InfluxDB 查询性能对比测试报告](https://www.taosdata.com/2022/02/22/5969.html)
- [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
## 主要产品
TDengine 有两个主要产品TDengine Enterprise (即 TDengine 企业版)和 TDengine Cloud关于它们的具体定义请参考
- [TDengine 企业版](https://www.taosdata.com/tdengine-pro)
- [TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)

149
docs/zh/03-intro.md Normal file
View File

@ -0,0 +1,149 @@
---
sidebar_label: 产品简介
title: TDengine 产品简介
toc_max_heading_level: 4
---
在 2016 年底,涛思数据创始人陶建辉先生凭借多年的技术研发经验,敏锐地捕捉到时序数据呈指数级增长的态势,发现时序大数据处理领域缺乏一个高效、开放且易于使用的工具,并意识到时序大数据的处理是一个巨大的技术挑战与商业机遇。因此,在 2017 年,他创办了涛思数据,并亲自主导研发了 TDengine专注于时序数据处理。至今他仍致力于此领域的研究与发展。
TDengine 作为涛思数据的旗舰产品其核心是一个高性能、分布式的时序数据库。通过集成的缓存、数据订阅、流计算和数据清洗与转换等功能TDengine 已经发展成为一个专为物联网、工业互联网、金融和 IT 运维等关键行业量身定制的时序大数据平台。该平台能够高效地汇聚、存储、分析、计算和分发来自海量数据采集点的大规模数据流,每日处理能力可达 TB 乃至 PB 级别。借助 TDengine企业可以实现实时的业务监控和预警进而发掘出有价值的商业洞察。
自 2019 年 7 月以来,涛思数据陆续将 TDengine 的不同版本开源包括单机版2019 年 7 月、集群版2020 年 8 月以及云原生版2022 年 8 月。开源之后TDengine 迅速获得了全球开发者的关注,多次在 GitHub 网站全球趋势排行榜上位居榜首。截至编写本书时TDengine 在 GitHub 网站上已积累近 2.3 万颗星,安装实例超过 53 万个,覆盖 60 多个国家和地区,广泛应用于电力、石油、化工、新能源、智能制造、汽车、环境监测等行业或领域,赢得了全球开发者的广泛认可。
## TDengine 产品
为满足不同用户的需求和场景,涛思数据推出 TDengine 系列产品,包括开源版 TDengine OSS、企业版 TDengine Enterprise 以及云服务 TDengine Cloud。
TDengine OSS 是一个开源的高性能时序数据库与其他时序数据库相比它的核心优势在于其集群开源、高性能和云原生架构。而且除了基础的写入、查询和存储功能外TDengine OSS 还集成了缓存、流式计算和数据订阅等高级功能,这些功能显著简化了系统设计,降低了企业的研发和运营成本。
在 TDengine OSS 的基础上,企业版 TDengine Enterprise 提供了增强的辅助功能包括数据的备份恢复、异地容灾、多级存储、视图、权限控制、安全加密、IP 白名单、支持 MQTT、OPC-UA、OPC-DA、PI、Wonderware、Kafka 等各种数据源。这些功能为企业提供了更为全面、安全、可靠和高效的时序数据管理解决方案。
此外TDengine Cloud 作为一种全托管的云服务,存储与计算分离,分开计费,为企业提供了企业级的工具和服务,彻底解决了运维难题,尤其适合中小规模的用户使用。
## TDengine 主要功能与特性
TDengine 经过特别优化,以适应时间序列数据的独特需求,引入了“一个数据采集点一张表”和“超级表”的创新数据组织策略。这些策略背后的支撑是一个革命性的存储引擎,它极大地提升了数据处理的速度和效率,无论是在数据的写入、查询还是存储方面。接下来,逐一探索 TDengine 的众多功能,帮助您全面了解这个为高效处理时间序列数据而生的大数据平台。
1. 写入数据TDengine 支持多种数据写入方式。首先,它完全兼容 SQL允许用户使用标准的 SQL 语法进行数据写入。而且 TDengine 还支持无模式Schemaless写入包括流行的 InfluxDB Line 协议、OpenTSDB 的 Telnet 和 JSON 协议这些协议的加入使得数据的导入变得更加灵活和高效。更进一步TDengine 与众多第三方工具实现了无缝集成,例如 Telegraf、Prometheus、EMQX、StatsD、collectd 和 HiveMQ 等。对于 TDengine EnterpriseTDengine 还提供了 MQTT、OPC-UA、OPC-DA、PI、Wonderware、Kafka 等连接器。这些工具通过简单的配置,无需一行代码,就可以将来自各种数据源的数据源源不断的写入数据库,极大地简化了数据收集和存储的过程。
2. 查询数据TDengine 提供标准的 SQL 查询语法并针对时序数据和业务的特点优化和新增了许多语法和功能例如降采样、插值、累计求和、时间加权平均、状态窗口、时间窗口、会话窗口、滑动窗口等。TDengine 还支持用户自定义函数UDF
3. 缓存TDengine 使用时间驱动缓存管理策略First-In-First-OutFIFO将最近到达的当前状态数据保存在缓存中这样便于获取任何监测对象的实时状态而无需使用 Redis 等其他缓存工具,简化系统架构和运营成本。
4. 流式计算TDengine 流式计算引擎提供了实时处理写入的数据流的能力,不仅支持连续查询,还支持事件驱动的流式计算。它提供了替代复杂流处理系统的轻量级解决方案,并能够在高吞吐的数据写入的情况下,提供毫秒级的计算结果延迟。
5. 数据订阅TDengine 提供了类似 Kafka 的数据订阅功能。但用户可以通过 SQL 来灵活控制订阅的数据内容,并使用 Kafka 相同的 API 来订阅一张表、一组表、全部列或部分列、甚至整个数据库的数据。TDengine 可以替代需要集成消息队列产品的场景, 从而简化系统设计的复杂度,降低运营维护成本。
6. 可视化/BITDengine 本身不提供可视化或 BI 的功能。但通过其 RESTful API 及标准的 JDBC、ODBC 接口TDengine 能够与 Grafana、Google Data Studio、Power BI、Tableau 以及国产 BI 工具无缝集成。
7. 集群功能TDengine 支持集群部署,能够随着业务数据量的增长,通过增加节点线性提升系统处理能力,实现水平扩展。同时,通过多副本技术提供高可用性,并支持 Kubernetes 部署。
8. 数据迁移TDengine 提供了多种便捷的数据导入导出功能包括脚本文件导入导出、数据文件导入导出、taosdump 工具导入导出等。企业版还支持边云协同、数据同步等场景,兼容多种数据源,如 AVEVA PI System 等。
9. 编程连接器TDengine 提供不同语言的连接器,包括 C/C++、Java、Go、Node.js、Rust、Python、C#、R、PHP 等。而且 TDengine 支持 REST 接口,应用可以直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库REST 接口的用法见本节列表后的示意代码)。
10. 数据安全共享TDengine 通过数据库视图功能和权限管理,确保数据访问的安全性。数据订阅功能可将数据实时分发给应用,订阅主题可通过 SQL 定义,实现灵活精细的数据分发控制,保护数据安全和隐私。
11. 常用工具TDengine 还提供了交互式命令行程序CLI便于管理集群、检查系统状态、做即时查询压力测试工具 taosBenchmark用于测试 TDengine 的性能。TDengine 还提供了图形化管理界面,简化了操作和管理过程。
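第 9 点提到的 REST 接口可以用任意 HTTP 客户端调用。下面是一个示意性的 Rust 片段(假设额外引入第三方库 reqwest6041 端口与 root/taosdata 账号均为常见默认值,请按实际部署调整):
```rust
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // 请求 BODY 即为要执行的 SQL 语句
    let resp = Client::new()
        .post("http://localhost:6041/rest/sql")
        .basic_auth("root", Some("taosdata"))
        .body("SHOW DATABASES")
        .send()
        .await?
        .text()
        .await?;
    // 返回的是 JSON 文本,这里直接打印
    println!("{}", resp);
    Ok(())
}
```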
## TDengine 与典型时序数据库的区别
由于充分利用了时序数据特点,并采用独特创新的“一个数据采集点一张表”和“超级表”的数据模型,与其他时序数据库相比TDengine 拥有以下特点:
1. 高性能TDengine 通过创新的存储引擎设计,实现了超群的数据写入和查询性能,速度比通用数据库快 10 倍以上,也远超过其他时序数据库。同时,其存储空间需求仅为通用数据库的 1/10极大地提高了资源利用效率。
2. 云原生TDengine 采用原生分布式设计,充分利用云平台的优势,提供了水平扩展能力。它具备弹性、韧性和可观测性,支持 Kubernetes 部署,并可在公有云、私有云和混合云上灵活运行。
3. 极简时序数据平台TDengine 内置了消息队列、缓存、流式计算等功能,避免了应用集成 Kafka、Redis、HBase、Spark 等软件的复杂性,从而大幅降低系统的复杂度和应用开发及运营成本。
4. 强大的分析能力TDengine 不仅支持标准 SQL 查询,还为时序数据特有的分析提供了 SQL 扩展。通过超级表、存储计算分离、分区分片、预计算、UDF 等先进技术TDengine 展现出强大的数据分析能力。
5. 简单易用TDengine 安装无依赖,集群部署仅需几秒即可完成。它提供了 RESTful 接口和多种编程语言的连接器,与众多第三方工具无缝集成。此外,命令行程序和丰富的运维工具也极大地方便了用户的管理和即时查询需求。
6. 核心开源TDengine 的核心代码包括集群功能均在开源协议下公开发布。它在 GitHub 网站全球趋势排行榜上多次位居榜首显示出其受欢迎程度。同时TDengine 拥有一个活跃的开发者社区,为技术的持续发展和创新提供了有力支持。
采用 TDengine企业可以在物联网、车联网、工业互联网等典型场景中显著降低大数据平台的总拥有成本主要体现在以下几个方面
1. 高性能带来的成本节约TDengine 卓越的写入、查询和存储性能意味着系统所需的计算资源和存储资源可以大幅度减少。这不仅降低了硬件成本,还减少了能源消耗和维护费用。
2. 标准化与兼容性带来的成本效益:由于 TDengine 支持标准 SQL并与众多第三方软件实现了无缝集成用户可以轻松地将现有系统迁移到 TDengine 上,无须重写大量代码。这种标准化和兼容性大大降低了学习和迁移成本,缩短了项目周期。
3. 简化系统架构带来的成本降低作为一个极简的时序数据平台TDengine 集成了消息队列、缓存、流计算等必要功能,避免了额外集成众多其他组件的需要。这种简化的系统架构显著降低了系统的复杂度,从而减少了研发和运营成本,提高了整体运营效率。
## 技术生态
在整个时序大数据平台中TDengine 扮演的角色如下:
<figure>
![TDengine Database 技术生态图](eco_system.webp)
<center><figcaption>图 1. TDengine 技术生态图</figcaption></center>
</figure>
上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序CLI以及可视化管理工具。
## 典型适用场景
作为一个高性能、分布式、支持 SQL 的时序数据库Time-series DatabaseTDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具因其充分利用了时序大数据的特点它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。
### 数据源特点和需求
从数据源角度,设计人员可以从下面几个角度分析 TDengine 在目标应用系统里面的适用性。
| 数据源特点和需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
| ---------------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------------------------------- |
| 总体数据量巨大 | | | √ | TDengine 在容量方面提供出色的水平扩展功能,并且具备匹配高压缩的存储结构,达到业界最优的存储效率。 |
| 数据输入速度偶尔或者持续巨大 | | | √ | TDengine 的性能大大超过同类产品,可以在同样的硬件环境下持续处理大量的输入数据,并且提供很容易在用户环境里面运行的性能评估工具。 |
| 数据源数目巨大 | | | √ | TDengine 设计中包含专门针对大量数据源的优化,包括数据的写入和查询,尤其适合高效处理海量(千万或者更多量级)的数据源。 |
### 系统架构要求
| 系统架构要求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
| ---------------------- | ------ | -------- | -------- | ----------------------------------------------------------------------------------------------------- |
| 要求简单可靠的系统架构 | | | √ | TDengine 的系统架构非常简单可靠,自带消息队列,缓存,流式计算,监控等功能,无需集成额外的第三方产品。 |
| 要求容错和高可靠 | | | √ | TDengine 的集群功能,自动提供容错灾备等高可靠功能。 |
| 标准化规范 | | | √ | TDengine 使用标准的 SQL 语言提供主要功能,遵守标准化规范。 |
### 系统功能需求
| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
| -------------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------------------------- |
| 要求完整的内置数据处理算法 | | √ | | TDengine 实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有需求,因此特殊类型的处理需求还需要在应用层面解决。 |
| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据库处理,或者应该考虑 TDengine 和关系型数据库配合实现系统功能。 |
### 系统性能需求
| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
| ---------------------- | ------ | -------- | -------- | -------------------------------------------------------------------------------------------------- |
| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
| 要求高速处理数据 | | | √ | TDengine 专门为 IoT 优化的存储和数据处理设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
### 系统维护需求
| 系统维护需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
| ---------------------- | ------ | -------- | -------- | --------------------------------------------------------------------------------------------------------------------- |
| 要求系统可靠运行 | | | √ | TDengine 的系统架构非常稳定可靠,日常维护也简单便捷,对维护人员的要求简洁明了,最大程度上杜绝人为错误和事故。 |
| 要求运维学习成本可控 | | | √ | 同上。 |
| 要求市场有大量人才储备 | √ | | | TDengine 作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。 |
## 与其他数据库的对比测试
- [用 InfluxDB 开源的性能测试工具对比 InfluxDB 和 TDengine](https://www.taosdata.com/blog/2020/01/13/1105.html)
- [TDengine 与 OpenTSDB 对比测试](https://www.taosdata.com/blog/2019/08/21/621.html)
- [TDengine 与 Cassandra 对比测试](https://www.taosdata.com/blog/2019/08/14/573.html)
- [TDengine VS InfluxDB ,写入性能大 PK ](https://www.taosdata.com/2021/11/05/3248.html)
- [TDengine 和 InfluxDB 查询性能对比测试报告](https://www.taosdata.com/2022/02/22/5969.html)
- [TDengine 与 InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse 等数据库的对比测试报告](https://www.taosdata.com/downloads/TDengine_Testing_Report_cn.pdf)
## 主要产品
TDengine 有两个主要产品TDengine Enterprise (即 TDengine 企业版)和 TDengine Cloud关于它们的具体定义请参考
- [TDengine 企业版](https://www.taosdata.com/tdengine-pro)
- [TDengine 云服务](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)

View File

@ -1 +0,0 @@
label: 基本概念

View File

@ -1,183 +0,0 @@
---
sidebar_label: 基本概念
title: 数据模型和基本概念
description: TDengine 的数据模型和基本概念
---
为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 Location 和分组 Group ID 的静态属性. 其采集的数据类似如下的表格:
<div className="center-table">
<table>
<thead>
<tr>
<th rowSpan="2">Device ID</th>
<th rowSpan="2">Timestamp</th>
<th colSpan="3">Collected Metrics</th>
<th colSpan="2">Tags</th>
</tr>
<tr>
<th>current</th>
<th>voltage</th>
<th>phase</th>
<th>location</th>
<th>groupid</th>
</tr>
</thead>
<tbody>
<tr>
<td>d1001</td>
<td>1538548685000</td>
<td>10.3</td>
<td>219</td>
<td>0.31</td>
<td>California.SanFrancisco</td>
<td>2</td>
</tr>
<tr>
<td>d1002</td>
<td>1538548684000</td>
<td>10.2</td>
<td>220</td>
<td>0.23</td>
<td>California.SanFrancisco</td>
<td>3</td>
</tr>
<tr>
<td>d1003</td>
<td>1538548686500</td>
<td>11.5</td>
<td>221</td>
<td>0.35</td>
<td>California.LosAngeles</td>
<td>3</td>
</tr>
<tr>
<td>d1004</td>
<td>1538548685500</td>
<td>13.4</td>
<td>223</td>
<td>0.29</td>
<td>California.LosAngeles</td>
<td>2</td>
</tr>
<tr>
<td>d1001</td>
<td>1538548695000</td>
<td>12.6</td>
<td>218</td>
<td>0.33</td>
<td>California.SanFrancisco</td>
<td>2</td>
</tr>
<tr>
<td>d1004</td>
<td>1538548696600</td>
<td>11.8</td>
<td>221</td>
<td>0.28</td>
<td>California.LosAngeles</td>
<td>2</td>
</tr>
<tr>
<td>d1002</td>
<td>1538548696650</td>
<td>10.3</td>
<td>218</td>
<td>0.25</td>
<td>California.SanFrancisco</td>
<td>3</td>
</tr>
<tr>
<td>d1001</td>
<td>1538548696800</td>
<td>12.3</td>
<td>221</td>
<td>0.31</td>
<td>California.SanFrancisco</td>
<td>2</td>
</tr>
</tbody>
</table>
<a name="#model_table1">表 1. 智能电表数据示例</a>
</div>
每一条记录都有设备 ID、时间戳、采集的物理量如上表中的 `current`、`voltage` 和 `phase`)以及每个设备相关的静态标签(`location` 和 `groupid`)。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。
## 采集量Metric
采集量是指传感器、设备或其他类型采集点采集的物理量比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。智能电表示例中的电流、电压、相位就是采集量。
## 标签Label/Tag
标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。智能电表示例中的 `location``groupid` 就是标签。
## 数据采集点Data Collection Point
数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。智能电表示例中的 d1001、d1002、d1003、d1004 等就是数据采集点。
## 表Table
因为采集量一般是结构化数据同时为降低学习门槛TDengine 采用传统的关系型数据库模型管理数据。用户需要先创建库,然后创建表,之后才能插入或查询数据。
为充分利用其数据的时序性和其他数据特点TDengine 采取**一个数据采集点一张表**的策略,要求对每个数据采集点单独建表(比如有一千万个智能电表,就需创建一千万张表,上述表格中的 d1001d1002d1003d1004 都需单独建表),用来存储这个数据采集点所采集的时序数据。这种设计有几大优点:
1. 由于不同数据采集点产生数据的过程完全独立,每个数据采集点的数据源是唯一的,一张表也就只有一个写入者,这样就可采用无锁方式来写,写入速度就能大幅提升。
2. 对于一个数据采集点而言,其产生的数据是按照时间排序的,因此写的操作可用追加的方式实现,进一步大幅提高数据写入速度。
3. 一个数据采集点的数据是以块为单位连续存储的。如果读取一个时间段的数据,它能大幅减少随机读取操作,成数量级的提升读取和查询速度。
4. 一个数据块内部,采用列式存储,对于不同数据类型,采用不同压缩算法,而且由于一个数据采集点的采集量的变化是缓慢的,压缩率更高。
如果采用传统的方式,将多个数据采集点的数据写入一张表,由于网络延时不可控,不同数据采集点的数据到达服务器的时序是无法保证的,写入操作是要有锁保护的,而且一个数据采集点的数据是难以保证连续存储在一起的。**采用一个数据采集点一张表的方式,能最大程度的保证单个数据采集点的插入和查询的性能是最优的。**
TDengine 建议用数据采集点的名字(如上表中的 d1001来做表名。每个数据采集点可能同时采集多个采集量如上表中的 `current`、`voltage` 和 `phase`),每个采集量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 Timestamp。对采集量TDengine 将自动按照时间戳建立索引,但对采集量本身不建任何索引。数据用列式存储方式保存。
对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一辆汽车建立多张表。
## 超级表STable
由于一个数据采集点一张表导致表的数量巨增难以管理而且应用经常需要做采集点之间的聚合操作聚合的操作也变得复杂起来。为解决这个问题TDengine 引入超级表Super Table简称为 STable的概念。
超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 Schema标签的数据类型可以是整数、浮点数、字符串、JSON标签可以有多个可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。
在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。智能电表示例中,我们可以创建一个超级表 `meters`.
## 子表Subtable
当为某个具体数据采集点创建表时,用户可以使用超级表的定义做模板,同时指定该具体采集点(表)的具体标签值来创建该表。**通过超级表创建的表称之为子表**。正常的表与子表的差异在于:
1. 子表就是表,因此所有正常表的 SQL 操作都可以在子表上执行。
2. 子表在正常表的基础上有扩展,它是带有静态标签的,而且这些标签可以事后增加、删除、修改,而正常的表没有。
3. 子表一定属于一张超级表,但普通表不属于任何超级表
4. 普通表无法转为子表,子表也无法转为普通表。
超级表与基于超级表建立的子表之间的关系表现在:
1. 一张超级表包含有多张子表,这些子表具有相同的采集量 Schema但带有不同的标签值。
2. 不能通过子表调整数据或标签的模式,对于超级表的数据模式修改立即对所有的子表生效。
3. 超级表只定义一个模板,自身不存储任何数据或标签信息。因此,不能向一个超级表写入数据,只能将数据写入子表中。
查询既可以在表上进行也可以在超级表上进行。针对超级表的查询TDengine 将把所有子表中的数据视为一个整体数据集进行处理会先把满足标签过滤条件的表从超级表中找出来然后再扫描这些表的时序数据进行聚合操作这样需要扫描的数据集会大幅减少从而显著提高查询的性能。本质上TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。
TDengine 系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表 meters 创建子表 d1001、d1002、d1003、d1004 等。
为了更好地理解采集量、标签、超级与子表的关系,可以参考下面关于智能电表数据模型的示意图。
<figure>
![智能电表数据模型示意图](./supertable.webp)
<center><figcaption>图 1. 智能电表数据模型示意图</figcaption></center>
</figure>
## 库Database
库是指一组表的集合。TDengine 容许一个运行实例有多个库,而且每个库可以配置不同的存储策略。不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下 TDengine 都能最大效率的工作TDengine 建议将不同数据特征的超级表创建在不同的库里。
一个库里,可以有一到多个超级表,但一个超级表只属于一个库。一个超级表所拥有的子表全部存在一个库里。
## FQDN & Endpoint
FQDNFully Qualified Domain Name完全限定域名是 Internet 上特定计算机或主机的完整域名。FQDN 由两部分组成:主机名和域名。例如,假设邮件服务器的 FQDN 可能是 mail.tdengine.com。主机名是 mail主机位于域名 tdengine.com 中。DNSDomain Name System负责将 FQDN 翻译成 IP是互联网应用的寻址方式。对于没有 DNS 的系统,可以通过配置 hosts 文件来解决。
TDengine 集群的每个节点是由 Endpoint 来唯一标识的Endpoint 是由 FQDN 外加 Port 组成,比如 h1.tdengine.com:6030。这样当 IP 发生变化的时候,我们依然可以使用 FQDN 来动态找到节点,不需要更改集群的任何配置。而且采用 FQDN便于内网和外网对同一个集群的统一访问。
TDengine 不建议采用直接的 IP 地址访问集群,不利于管理。不了解 FQDN 概念,请看博文[《一篇文章说清楚 TDengine 的 FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。

Binary file not shown.

Before

Width:  |  Height:  |  Size: 33 KiB

View File

@ -0,0 +1,85 @@
---
sidebar_label: Linux
title: 在 Linux 系统上安装和启动 TDengine
toc_max_heading_level: 4
---
本节简介在 Linux 系统上安装和启动 TDengine 的快速步骤。
## 安装步骤
访问 TDengine 的官方版本发布页面 https://docs.taosdata.com/releases/tdengine/ ,下载 TDengine 安装包 TDengine-server-3.3.0.0-Linux-x64.tar.gz。其他类型安装包的安装方法请参考相关文档TDengine 遵循各种安装包的标准。
1. 进入到安装包所在目录,使用 tar 解压安装包
```shell
tar -zxvf TDengine-server-3.3.0.0-Linux-x64.tar.gz
```
2. 解压文件后,进入相应子目录 TDengine-server-3.3.0.0,执行其中的 install.sh 安装脚本
```shell
sudo ./install.sh
```
3. 安装后,请使用 systemctl 命令来启动 TDengine 的服务进程。
```shell
sudo systemctl start taosd
```
4. 检查服务是否正常工作:
```shell
sudo systemctl status taosd
```
5. 如果服务进程处于活动状态,则 status 指令会显示如下的相关信息:
```shell
Active: active (running)
```
6. 如果后台服务进程处于停止状态,则 status 指令会显示如下的相关信息:
```shell
Active: inactive (dead)
```
如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 taos 来访问并体验 TDengine。
如下 systemctl 命令可以帮助你管理 TDengine 服务:
```shell
1. 启动服务进程sudo systemctl start taosd
2. 停止服务进程sudo systemctl stop taosd
3. 重启服务进程sudo systemctl restart taosd
4. 查看服务状态sudo systemctl status taosd
```
**注意**
- 当执行 systemctl stop taosd 命令时TDengine 服务并不会立即终止,而是会等待必要的数据成功落盘,确保数据的完整性。在处理大量数据的情况下,这一过程可能会花费较长的时间。
- 如果操作系统不支持 systemctl可以通过手动运行 /usr/local/taos/bin/taosd 命令来启动 TDengine 服务。
## 目录结构
安装 TDengine 后,默认会在操作系统中生成下列目录或文件:
| 目录/文件 | 说明 |
| ------------------------- | -------------------------------------------------------------------- |
| /usr/local/taos/bin | TDengine 可执行文件目录。其中的执行文件都会软链接到/usr/bin 目录下。 |
| /usr/local/taos/driver | TDengine 动态链接库目录。会软链接到/usr/lib 目录下。 |
| /usr/local/taos/examples | TDengine 各种语言应用示例目录。 |
| /usr/local/taos/include | TDengine 对外提供的 C 语言接口的头文件。 |
| /etc/taos/taos.cfg | TDengine 默认[配置文件] |
| /var/lib/taos | TDengine 默认数据文件目录。可通过[配置文件]修改位置。 |
| /var/log/taos | TDengine 默认日志文件目录。可通过[配置文件]修改位置。 |
## 可执行程序
TDengine 的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下。其中包括:
- _taosd_TDengine 服务端可执行文件
- _taos_TDengine Shell 可执行文件
- _taosdump_:数据导入导出工具
- _taosBenchmark_TDengine 测试工具
- _remove.sh_:卸载 TDengine 的脚本,请谨慎执行,链接到/usr/bin 目录下的**rmtaos**命令。会删除 TDengine 的安装目录/usr/local/taos但会保留/etc/taos、/var/lib/taos、/var/log/taos
- _taosadapter_: 提供 RESTful 服务和接受其他多种软件写入请求的服务端可执行文件
- _TDinsight.sh_:用于下载 TDinsight 并安装的脚本
- _set_core.sh_用于方便调试设置系统生成 core dump 文件的脚本
- _taosd-dump-cfg.gdb_:用于方便调试 taosd 的 gdb 执行脚本。

View File

@ -0,0 +1,48 @@
---
sidebar_label: Docker
title: 使用 Docker 快速启动 TDengine
toc_max_heading_level: 4
---
本节简介如何使用 Docker 快速启动 TDengine。
## Docker
1. 测试机器如果已经安装了 Docker首先拉取最新的 TDengine 容器镜像:
```shell
docker pull tdengine/tdengine:latest
```
或者指定版本的容器镜像:
```shell
docker pull tdengine/tdengine:3.3.0.0
```
2. 然后只需执行下面的命令:
```shell
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
```
**注意**TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用的提供 REST 服务的端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用的端口,可根据需要选择是否打开。
如果需要将数据持久化到本机的某一个文件夹,则执行下边的命令:
```shell
docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \
-v ~/data/taos/dnode/log:/var/log/taos \
-p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
```
3. 确定该容器已经启动并且在正常运行。
```shell
docker ps
```
4. 进入该容器并执行 bash
```shell
docker exec -it <container name> bash
```
然后就可以执行相关的 Linux 命令操作和访问 TDengine。
## 故障排查
如果启动 TDengine 服务时出现异常,请查看数据库日志以获取更多信息。你也可以参考 TDengine 的官方文档中的故障排除部分,或者在 TDengine 开源社区中寻求帮助。

View File

@ -0,0 +1,35 @@
---
sidebar_label: 云服务
title: 通过云服务快速体验 TDengine
toc_max_heading_level: 4
---
TDengine Cloud 作为一个全托管的时序大数据云服务平台致力于让用户迅速领略TDengine 的强大功能。该平台不仅继承了 TDengine Enterprise 的核心功能特性,还充分发挥了 TDengine 的云原生优势。TDengine Cloud 以其极致的资源弹性伸缩、高可用性、容器化部署以及按需付费等特点,灵活满足各类用户需求,为用户打造高效、可靠且经济的时序大数据处理解决方案。
TDengine Cloud 大幅减轻了用户在部署、运维等方面的人力负担同时提供了全方位的企业级服务。这些服务涵盖多角色、多层次的用户管理、数据共享功能以适应各种异构网络环境。此外TDengine Cloud 还提供私有链接服务和极简的数据备份与恢复功能,确保数据安全无忧。
对于中小型企业,选择 TDengine Cloud 不仅能有效降低成本还能享受到专业的服务支持。而对于大型企业TDengine Cloud 则可作为强大的概念验证和测试平台助力加速项目的上线周期。在项目成熟后企业还可根据自身需求选择私有化部署以实现更高级别的定制化需求。综上所述TDengine Cloud 为不同规模的企业提供了灵活、高效且经济的时序大数据解决方案。
## 新用户注册
要在 TDengine Cloud 注册新用户,请遵循以下简易步骤完成注册流程:
1. 打开浏览器,访问 TDengine Cloud 的首页https://cloud.taosdata.com在右边的“注册”部分填入自己的姓名以及企业邮箱地址点击“获取验证码”按钮。
2. 检查企业邮箱,找到主题为“你的 TDengine Cloud 注册账户验证码”的邮件。从邮件内容中复制 6 位验证码,并将其粘贴到注册页面上的“验证码”输入框中。接着,点击“注册 TDengine Cloud”按钮进入客户信息补全页面。
3. 在客户信息补全页面的“手机号”输入框中输入有效的手机号码,并点击“验证”按钮完成验证。验证通过后,设置一个符合要求的密码,然后点击“继续”按钮,将进入“创建实例”部分。
## 创建实例
要在 TDengine Cloud 中创建 TDengine 实例,只须遵循以下 3 个简单步骤。
1. 第 1 步选择公共数据库。在此步骤中TDengine Cloud 提供了可供公共访问的智能电表等数据库。通过浏览和查询这些数据库,你可以立即体验 TDengine 的各种功能和高性能。你可以根据需求在此步骤启动数据库访问,或在后续使用过程中再进行启动。若不需要此步骤,可直接点击“下一步”按钮跳过。
2. 第 2 步,创建组织。在此步骤中,请输入一个具有意义的名称,代表你的公司或组织,这将有助于你和平台更好地管理云上资源。
3. 第 3 步,创建实例。在此步骤中,你需要填写实例的区域、名称、是否选择高可用选项以及计费方案等必填信息。确认无误后,点击“创建”按钮。大约等待 1 分钟,新的 TDengine 实例便会创建完成。随后,你可以在控制台中对该实例进行各种操作,如查询数据、创建订阅、创建流等。
TDengine Cloud 提供多种级别的计费方案,包括入门版、基础版、标准版、专业版和旗舰版,以满足不同客户的需求。如果你觉得现有计费方案无法满足自己的特定需求,请联系 TDengine Cloud 的客户支持团队,他们将为你量身定制计费方案。注册后,你将获得一定的免费额度,以便体验服务。

View File

@ -0,0 +1,46 @@
---
sidebar_label: 快速体验
title: 快速体验写入和查询
toc_max_heading_level: 4
---
## 体验写入
taosBenchmark 是一个专为测试 TDengine 性能而设计的工具它能够全面评估TDengine 在写入、查询和订阅等方面的功能表现。该工具能够模拟大量设备产生的数据,并允许用户灵活控制数据库、超级表、标签列的数量和类型、数据列的数量和类型、子表数量、每张子表的数据量、写入数据的时间间隔、工作线程数量以及是否写入乱序数据等策略。
启动 TDengine 的服务,在终端中执行 taosBenchmark -y 命令,系统将自动在数据库 test 下创建一张名为 meters 的超级表。这张超级表将包含 10,000 张子表,表名从 d0 到 d9999每张表包含 10,000 条记录。每条记录包含 ts时间戳、current电流、voltage电压和 phase相位4 个字段。时间戳范围从“2017-07-14 10:40:00.000”到“2017-07-14 10:40:09.999”。每张表还带有 location 和 groupId 两个标签,其中 groupId 设置为 1 到 10而 location 则设置为 California.Campbell、California.Cupertino 等城市信息。
执行该命令后,系统将迅速完成 1 亿条记录的写入过程。实际所需时间取决于硬件性能,但即便在普通 PC 服务器上,这个过程通常也只需要十几秒。
taosBenchmark 提供了丰富的选项,允许用户自定义测试参数,如表的数目、记录条数等。要查看详细的参数列表,请在终端中输入 taosBenchmark --help 命令。有关taosBenchmark 的详细使用方法,请参考 TDengine 的官方文档
## 体验查询
使用上述 taosBenchmark 插入数据后,可以在 TDengine CLItaos输入查询命令体验查询速度。
1. 查询超级表 meters 下的记录总条数
```shell
SELECT COUNT(*) FROM test.meters;
```
2. 查询 1 亿条记录的平均值、最大值、最小值
```shell
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```
3. 查询 location = "California.SanFrancisco" 的记录总条数
```shell
SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
```
4. 查询 groupId = 10 的所有记录的平均值、最大值、最小值
```shell
SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```
5. 对表 d1001 按每 10 秒进行平均值、最大值和最小值聚合统计
```shell
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
```
在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。
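作为补充,下面给出一个示意性的 Rust 查询片段,复用本 diff 订阅示例中的结构体反序列化写法(`query` 与 `deserialize` 的组合用法以 taos crate 实际版本为准DSN 仅为示例):
```rust
use chrono::{DateTime, Local};
use taos::*;

// 与 SELECT 列一一对应的结构体,写法同本 diff 中的 Record
#[derive(Debug, serde::Deserialize)]
#[allow(dead_code)]
struct Row {
    ts: DateTime<Local>,
    current: Option<f32>,
    voltage: Option<i32>,
    phase: Option<f32>,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let taos = TaosBuilder::from_dsn("ws://localhost:6041")?.build().await?;
    let mut result = taos
        .query("SELECT ts, current, voltage, phase FROM test.d1001 LIMIT 5")
        .await?;
    let rows: Vec<Row> = result.deserialize().try_collect().await?;
    println!("{:#?}", rows);
    Ok(())
}
```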

View File

@ -0,0 +1,15 @@
---
sidebar_label: 立即开始
title: 快速体验 TDengine
toc_max_heading_level: 4
---
TDengine 的安装包包括服务端taosd、应用驱动taosc、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、命令行程序CLItaos和一些工具软件。
为了适应不同用户的操作系统偏好TDengine 在 Linux 系统上提供 tar.gz 、 Deb 和 RPM 格式安装包。此外,还支持 apt-get 方式安装,这种方式简便快捷,适合熟悉 Linux 包管理的用户。
除了 Linux 平台以外TDengine 还支持在 Windows X64 平台和 macOS X64/M1 平台上安装,扩大了其适用性,满足了跨平台的需求。
对于希望进行虚拟化安装的用户TDengine 同样提供了 Docker 镜像,使得用户可以快速搭建和体验 TDengine 环境,不需要烦琐的手动配置过程。
本节将详细指导如何在 Linux 操作系统中高效地安装和启动 TDengine 3.3.0.0 版本。同时,为了迎合不同用户的多样化需求,本节还将介绍 TDengine 在 Docker 容器中的安装和启动步骤,为用户提供更多灵活性和便利性选项。

View File

@ -0,0 +1,217 @@
---
sidebar_label: 数据模型
title: TDengine 数据模型
toc_max_heading_level: 4
---
为了清晰地阐述时序数据的基本概念,并为示例程序的编写提供便利,整个文档都将以智能电表为例,探讨时序数据的典型应用场景。设想有一种型号的智能电表,它能够采集
电流、电压和相位这 3 个模拟量。此外,每块智能电表还具有位置和分组等静态属性。这些智能电表采集的数据示例如下表所示。
|Device ID| Timestamp | Current | Voltage | Phase | Location | Group ID |
|:-------:|:---------:|:-------:|:-------:|:-----:|:--------:|:--------:|
|d1001 |1538548685000 | 10.3 | 219 | 0.31 | California.SanFrancisco |2|
|d1002 | 1538548684000 | 10.2 | 220 | 0.23 | California.SanFrancisco |3|
|d1003 | 1538548686500 | 11.5 | 221 | 0.35 | California.LosAngeles | 3 |
|d1004 | 1538548685500 | 13.4 | 223 | 0.29 | California.LosAngeles | 2 |
|d1001 | 1538548695000 | 12.6 | 218 | 0.33 | California.SanFrancisco |2|
|d1004 | 1538548696600 | 11.8 | 221 | 0.28 | California.LosAngeles | 2 |
|d1002 | 1538548696650 | 10.3 | 218 | 0.25 | California.SanFrancisco | 3 |
|d1001 | 1538548696800 | 12.3 | 221 | 0.31 | California.SanFrancisco | 2 |
上表详细展示了各设备 IDDevice ID对应的智能电表在特定时刻采集的物理量数据涵盖电流current、电压voltage和相位phase等重要信息。除了动态采集的数据以外每块智能电表还配备了一组静态标签tag例如位置location和分组 IDGroup ID等。这些设备能够根据外部触发事件或预设的周期进行数据采集确保数据的连续性和时序性从而构成一个持续更新的数据流。
## 基本概念
### 采集量
采集量是指通过各种传感器、设备或其他类型的采集点所获取的物理量如电流、电压、温度、压力、GPS 等。由于这些物理量随时间不断变化,因此采集的数据类型多
样,包括整型、浮点型、布尔型以及字符串等。随着时间的积累,存储的数据将持续增长。以智能电表为例,其中的 current电流、voltage电压和 phase相位便是典型的采集量。
### 标签
标签是指附着在传感器、设备或其他类型采集点上的静态属性这些属性不会随时间发生变化例如设备型号、颜色、设备所在地等。标签的数据类型可以是任意类型。尽管标签本身是静态的但在实际应用中用户可能需要对标签进行修改、删除或添加。与采集量不同随着时间的推移存储的标签数据量保持相对稳定不会呈现明显的增长趋势。在智能电表的示例中location位置和 Group ID分组 ID就是典型的标签。
### 数据采集点
数据采集点是指在一定的预设时间周期内或受到特定事件触发时,负责采集物理量的硬件或软件设备。一个数据采集点可以同时采集一个或多个采集量,但这些采集量都是在同一时刻获取的,并拥有相同的时间戳。对于结构复杂的设备,通常会有多个数据采集点,每个数据采集点的采集周期可能各不相同,它们之间完全独立,互不干扰。以一辆汽车为例,可能有专门的数据采集点用于采集 GPS有的数据采集点负责监控发动机状态还有的数据采集点则专注于车内环境的监测。这样一辆汽车就包含了 3个不同类型的数据采集点。在智能电表的示例中d1001、d1002、d1003、d1004 等标识符即代表了不同的数据采集点。
### 表
鉴于采集的数据通常是结构化数据为了降低用户的学习难度TDengine 采用传统的关系型数据库模型来管理数据。同时为了充分发挥时序数据的特性TDengine 采取了“一个数据采集点一张表”的设计策略,即要求为每个数据采集点单独建立一张表。例如,若有千万块智能电表,则在 TDengine 中需要创建相应数量的表。在智能电表的示例数据中,设备 ID 为 d1001 的智能电表对应着 TDengine 中的一张表,该电表采集的所有时序数据均存储于此表中。这种设计方式既保留了关系型数据库的易用性,又充分利用了时序数据的独特优势。
“一个数据采集点一张表”的设计有几大优点:
1. 由于不同数据采集点产生数据的过程完全独立,每个数据采集点的数据源是唯一的,一张表也就只有一个写入者,这样就可采用无锁方式来写数据,写入速度能大幅提升。
2. 对于一个数据采集点而言,其产生的数据是按照时间递增的,因此写的操作可用追加的方式实现,进一步大幅提高数据写入速度。
3. 一个数据采集点的数据是以块为单位连续存储的。这样,每次读取一个时间段的数据,能大幅减少随机读取操作,成数量级的提升读取和查询速度。
4. 一个数据块内部,采用列式存储,对于不同的数据类型,可以采用不同压缩算法来提高压缩率。并且,由于采集量的变化通常是缓慢的,压缩率会更高。
如果采用传统的方式,将多个数据采集点的数据写入一张表,由于网络延时不可控,不同数据采集点的数据到达服务器的时序是无法保证的,写入操作是要有锁保护的,而且一个数据采集点的数据是难以保证连续存储在一起的。采用一个数据采集点一张表的方式,能最大程度的保证单个数据采集点的插入和查询的性能是最优的,而且数据压缩率最高。
在 TDengine 中通常使用数据采集点的名称d1001来做表名每个数据采集点可以有多个采集量current、voltage、phase 等),每个采集量对应一张表的一列。采集量的数据类型可以是整型、浮点型、字符串等。
此外,表的第一列必须是时间戳,即数据类型为 Timestamp。对于每个采集量TDengine 将使用第一列时间戳建立索引,采用列式存储。对于复杂的设备,比如汽车,它有多个数据采集点,则需要为一辆汽车建立多张表。
### 超级表
采用“一个数据采集点一张表”的设计虽然有助于针对性地管理每个采集点,但随着设备数量不断增加表的数量也会急剧增加,这给数据库管理和数据分析带来了挑战。在进行跨数据采集点的聚合操作时,用户需要面对大量的表,工作变得异常繁重。
为了解决这个问题TDengine 引入超级表Super Table简称为 STable的概念。超级表是一种数据结构它能够将某一特定类型的数据采集点聚集在一起形成一张逻辑上的统一表。这些数据采集点具有相同的表结构但各自的静态属性如标签可能不同。创建超级表时除了定义采集量以外还需定义超级表的标签。一张超级表至少包含一个时间戳列、一个或多个采集量列以及一个或多个标签列。此外超级表的标签可以灵活地进行增加、修改或删除操作。
在 TDengine 中,表代表具体的数据采集点,而超级表则代表一组具有相同属性的数据采集点集合。以智能电表为例,我们可以为该类型的电表创建一张超级表,其中包含了所有智能电表的共有属性和采集量。这种设计不仅简化了表的管理,还便于进行跨数据采集点的聚合操作,从而提高数据处理的效率。
### 子表
子表是数据采集点在逻辑上的一种抽象表示,它是隶属于某张超级表的具体表。用户可以将超级表的定义作为模板,并通过指定子表的标签值来创建子表。这样,通过超级表生成的表便被称为子表。超级表与子表之间的关系主要体现在以下几个方面。
- 一张超级表包含多张子表,这些子表具有相同的表结构,但标签值各异。
- 子表的表结构不能直接修改,但可以修改超级表的列和标签,且修改对所有子表立即生效。
- 超级表定义了一个模板,自身并不存储任何数据或标签信息。
在 TDengine 中查询操作既可以在子表上进行也可以在超级表上进行。针对超级表的查询TDengine 将所有子表中的数据视为一个整体首先通过标签筛选出满足查询条件的表然后在这些子表上分别查询时序数据最终将各张子表的查询结果合并。本质上TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。为了更好地理解采集量、标签、超级表与子表之间的关系,这里以智能电表的数据模型为例进行说明。可以参考图 3-1 的数据模型,以便更直观地了解这些概念。
为了更好地理解采集量、标签、超级与子表的关系,以智能电表为例,可以参考下图
![数据模型示意图](./data-model.png)
### 库
库是 TDengine 中用于管理一组表的集合。TDengine 允许一个运行实例包含多个库,并且每个库都可以配置不同的存储策略。由于不同类型的数据采集点通常具有不同的数据特征,如数据采集频率、数据保留期限、副本数量、数据块大小等。为了在各种场景下确保 TDengine 能够发挥最大效率,建议将具有不同数据特征的超级表创建在不同的库中。
在一个库中,可以包含一到多张超级表,但每张超级表只能属于一个库。同时,一张超级表所拥有的所有子表也都将存储在该库中。这种设计有助于实现更细粒度的数据管理和优化,确保 TDengine 能够根据不同数据特征提供最佳的处理性能。
### 时间戳
时间戳在时序数据处理中扮演着至关重要的角色,特别是在应用程序需要从多个不同时区访问数据库时,这一问题变得更加复杂。在深入了解 TDengine 如何处理时间戳与时区之前,我们先介绍以下几个基本概念。
- 本地日期时间:指特定地区的当地时间,通常表示为 yyyy-MM-dd hh:mm:ss.SSS 格式的字符串。这种时间表示不包含任何时区信息如“2021-07-21 12:00:00.000”。
- 时区地球上不同地理位置的标准时间。协调世界时Universal Time CoordinatedUTC或格林尼治时间是国际时间标准其他时区通常表示为相对于 UTC 的偏移量如“UTC+8”代表东八区时间。
- UTC 时间戳:表示自 UNIX 纪元(即 UTC 时间 1970 年 1 月 1 日 0 点起经过的毫秒数。例如“1700000000000”对应的日期时间是“2023-11-14 22:13:20UTC+0”。

在 TDengine 中保存时序数据时,实际上保存的是 UTC 时间戳。TDengine 在写入数据时,时间戳的处理分为如下两种情况。
- RFC-3339 格式当使用这种格式时TDengine 能够正确解析带有时区信息的时间字符串为 UTC 时间戳。例如“2018-10-03T14:38:05.000+08:00”会被转换为UTC 时间戳。
- 非 RFC-3339 格式如果时间字符串不包含时区信息TDengine 将使用应用程序所在的时区设置自动将时间转换为 UTC 时间戳。
在查询数据时TDengine 客户端会根据应用程序当前的时区设置自动将保存的UTC 时间戳转换成本地时间进行显示,确保用户在不同时区下都能看到正确的时间信息。
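下面用一个最小的 Rust 片段示意“带时区信息”这种情况的解析结果chrono 是本 diff 依赖列表中已有的库;注意解析得到的 1538548685000 正是前文示例数据中 d1001 的时间戳):
```rust
use chrono::{DateTime, FixedOffset, Utc};

fn main() {
    // RFC-3339 格式,带 +08:00 时区信息
    let s = "2018-10-03T14:38:05.000+08:00";
    let utc: DateTime<Utc> = s
        .parse::<DateTime<FixedOffset>>()
        .expect("valid RFC-3339 timestamp")
        .with_timezone(&Utc);
    // TDengine 实际保存的就是这样的 UTC 毫秒时间戳
    println!("UTC ms: {}", utc.timestamp_millis()); // 1538548685000
}
```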
## 数据建模
本节用智能电表做例子,简要的介绍如何在 TDengine 里使用 SQL 创建数据库、超级表、表的基本操作。
### 创建数据库
创建一个数据库以存储电表数据的 SQL 如下:
```sql
CREATE DATABASE power PRECISION 'ms' KEEP 3650 DURATION 10 BUFFER 16;
```
该 SQL 将创建一个名为 `power` 的数据库,各参数说明如下:
- `PRECISION 'ms'` 这个数据库的时序数据使用毫秒ms精度的时间戳
- `KEEP 3650`:这个库的数据将保留 3650 天,超过 3650 天的数据将被自动删除
- `DURATION 10` :每 10 天的数据放在一个数据文件中
- `BUFFER 16` :写入使用大小为 16MB 的内存池。
在创建 power 数据库后,可以执行 USE 语句切换数据库。
```sql
use power;
```
该 SQL 将当前数据库切换为 `power`,表示之后的插入、查询等操作,都在当前的 `power` 数据库中进行。
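如果更习惯在应用程序中完成建库、切库,也可以使用本文档参数绑定示例中出现过的 taos crate 接口,写一个简化版(不带上面 SQL 中的 PRECISION、KEEP 等参数DSN 仅为示例):
```rust
use taos::*;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let taos = TaosBuilder::from_dsn("ws://localhost:6041")?.build().await?;
    // 等价于不带附加参数的 CREATE DATABASE power若库已存在会返回错误
    taos.create_database("power").await?;
    // 等价于 USE power
    taos.use_database("power").await?;
    Ok(())
}
```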
### 创建超级表
创建一张名为 `meters` 的超级表的 SQL 如下:
```sql
CREATE STABLE meters (
ts timestamp,
current float,
voltage int,
phase float
) TAGS (
location varchar(64),
group_id int
);
```
在 TDengine 中,创建超级表的 SQL 语句与关系型数据库类似。例如,上面的 SQL 中,`CREATE STABLE` 为关键字,表示创建超级表;接着,`meters` 是超级表的名称;在表名后面的括号中,定义超级表的列(列名、数据类型等),规则如下:
1. 第 1 列必须为时间戳列。例如:`ts timestamp` 表示,时间戳列名是 `ts`,数据类型为 `timestamp`
2. 从第 2 列开始是采集量列。采集量的数据类型可以为整型、浮点型、字符串等。例如:`current float` 表示,采集量电流 `current`,数据类型为 `float`
最后,`TAGS` 是关键字,表示标签;在 TAGS 后面的括号中,定义超级表的标签(标签名、数据类型等)。
1. 标签的数据类型可以为整型、浮点型、字符串等。例如:`location varchar(64)` 表示,标签地区 `location`,数据类型为 `varchar(64)`
2. 标签的名称不能与采集量列的名称相同。
### 创建表
通过超级表创建子表 `d1001` 的 SQL 如下:
```sql
CREATE TABLE d1001
USING meters (
location,
group_id
) TAGS (
"California.SanFrancisco",
2
);
```
上面的 SQL 中,`CREATE TABLE` 为关键字,表示创建表;`d1001` 是子表的名称;`USING` 是关键字,表示要使用超级表作为模版;`meters` 是超级表的名称;在超级表名后的括号中,`location`, `group_id` 表示,是超级表的标签列名列表;`TAGS` 是关键字,在后面的括号中指定子表的标签列的值。`"California.SanFrancisco"` 和 `2` 表示子表 `d1001` 的位置为 `California.SanFrancisco`,分组 ID 为 `2`
当对超级表进行写入或查询操作时,用户可以使用伪列 tbname 来指定或输出对应操作的子表名。
### 自动建表
在 TDengine 中,为了简化用户操作并确保数据的顺利写入,即使子表尚不存在,用户也可以使用带有 using 关键字的自动建表 SQL 进行数据写入。这种机制允许系统在遇到不存在的子表时,自动创建该子表,然后再执行数据写入操作。如果子表已经存在,系统则会直接写入数据,不需要任何额外的步骤。
在写入数据的同时自动建表的 SQL 如下:
```sql
INSERT INTO d1002
USING meters
TAGS (
"California.SanFrancisco",
2
) VALUES (
NOW,
10.2,
219,
0.32
);
```
上面的 SQL 中,`INSERT INTO d1002` 表示,向子表 `d1002` 中写入数据;`USING meters` 表示,使用超级表 `meters` 作为模版;`TAGS ("California.SanFrancisco", 2)` 表示,子表 `d1002` 的标签值分别为 `California.SanFrancisco``2``VALUES (NOW, 10.2, 219, 0.32)` 表示,向子表 `d1002` 插入一行记录值分别为NOW当前时间戳、10.2电流、219电压、0.32(相位)。在 TDengine 执行这条 SQL 时,如果子表 `d1002` 已经存在,则直接写入数据;当子表 `d1002` 不存在,会先自动创建子表,再写入数据。
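下面用一个示意性的 Rust 片段执行这条自动建表写入沿用本文档其他示例中已出现的 `TaosBuilder`、`exec` 用法DSN 仅为示例,且假设 `power` 库与 `meters` 超级表已按上文创建):
```rust
use taos::*;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // 示例 DSN假设本机已启动 taosAdapterWebSocket 端口 6041
    let taos = TaosBuilder::from_dsn("ws://localhost:6041")?.build().await?;
    taos.exec("USE power").await?;
    // 子表 d1002 不存在时,先按超级表 meters 自动建表,再写入这行数据
    taos.exec(
        "INSERT INTO d1002 USING meters TAGS ('California.SanFrancisco', 2) \
         VALUES (NOW, 10.2, 219, 0.32)",
    )
    .await?;
    println!("insert with auto table creation done");
    Ok(())
}
```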
### 创建普通表
在 TDengine 中,除了具有标签的子表以外,还存在一种不带任何标签的普通表。这类表与普通关系型数据库中的表相似,用户可以使用 SQL 创建它们。
普通表与子表的区别在于:
1. 标签扩展性:子表在普通表的基础上增加了静态标签,这使得子表能够携带更多的元数据信息。此外,子表的标签是可变的,用户可以根据需要增加、删除或修改标签。
2. 表归属:子表总是隶属于某张超级表,它们是超级表的一部分。而普通表则独立存在,不属于任何超级表。
3. 转换限制:在 TDengine 中,普通表无法直接转换为子表,同样,子表也无法转换为普通表。这两种表类型在创建时就确定了它们的结构和属性,后期无法更改。
总结来说,普通表提供了类似于传统关系型数据库的表功能,而子表则通过引入标签机制,为时序数据提供了更丰富的描述能力和更灵活的管理方式。用户可以根据实际需求选择创建普通表还是子表。
创建不带任何标签的普通表的 SQL 如下:
```sql
CREATE TABLE d1003(
ts timestamp,
current float,
voltage int,
phase float,
location varchar(64),
group_id int
);
```
上面的 SQL 表示,创建普通表 `d1003` ,表结构包括 `ts`、`current`、`voltage`、`phase`、`location`、`group_id`,共 6 个列。这样的数据模型,与关系型数据库完全一致。
采用普通表作为数据模型意味着静态标签数据(如 location 和 group_id会重复存储在表的每一行中。这种做法不仅增加了存储空间的消耗而且在进行查询时由于无法直接利用标签数据进行过滤查询性能会显著低于使用超级表的数据模型。
### 多列模型 VS 单列模型
TDengine 支持灵活的数据模型设计,包括多列模型和单列模型。多列模型允许将多个由同一数据采集点同时采集且时间戳一致的物理量作为不同列存储在同一张超级表中。然而,在某些极端情况下,可能会采用单列模型,即每个采集的物理量都单独建立一张表。例如,对于电流、电压和相位这 3 种物理量,可能会分别建立 3 张超级表。
尽管 TDengine 推荐使用多列模型,因为这种模型在写入效率和存储效率方面通常更优,但在某些特定场景下,单列模型可能更为适用。例如,当一个数据采集点的采集量种类经常发生变化时,如果采用多列模型,就需要频繁修改超级表的结构定义,这会增加应用程序的复杂性。在这种情况下,采用单列模型可以简化应用程序的设计和管理,因为它允许独立地管理和扩展每个物理量的超级表。
总之TDengine 提供了灵活的数据模型选项,用户可以根据实际需求和场景选择最适合的模型,以优化性能和管理复杂性。

View File

@ -0,0 +1,135 @@
---
sidebar_label: 数据写入
title: TDengine 数据写入、更新与删除
toc_max_heading_level: 4
---
本章以智能电表的数据模型为例介绍如何在 TDengine 中使用 SQL 来写入、更新、删除时序数据。
## 写入
在 TDengine 中,用户可以使用 SQL 的 insert 语句写入时序数据。
### 一次写入一条
假设设备 ID 为 d1001 的智能电表在 2018 年 10 月 3 日 14:38:05 采集到数据:电流 10.3A电压 219V相位 0.31。在第 3 章中,我们已经在 TDengine 的 power 数据库中创建了属于超级表 meters 的子表 d1001。
1. 可以通过下面的 INSERT 语句向子表 d1001 中写入时序数据。
```sql
insert into d1001 (ts, current, voltage, phase) values ( "2018-10-03 14:38:05", 10.3, 219, 0.31)
```
上面的 SQL 向子表 `d1001``ts`, `current`, `voltage`, `phase` 这 4 列分别写入 `2018-10-03 14:38:05`, `10.3`, `219`, `0.31`
2. 当 `INSERT` 语句中的 `VALUES` 部分包含了表的所有列时,可以省略 `VALUES` 前的字段列表,如下面的 SQL 语句所示,其与前面指定列的 INSERT 语句,效果完全一样。
```sql
insert into d1001 values("2018-10-03 14:38:05", 10.3, 219, 0.31)
```
3. 对于表的时间戳列(第一列),也可以直接使用数据库精度的时间戳。
```sql
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```
以上三种 SQL 的效果完全相同。
### 一次写入多条
假设设备 ID 为 d1001 的智能电表每 10s 采集一次数据,每 30s 上报一次数据,即每 30s 需要写入 3 条数据。用户可以在一条 insert 语句中写入多条记录。如下 SQL 一共写入了 3 条数据。
```sql
insert into d1001 values
( "2018-10-03 14:38:05", 10.2, 220, 0.23),
( "2018-10-03 14:38:15", 12.6, 218, 0.33),
( "2018-10-03 14:38:25", 12.3, 221, 0.31)
```
### 一次写入多表
假设设备 ID 为 d1001、d1002、d1003 的 3 台智能电表,都是每 30 秒需要写入 3 条数据。对于这种情况TDengine 支持一次向多个表写入多条数据。
```sql
INSERT INTO d1001 VALUES
("2018-10-03 14:38:05", 10.2, 220, 0.23),
("2018-10-03 14:38:15", 12.6, 218, 0.33),
("2018-10-03 14:38:25", 12.3, 221, 0.31)
d1002 VALUES
("2018-10-03 14:38:04", 10.2, 220, 0.23),
("2018-10-03 14:38:14", 10.3, 218, 0.25),
("2018-10-03 14:38:24", 10.1, 220, 0.22)
d1003 VALUES
("2018-10-03 14:38:06", 11.5, 221, 0.35),
("2018-10-03 14:38:16", 10.4, 220, 0.36),
("2018-10-03 14:38:26", 10.3, 220, 0.33)
;
```
上面的 SQL 一共写入了九条数据。
### 指定列写入
可以通过指定列向表的部分列写入数据。SQL 中没有出现的列数据库将自动填充为空值NULL。注意时间戳列必须存在且值不能为空。如下 SQL 向子表 d1004 写入了一条数据。这条数据只包含电压和相位,电流值为 NULL。
```sql
insert into d1004 (ts, voltage, phase) values("2018-10-04 14:38:06", 223, 0.29)
```
### 写入时自动建表
用户可以使用带有 using 关键字的自动建表语句进行写入。当子表不存在时,先触发自动建表,再写入数据;当子表已经存在时,则直接写入。使用自动建表的 insert 语句也可以通过指定部分标签列进行写入未被指定的标签列的值为空值NULL。如下 SQL 写入一条数据。当子表 d1005 不存在时,先自动建表,标签 group_id 的值为 NULL再写入数据
```sql
insert into d1005
using meters (location)
tags ( "beijing.chaoyang")
values ( "2018-10-04 14:38:07", 10.15, 217, 0.33)
```
自动建表的 insert 语句也支持在一条语句中向多张表写入数据。如下 SQL 使用自动建表的 insert 语句共写入 9 条数据。
```sql
INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES
("2018-10-03 14:38:05", 10.2, 220, 0.23),
("2018-10-03 14:38:15", 12.6, 218, 0.33),
("2018-10-03 14:38:25", 12.3, 221, 0.31)
d1002 USING meters TAGS ("California.SanFrancisco", 3) VALUES
("2018-10-03 14:38:04", 10.2, 220, 0.23),
("2018-10-03 14:38:14", 10.3, 218, 0.25),
("2018-10-03 14:38:24", 10.1, 220, 0.22)
d1003 USING meters TAGS ("California.LosAngeles", 2) VALUES
("2018-10-03 14:38:06", 11.5, 221, 0.35),
("2018-10-03 14:38:16", 10.4, 220, 0.36),
("2018-10-03 14:38:26", 10.3, 220, 0.33)
;
```
### 通过超级表写入
TDengine 还支持直接向超级表写入数据。需要注意的是,超级表是一个模板,本身不存储数据,写入的数据是存储在对应的子表中。如下 SQL 通过指定 tbname 列向子表d1001 写入一条数据。
```sql
insert into meters (tbname, ts, current, voltage, phase, location, group_id)
values( "d1001v, "2018-10-03 14:38:05", 10.2, 220, 0.23, "California.SanFrancisco", 2)
```
### 零代码写入
为了方便用户轻松写入数据TDengine 已与众多知名第三方工具实现无缝集成,包括 Telegraf、Prometheus、EMQX、StatsD、collectd 和 HiveMQ 等。用户只须对这些工具进行简单的配置,便可轻松将数据导入 TDengine。此外TDengine Enterprise 还提供了丰富的连接器,如 MQTT、OPC、AVEVA PI System、Wonderware、Kafka、MySQL、Oracle 等。通过在 TDengine 端配置相应的连接信息,用户无须编写任何代码,即可高效地将来自不同数据源的数据写入 TDengine。
## 更新
可以通过写入重复时间戳的一条数据来更新时序数据,新写入的数据会替换旧值。 下面的 SQL通过指定列的方式向子表 `d1001` 中写入 1 行数据;当子表 `d1001` 中已经存在日期时间为 `2018-10-03 14:38:05` 的数据时,`current`电流的新值22会替换旧值。
```sql
INSERT INTO d1001 (ts, current) VALUES ("2018-10-03 14:38:05", 22);
```
## 删除
为方便用户清理由于设备故障等原因产生的异常数据TDengine 支持根据时间戳删除时序数据。 下面的 SQL将超级表 `meters` 中所有时间戳早于 `2021-10-01 10:40:00.100` 的数据删除。数据删除后不可恢复,请慎重使用。为了确保删除的数据确实是自己要删除的,建议可以先使用 select 语句加 where 后的删除条件查看要删除的数据内容,确认无误后再执行 delete 。
```sql
delete from meters where ts < '2021-10-01 10:40:00.100' ;
```

Some files were not shown because too many files have changed in this diff.