Merge pull request #266 from StoneT2000/master
Docs Update and Node.js Connector updated to 1.3.0 - Subscription, Continuous Query
This commit is contained in:
commit
8e665db561
|
@ -191,7 +191,7 @@ public Connection getConn() throws Exception{
|
|||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMEZONE, "UTC-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
Connection conn = DriverManager.getConnection(jdbcUrl, connProps);
|
||||
return conn;
|
||||
}
|
||||
|
@ -484,13 +484,13 @@ promise.then(function(result) {
|
|||
```
|
||||
#### Async functionality
|
||||
|
||||
Async queries can be performed using the same functions such as `cursor.execute`, `cursor.query`, but now with `_a` appended to them.
|
||||
Async queries can be performed using the same functions such as `cursor.execute`, `TaosQuery.execute`, but now with `_a` appended to them.
|
||||
|
||||
Say you want to execute an two async query on two seperate tables, using `cursor.query_a`, you can do that and get a TaosQuery object, which upon executing with the `execute_a` function, returns a promise that resolves with a TaosResult object.
|
||||
Say you want to execute two async queries on two separate tables, using `cursor.query`, you can do that and get a TaosQuery object, which upon executing with the `execute_a` function, returns a promise that resolves with a TaosResult object.
|
||||
|
||||
```javascript
|
||||
var promise1 = cursor.query_a('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
|
||||
var promise2 = cursor.query_a('select count(*), avg(v1), avg(v2) from meter2;').execute_a();
|
||||
var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
|
||||
var promise2 = cursor.query('select count(*), avg(v1), avg(v2) from meter2;').execute_a();
|
||||
promise1.then(function(result) {
|
||||
result.pretty();
|
||||
})
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
|
||||
### A Typical IoT Scenario
|
||||
|
||||
In a typical IoT scenario, there are many types of devices. Each device is collecting one or multiple metrics. For a specific type of device, the collected data looks like the table below:
|
||||
In a typical IoT scenario, there are many types of devices. Each device is collecting one or multiple metrics. For a specific type of device, the collected data could look like the table below:
|
||||
|
||||
| Device ID | Time Stamp | Value 1 | Value 2 | Value 3 | Tag 1 | Tag 2 |
|
||||
| :-------: | :-----------: | :-----: | :-----: | :-----: | :---: | :---: |
|
||||
|
@ -14,46 +14,46 @@ In a typical IoT scenario, there are many types of devices. Each device is colle
|
|||
| D1001 | 1538548695000 | 12.6 | 218 | 0.33 | Red | Tesla |
|
||||
| D1004 | 1538548696600 | 11.8 | 221 | 0.28 | Black | Honda |
|
||||
|
||||
Each data record has device ID, timestamp, the collected metrics, and static tags associated with the device. Each device generates a data record in a pre-defined timer or triggered by an event. It is a sequence of data points, like a stream.
|
||||
Each data record contains the device ID, timestamp, collected metrics, and static tags associated with the device. Each device generates a data record in a pre-defined timer or triggered by an event. It is a sequence of data points like a stream.
|
||||
|
||||
### Data Characteristics
|
||||
|
||||
Being a series of data points over time, data points generated by devices, sensors, servers, or applications have strong common characteristics.
|
||||
Being a series of data points over time, the data points generated by devices, sensors, servers, and/or applications have some strong common characteristics:
|
||||
|
||||
1. metric is always structured data;
|
||||
1. metrics are always structured data;
|
||||
2. there are rarely delete/update operations on collected data;
|
||||
3. there is only one single data source for one device or sensor;
|
||||
4. ratio of read/write is much lower than typical Internet application;
|
||||
5. the user pays attention to the trend of data, not the specific value at a specific time;
|
||||
4. ratio of read/write is much lower than typical Internet applications;
|
||||
5. the user pays attention to the trend of data, not a specific value at a specific time;
|
||||
6. there is always a data retention policy;
|
||||
7. the data query is always executed in a given time range and a subset of devices;
|
||||
8. real-time aggregation or analytics is mandatory;
|
||||
9. traffic is predictable based on the number of devices and sampling frequency;
|
||||
10. data volume is huge, a system may generate 10 billion data points in a day.
|
||||
|
||||
By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data. The system efficiency is improved significantly.
|
||||
By utilizing the above characteristics, TDengine designs the storage and computing engine in a special and optimized way for time-series data, resulting in massive improvements in system efficiency.
|
||||
|
||||
### Relational Database Model
|
||||
|
||||
Since time-series data is more likely to be structured data, TDengine adopts the traditional relational database model to process them. You need to create a database, create tables with schema definition, then insert data points and execute queries to explore the data. Standard SQL is used, there is no learning curve.
|
||||
Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them. You need to create a database, create tables with schema definitions, then insert data points and execute queries to explore the data. Standard SQL is used, making it easy for anyone to get started and eliminating any learning curve.
|
||||
|
||||
### One Table for One Device
|
||||
|
||||
Due to different network latency, the data points from different devices may arrive at the server out of order. But for the same device, data points will arrive at the server in order if system is designed well. To utilize this special feature, TDengine requires the user to create a table for each device (time-stream). For example, if there are over 10,000 smart meters, 10,000 tables shall be created. For the table above, 4 tables shall be created for device D1001, D1002, D1003 and D1004, to store the data collected.
|
||||
Due to different network latencies, the data points from different devices may arrive at the server out of order. But for the same device, data points will arrive at the server in order if the system is designed well. To utilize this special feature, TDengine requires the user to create a table for each device (time-stream). For example, if there are over 10,000 smart meters, 10,000 tables shall be created. For the table above, 4 tables shall be created for device D1001, D1002, D1003, and D1004 to store the data collected.
|
||||
|
||||
This strong requirement can guarantee the data points from a device can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one device in a time range, this design will reduce the read latency significantly since a whole block is owned by one single device. Also, write latency can be significantly reduced too, since the data points generated by the same device will arrive in order, the new data point will be simply appended to a block. Cache block size and the rows of records in a file block can be configured to fit the scenarios.
|
||||
This strong requirement can guarantee that all data points from a device can be saved in a continuous memory/hard disk space block by block. If queries are applied only on one device in a time range, this design will reduce the read latency significantly since a whole block is owned by one single device. Additionally, write latency can be significantly reduced too as the data points generated by the same device will arrive in order, the new data point will be simply appended to a block. Cache block size and the rows of records in a file block can be configured to fit different scenarios for optimal efficiency.
|
||||
|
||||
### Best Practices
|
||||
|
||||
**Table**: TDengine suggests to use device ID as the table name (like D1001 in the above diagram). Each device may collect one or more metrics (like value1, valu2, valu3 in the diagram). Each metric has a column in the table, the metric name can be used as the column name. The data type for a column can be int, float, double, tinyint, bigint, bool or binary. Sometimes, a device may have multiple metric group, each group have different sampling period, you shall create a table for each group for each device. The first column in the table must be time stamp. TDengine uses time stamp as the index, and won’t build the index on any metrics stored.
|
||||
**Table**: TDengine suggests to use device ID as the table name (like D1001 in the above diagram). Each device may collect one or more metrics (like value1, value2, value3 in the diagram). Each metric has a column in the table, the metric name can be used as the column name. The data type for a column can be int, float, double, tinyint, bigint, bool or binary. Sometimes, a device may have multiple metric groups, each group containing different sampling periods, so for best practice you should create a table for each group for each device. The first column in the table must be a time stamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored.
|
||||
|
||||
**Tags:** to support aggregation over multiple tables efficiently, [STable(Super Table)](../super-table) concept is introduced by TDengine. A STable is used to represent the same type of device. The schema is used to define the collected metrics(like value1, value2, value3 in the diagram), and tags are used to define the static attributes for each table or device(like tag1, tag2 in the diagram). A table is created via STable with a specific tag value. All or a subset of tables in a STable can be aggregated by filtering tag values.
|
||||
**Tags:** To support aggregation over multiple tables efficiently, the [STable(Super Table)](../super-table) concept is introduced by TDengine. A STable is used to represent the same type of device. The schema is used to define the collected metrics (like value1, value2, value3 in the diagram), and tags are used to define the static attributes for each table or device (like tag1, tag2 in the diagram). A table is created via STable with a specific tag value. All or a subset of tables in a STable can be aggregated by filtering tag values.
|
||||
|
||||
**Database:** different types of devices may generate data points in different patterns and shall be processed differently. For example, sampling frequency, data retention policy, replication number, cache size, record size, the compression algorithm may be different. To make the system more efficient, TDengine suggests creating a different database with unique configurations for different scenarios
|
||||
**Database:** Different types of devices may generate data points in different patterns and should be processed differently. For example, sampling frequency, data retention policy, replication number, cache size, record size, the compression algorithm may be different. To make the system more efficient, TDengine suggests creating a different database with unique configurations for different scenarios.
|
||||
|
||||
**Schemaless vs Schema:** compared with NoSQL database, since a table with schema definition shall be created before the data points can be inserted, flexibilities are not that good, especially when the schema is changed. But in most IoT scenarios, the schema is well defined and is rarely changed, the loss of flexibilities won’t be a big pain to developers or the administrator. TDengine allows the application to change the schema in a second even there is a huge amount of historical data when schema has to be changed.
|
||||
**Schemaless vs Schema:** Compared with NoSQL databases, since a table with schema definitions must be created before the data points can be inserted, flexibilities are not that good, especially when the schema is changed. But in most IoT scenarios, the schema is well defined and is rarely changed, the loss of flexibility won't pose any impact to developers or administrators. TDengine allows the application to change the schema in a second even if there is a huge amount of historical data when the schema has to be changed.
|
||||
|
||||
TDengine does not impose a limitation on the number of tables, [STables](../super-table), or databases. You can create any number of STable or databases to fit the scenarios.
|
||||
TDengine does not impose a limitation on the number of tables, [STables](../super-table), or databases. You can create any number of STable or databases to fit different scenarios.
|
||||
|
||||
## Architecture
|
||||
|
||||
|
@ -62,12 +62,12 @@ There are two main modules in TDengine server as shown in Picture 1: **Managemen
|
|||
<center> <img src="../assets/structure.png"> </center>
|
||||
<center> Picture 1 TDengine Architecture </center>
|
||||
### MGMT Module
|
||||
The MGMT module deals with the storage and querying on metadata, which includes information about users, databases, and tables. Applications will connect to the MGMT module at first when connecting the TDengine server. When creating/dropping databases/tables, The request is sent to the MGMT module at first to create/delete metadata. Then the MGMT module will send requests to the data module to allocate/free resources required. In the case of writing or querying, applications still need to visit MGMT module to get meta data, according to which, then access the DNODE module.
|
||||
The MGMT module deals with the storage and querying on metadata, which includes information about users, databases, and tables. Applications will connect to the MGMT module at first when connecting to the TDengine server. When creating/dropping databases/tables, the request is sent to the MGMT module at first to create/delete metadata. Then the MGMT module will send requests to the data module to allocate/free resources required. In the case of writing or querying, applications still need to visit the MGMT module to get meta data, according to which, then access the DNODE module.
|
||||
|
||||
### DNODE Module
|
||||
The DNODE module is responsible for storing and querying data. For the sake of future scaling and high-efficient resource usage, TDengine applies virtualization on resources it uses. TDengine introduces the concept of virtual node (vnode), which is the unit of storage, resource allocation and data replication (enterprise edition). As is shown in Picture 2, TDengine treats each data node as an aggregation of vnodes.
|
||||
The DNODE module is responsible for storing and querying data. For the sake of future scaling and high-efficient resource usage, TDengine applies virtualization on resources it uses. TDengine introduces the concept of a virtual node (vnode), which is the unit of storage, resource allocation and data replication (enterprise edition). As is shown in Picture 2, TDengine treats each data node as an aggregation of vnodes.
|
||||
|
||||
When a DB is created, the system will allocate a vnode. Each vnode contains multiple tables, but a table belongs to only one vnode. Each DB has one or mode vnodes, but one vnode belongs to only one DB. Each vnode contains all the data in a set of tables. Vnodes have their own cache, directory to store data. Resources between different vnodes are exclusive with each other, no matter cache or file directory. However, resources in the same vnode are shared between all the tables in it. By virtualization, TDengine can distribute resources reasonably to each vnode and improve resource usage and concurrency. The number of vnodes on a dnode is configurable according to its hardware resources.
|
||||
When a DB is created, the system will allocate a vnode. Each vnode contains multiple tables, but a table belongs to only one vnode. Each DB has one or more vnodes, but one vnode belongs to only one DB. Each vnode contains all the data in a set of tables. Vnodes have their own cache and directory to store data. Resources between different vnodes are exclusive with each other, no matter cache or file directory. However, resources in the same vnode are shared between all the tables in it. Through virtualization, TDengine can distribute resources reasonably to each vnode and improve resource usage and concurrency. The number of vnodes on a dnode is configurable according to its hardware resources.
|
||||
|
||||
<center> <img src="../assets/vnode.png"> </center>
|
||||
<center> Picture 2 TDengine Virtualization </center>
|
||||
|
@ -75,10 +75,10 @@ When a DB is created, the system will allocate a vnode. Each vnode contains mult
|
|||
### Client Module
|
||||
TDengine client module accepts requests (mainly in SQL form) from applications and converts the requests to internal representations and sends to the server side. TDengine supports multiple interfaces, which are all built on top of TDengine client module.
|
||||
|
||||
For the communication between client and MGMT module, TCP/UDP is used, the port is set by the parameter mgmtShellPort in system configuration file taos.cfg, default is 6030. For the communication between client and DNODE module, TCP/UDP is used, the port is set by the parameter vnodeShellPort in the system configuration file, default is 6035.
|
||||
For the communication between client and MGMT module, TCP/UDP is used, the port is set by the parameter `mgmtShellPort` in system configuration file `taos.cfg`, default is 6030. For communication between the client and the DNODE module, TCP/UDP is used, the port is set by the parameter `vnodeShellPort` in the system configuration file, default is 6035.
|
||||
|
||||
## Writing Process
|
||||
Picture 3 shows the full writing process of TDengine. TDengine uses [Writing Ahead Log] (WAL) strategy to assure data security and integrity. Data received from the client is written to the commit log at first. When TDengine recovers from crashes caused by power lose or other situations, the commit log is used to recover data. After writting to commit log, data will be wrtten to the corresponding vnode cache, then an acknowledgment is sent to the application. There are two mechanisms that can flush data in cache to disk for persistent storage:
|
||||
Picture 3 shows the full writing process of TDengine. TDengine uses the [Writing Ahead Log](http://en.wikipedia.org/wiki/Write-ahead_logging) strategy to assure data security and integrity. Data received from the client is written to the commit log at first. When TDengine recovers from crashes caused by power loss or other situations, the commit log is used to recover data. After writing to the commit log, data will be written to the corresponding vnode cache, then an acknowledgment is sent to the application. There are two mechanisms that can flush data in cache to disk for persistent storage:
|
||||
|
||||
1. **Flush driven by timer**: There is a backend timer which flushes data in cache periodically to disks. The period is configurable via parameter commitTime in system configuration file taos.cfg.
|
||||
2. **Flush driven by data**: Data in the cache is also flushed to disks when the left buffer size is below a threshold. Flush driven by data can reset the timer of flush driven by the timer.
|
||||
|
@ -86,16 +86,16 @@ Picture 3 shows the full writing process of TDengine. TDengine uses [Writing Ahe
|
|||
<center> <img src="../assets/write_process.png"> </center>
|
||||
<center> Picture 3 TDengine Writting Process </center>
|
||||
|
||||
New commit log file will be opened when the committing process begins. When the committing process finishes, the old commit file will be removed.
|
||||
New commit log files will be opened when the committing process begins. When the committing process finishes, the old commit file will be removed.
|
||||
|
||||
## Data Storage
|
||||
|
||||
TDengine data are saved in _/var/lib/taos_ directory by default. It can be changed to other directories by setting the parameter dataDir in system configuration file taos.cfg.
|
||||
TDengine data are saved in _/var/lib/taos_ directory by default. It can be changed to other directories by setting the parameter `dataDir` in system configuration file taos.cfg.
|
||||
|
||||
TDengine's metadata includes the database, table, user, super table and tag information. To reduce the latency, metadata are all buffered in the cache.
|
||||
|
||||
Data records saved in tables are sharded according to the time range. Data of tables in the same vnode in a certain time range are saved in the same file group. This sharding strategy can effectively improve data searching speed. By default, one group of files contain data in 10 days, which can be configured by *daysPerFile* in the configuration file or by *DAYS* keyword in *CREATE DATABASE* clause.
|
||||
Data records saved in tables are sharded according to the time range. Data from tables in the same vnode in a certain time range are saved in the same file group. This sharding strategy can effectively improve data search speed. By default, one group of files contain data in 10 days, which can be configured by `daysPerFile` in the configuration file or by the *DAYS* keyword in *CREATE DATABASE* clause.
|
||||
|
||||
Data records are removed automatically once their lifetime is passed. The lifetime is configurable via parameter daysToKeep in the system configuration file. The default value is 3650 days.
|
||||
|
||||
Data in files are blockwise. A data block only contains one table's data. Records in the same data block are sorted according to the primary timestamp. To improve the compression ratio, records are stored column by column, and the different compression algorithm is applied based on each column's data type.
|
||||
Data in files are blockwise. A data block only contains one table's data. Records in the same data block are sorted according to the primary timestamp. To improve the compression ratio, records are stored column by column, and different compression algorithms are applied based on each column's data type.
|
|
@ -1,6 +1,6 @@
|
|||
# TAOS SQL
|
||||
|
||||
TDengine provides a SQL like query language to insert or query data. You can execute the SQL statements through TDengine Shell, or through C/C++, Java(JDBC), Python, Restful, Go APIs to interact with the `taosd` service.
|
||||
TDengine provides a SQL like query language to insert or query data. You can execute the SQL statements through the TDengine Shell, or through C/C++, Java(JDBC), Python, Restful, Go, and Node.js APIs to interact with the `taosd` service.
|
||||
|
||||
Before reading through, please have a look at the conventions used for syntax descriptions here in this documentation.
|
||||
|
||||
|
@ -82,7 +82,13 @@ All the keywords in a SQL statement are case-insensitive, but strings values are
|
|||
```mysql
|
||||
CREATE TABLE [IF NOT EXISTS] tb_name (timestamp_field_name TIMESTAMP, field1_name data_type1 [, field2_name data_type2 ...])
|
||||
```
|
||||
Note: 1) the first column must be timstamp, and system will set it as the primary key; 2) the record size is limited to 4096 bytes; 3) for binary or nachr data type, the length shall be specified, for example, binary(20), it means 20 bytes.
|
||||
Note:
|
||||
|
||||
1) The first column must be a `timestamp`, and the system will set it as the primary key.
|
||||
|
||||
2) The record size is limited to 4096 bytes.
|
||||
|
||||
3) For `binary` or `nchar` data types, the length must be specified. For example, binary(20) means a binary data type with 20 bytes.
|
||||
|
||||
|
||||
- **Drop a Table**
|
||||
|
@ -96,7 +102,12 @@ All the keywords in a SQL statement are case-insensitive, but strings values are
|
|||
```mysql
|
||||
SHOW TABLES [LIKE tb_name_wildcar]
|
||||
```
|
||||
It shows all tables in the current DB. Note: wildcard character can be used in the table name to filter tables. Wildcard character: 1) ’%’ means 0 to any number of characters; 2)’_’ underscore means exactly one character.
|
||||
It shows all tables in the current DB.
|
||||
|
||||
Note: Wildcard characters can be used in the table name to filter tables.
|
||||
Wildcard characters:
|
||||
1) ’%’ means 0 to any number of characters.
|
||||
2) ’_’ underscore means exactly one character.
|
||||
|
||||
|
||||
- **Print Table Schema**
|
||||
|
@ -120,7 +131,7 @@ All the keywords in a SQL statement are case-insensitive, but strings values are
|
|||
```
|
||||
If the table is created via [Super Table](), the schema can only be changed via STable. But for tables not created from STable, you can change their schema directly.
|
||||
|
||||
**Tips**: You can apply an operation on a table not in the current DB by concatenating DB name with the character '.', then with table name. For example, 'demo.tb1' means the operation is applied to table `tb1` in DB `demo` although `demo` is not the current selected DB.
|
||||
**Tips**: You can apply an operation on a table not in the current DB by concatenating DB name with the character '.', then with the table name. For example, 'demo.tb1' means the operation is applied to table `tb1` in DB `demo` even though `demo` is not the currently selected DB.
|
||||
|
||||
## Inserting Records
|
||||
|
||||
|
@ -144,7 +155,7 @@ All the keywords in a SQL statement are case-insensitive, but strings values are
|
|||
```mysql
|
||||
INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...)...;
|
||||
```
|
||||
Insert multiple data records to the table
|
||||
Insert multiple data records into the table
|
||||
|
||||
|
||||
- **Insert a Batch of Records with Selected Columns**
|
||||
|
@ -170,9 +181,9 @@ All the keywords in a SQL statement are case-insensitive, but strings values are
|
|||
tb2_name (tb2_field1_name, ...) VALUES(field1_value1, ...) (field1_value2, ...)
|
||||
```
|
||||
|
||||
Note: For a table, the new record must have timestamp bigger than the last data record, otherwise, it will be thrown away. If timestamp is 0, the time stamp will be set to the system time on server.
|
||||
Note: For a table, the new record must have a timestamp bigger than the last data record, otherwise, it will be discarded and not inserted. If the timestamp is 0, the time stamp will be set to the system time on the server.
|
||||
|
||||
**IMPORT**: If you do want to insert a historical data record into a table, use IMPORT command instead of INSERT. IMPORT has the same syntax as INSERT. If you want to import a batch of historical records, the records shall be ordered in the timestamp, otherwise, TDengine won't handle it in the right way.
|
||||
**IMPORT**: If you do want to insert a historical data record into a table, use IMPORT command instead of INSERT. IMPORT has the same syntax as INSERT. If you want to import a batch of historical records, the records must be ordered by the timestamp, otherwise, TDengine won't handle it in the right way.
|
||||
|
||||
## Data Query
|
||||
|
||||
|
@ -211,7 +222,7 @@ SELECT function_list FROM tb_name
|
|||
| _ | match with a single char | **`binary`** **`nchar`** |
|
||||
|
||||
1. For two or more conditions, only AND is supported, OR is not supported yet.
|
||||
2. For filtering, only a single range is supported. For example, `value>20 and value<30` is valid condition, but `value<20 AND value<>5` is invalid condition
|
||||
2. For filtering, only a single range is supported. For example, `value>20 and value<30` is a valid condition, but `value<20 AND value<>5` is an invalid condition
|
||||
|
||||
### Some Examples
|
||||
|
||||
|
@ -256,10 +267,14 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause]
|
||||
```
|
||||
Function: return the number of rows.
|
||||
Return Data Type: integer.
|
||||
Return Data Type: `integer`.
|
||||
Applicable Data Types: all.
|
||||
Applied to: table/STable.
|
||||
Note: 1) * can be used for all columns, as long as a column has non-NULL value, it will be counted; 2) If it is on a specific column, only rows with non-NULL value will be counted
|
||||
Note:
|
||||
|
||||
1) `*` can be used for all columns, as long as a column has non-NULL values, it will be counted.
|
||||
|
||||
2) If it is on a specific column, only rows with non-NULL values will be counted.
|
||||
|
||||
|
||||
- **AVG**
|
||||
|
@ -268,8 +283,8 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
SELECT AVG(field_name) FROM tb_name [WHERE clause]
|
||||
```
|
||||
Function: return the average value of a specific column.
|
||||
Return Data Type: double.
|
||||
Applicable Data Types: all types except timestamp, binary, nchar, bool.
|
||||
Return Data Type: `double`.
|
||||
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
|
||||
Applied to: table/STable.
|
||||
|
||||
|
||||
|
@ -279,8 +294,8 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
SELECT WAVG(field_name) FROM tb_name WHERE clause
|
||||
```
|
||||
Function: return the time-weighted average value of a specific column
|
||||
Return Data Type: double
|
||||
Applicable Data Types: all types except timestamp, binary, nchar, bool
|
||||
Return Data Type: `double`
|
||||
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`
|
||||
Applied to: table/STable
|
||||
|
||||
|
||||
|
@ -290,8 +305,8 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
SELECT SUM(field_name) FROM tb_name [WHERE clause]
|
||||
```
|
||||
Function: return the sum of a specific column.
|
||||
Return Data Type: long integer or double.
|
||||
Applicable Data Types: all types except timestamp, binary, nchar, bool.
|
||||
Return Data Type: `long integer` or `double`.
|
||||
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
|
||||
Applied to: table/STable.
|
||||
|
||||
|
||||
|
@ -300,9 +315,9 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
```mysql
|
||||
SELECT STDDEV(field_name) FROM tb_name [WHERE clause]
|
||||
```
|
||||
Function: return the standard deviation of a specific column.
|
||||
Function: returns the standard deviation of a specific column.
|
||||
Return Data Type: double.
|
||||
Applicable Data Types: all types except timestamp, binary, nchar, bool.
|
||||
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
|
||||
Applied to: table.
|
||||
|
||||
|
||||
|
@ -370,9 +385,11 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
```
|
||||
Function: return the `k` largest values.
|
||||
Return Data Type: the same data type.
|
||||
Applicable Data Types: all types except timestamp, binary, nchar, bool.
|
||||
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
|
||||
Applied to: table/STable.
|
||||
Note: 1) valid range of K: 1≤*k*≤100; 2) the associated time stamp will be returned too.
|
||||
Note:
|
||||
1) Valid range of `k`: 1≤*k*≤100
|
||||
2) The associated `timestamp` will be returned too.
|
||||
|
||||
|
||||
- **BOTTOM**
|
||||
|
@ -382,9 +399,11 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
```
|
||||
Function: return the `k` smallest values.
|
||||
Return Data Type: the same data type.
|
||||
Applicable Data Types: all types except timestamp, binary, nchar, bool.
|
||||
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
|
||||
Applied to: table/STable.
|
||||
Note: 1) valid range of K: 1≤*k*≤100; 2) the associated timestamp will be returned too.
|
||||
Note:
|
||||
1) valid range of `k`: 1≤*k*≤100;
|
||||
2) The associated `timestamp` will be returned too.
|
||||
|
||||
|
||||
- **PERCENTILE**
|
||||
|
@ -393,7 +412,7 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
```
|
||||
Function: the value of the specified column below which `P` percent of the data points fall.
|
||||
Return Data Type: the same data type.
|
||||
Applicable Data Types: all types except timestamp, binary, nchar, bool.
|
||||
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
|
||||
Applied to: table/STable.
|
||||
Note: The range of `P` is `[0, 100]`. When `P=0` , `PERCENTILE` returns the equal value as `MIN`; when `P=100`, `PERCENTILE` returns the equal value as `MAX`.
|
||||
|
||||
|
@ -406,7 +425,7 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
Return Data Type: the same data type.
|
||||
Applicable Data Types: all types.
|
||||
Applied to: table/STable.
|
||||
Note: different from last, last_row returns the last row even it has NULL value.
|
||||
Note: different from last, last_row returns the last row even if it has NULL values.
|
||||
|
||||
|
||||
### Transformation Functions
|
||||
|
@ -417,7 +436,7 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
```
|
||||
Function: return the difference between successive values of the specified column.
|
||||
Return Data Type: the same data type.
|
||||
Applicable Data Types: all types except timestamp, binary, nchar, bool.
|
||||
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
|
||||
Applied to: table.
|
||||
|
||||
|
||||
|
@ -427,7 +446,7 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
```
|
||||
Function: return the difference between the maximum and the mimimum value.
|
||||
Return Data Type: the same data type.
|
||||
Applicable Data Types: all types except timestamp, binary, nchar, bool.
|
||||
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
|
||||
Applied to: table/STable.
|
||||
Note: spread gives the range of data variation in a table/supertable; it is equivalent to `MAX()` - `MIN()`
|
||||
|
||||
|
@ -438,7 +457,7 @@ TDengine supports aggregations over numerical values, they are listed below:
|
|||
```
|
||||
Function: arithmetic operations on the selected columns.
|
||||
Return Data Type: double.
|
||||
Applicable Data Types: all types except timestamp, binary, nchar, bool.
|
||||
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
|
||||
Applied to: table/STable.
|
||||
Note: 1) bracket can be used for operation priority; 2) If a column has NULL value, the result is NULL.
|
||||
|
||||
|
|
|
@ -73,8 +73,8 @@ By this design, the application can retrieve the latest data from each device su
|
|||
select last(*) from thermometers where location=’beijing’
|
||||
```
|
||||
|
||||
By this design, caching tool, like Redis, is not needed in the system. It will reduce the complexity of the system.
|
||||
Through this design, caching tools like Redis are no longer needed in the system, helping reduce the complexity of the system.
|
||||
|
||||
TDengine creates one or more virtual nodes(vnode) in each data node. Each vnode contains data for multiple tables and has its own buffer. The buffer of a vnode is fully separated from the buffer of another vnode, not shared. But the tables in a vnode share the same buffer.
|
||||
|
||||
System configuration parameter cacheBlockSize configures the cache block size in bytes, and another parameter cacheNumOfBlocks configures the number of cache blocks. The total memory for the buffer of a vnode is $cacheBlockSize \times cacheNumOfBlocks$. Another system parameter numOfBlocksPerMeter configures the maximum number of cache blocks a table can use. When you create a database, you can specify these parameters.
|
||||
System configuration parameter cacheBlockSize configures the cache block size in bytes, and another parameter cacheNumOfBlocks configures the number of cache blocks. The total memory for the buffer of a vnode is `cacheBlockSize * cacheNumOfBlocks`. Another system parameter `numOfBlocksPerMeter` configures the maximum number of cache blocks a table can use. When you create a database, you can specify these parameters.
|
||||
|
|
|
@ -52,9 +52,12 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
|
|||
if (data[i] == 0) {
|
||||
res[i] = false;
|
||||
}
|
||||
else {
|
||||
else if (data[i] == 1){
|
||||
res[i] = true;
|
||||
}
|
||||
else if (data[i] == FieldTypes.C_BOOL_NULL) {
|
||||
res[i] = null;
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
@ -63,7 +66,8 @@ function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro=false)
|
|||
let res = [];
|
||||
let currOffset = 0;
|
||||
while (currOffset < data.length) {
|
||||
res.push(data.readIntLE(currOffset,1));
|
||||
let d = data.readIntLE(currOffset,1);
|
||||
res.push(d == FieldTypes.C_TINYINT_NULL ? null : d);
|
||||
currOffset += nbytes;
|
||||
}
|
||||
return res;
|
||||
|
@ -73,7 +77,8 @@ function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro=false)
|
|||
let res = [];
|
||||
let currOffset = 0;
|
||||
while (currOffset < data.length) {
|
||||
res.push(data.readIntLE(currOffset,2));
|
||||
let d = data.readIntLE(currOffset,2);
|
||||
res.push(d == FieldTypes.C_SMALLINT_NULL ? null : d);
|
||||
currOffset += nbytes;
|
||||
}
|
||||
return res;
|
||||
|
@ -83,7 +88,8 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
|
|||
let res = [];
|
||||
let currOffset = 0;
|
||||
while (currOffset < data.length) {
|
||||
res.push(data.readInt32LE(currOffset));
|
||||
let d = data.readInt32LE(currOffset);
|
||||
res.push(d == FieldTypes.C_INT_NULL ? null : d);
|
||||
currOffset += nbytes;
|
||||
}
|
||||
return res;
|
||||
|
@ -93,7 +99,8 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
|
|||
let res = [];
|
||||
let currOffset = 0;
|
||||
while (currOffset < data.length) {
|
||||
res.push(BigInt(data.readInt64LE(currOffset)));
|
||||
let d = data.readInt64LE(currOffset);
|
||||
res.push(d == FieldTypes.C_BIGINT_NULL ? null : BigInt(d));
|
||||
currOffset += nbytes;
|
||||
}
|
||||
return res;
|
||||
|
@ -103,7 +110,8 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
|
|||
let res = [];
|
||||
let currOffset = 0;
|
||||
while (currOffset < data.length) {
|
||||
res.push(parseFloat(data.readFloatLE(currOffset).toFixed(7)));
|
||||
let d = parseFloat(data.readFloatLE(currOffset).toFixed(5));
|
||||
res.push(isNaN(d) ? null : d);
|
||||
currOffset += nbytes;
|
||||
}
|
||||
return res;
|
||||
|
@ -113,7 +121,8 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
|
|||
let res = [];
|
||||
let currOffset = 0;
|
||||
while (currOffset < data.length) {
|
||||
res.push(parseFloat(data.readDoubleLE(currOffset).toFixed(16)));
|
||||
let d = parseFloat(data.readDoubleLE(currOffset).toFixed(16));
|
||||
res.push(isNaN(d) ? null : d);
|
||||
currOffset += nbytes;
|
||||
}
|
||||
return res;
|
||||
|
@ -123,8 +132,13 @@ function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
|
|||
let res = [];
|
||||
let currOffset = 0;
|
||||
while (currOffset < data.length) {
|
||||
let dataEntry = data.slice(currOffset, currOffset + nbytes); //one entry in a row under a column;
|
||||
let dataEntry = data.slice(currOffset, currOffset + nbytes);
|
||||
if (dataEntry[0] == FieldTypes.C_BINARY_NULL) {
|
||||
res.push(null);
|
||||
}
|
||||
else {
|
||||
res.push(ref.readCString(dataEntry));
|
||||
}
|
||||
currOffset += nbytes;
|
||||
}
|
||||
return res;
|
||||
|
@ -133,10 +147,15 @@ function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
|
|||
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
|
||||
let res = [];
|
||||
let currOffset = 0;
|
||||
//every 4;
|
||||
// every 4 bytes, a character is encoded;
|
||||
while (currOffset < data.length) {
|
||||
let dataEntry = data.slice(currOffset, currOffset + nbytes); //one entry in a row under a column;
|
||||
if (dataEntry.readInt64LE(0) == FieldTypes.C_NCHAR_NULL) {
|
||||
res.push(null);
|
||||
}
|
||||
else {
|
||||
res.push(dataEntry.toString("utf16le").replace(/\u0000/g, ""));
|
||||
}
|
||||
currOffset += nbytes;
|
||||
}
|
||||
return res;
|
||||
|
@ -178,7 +197,7 @@ function CTaosInterface (config = null, pass = false) {
|
|||
ref.types.void_ptr = ref.refType(ref.types.void);
|
||||
ref.types.void_ptr2 = ref.refType(ref.types.void_ptr);
|
||||
/*Declare a bunch of functions first*/
|
||||
/* Note, pointers to TAOS_RES, TAOS, are ref.types.void_ptr. The connection._conn buffer is supplied for pointers to TAOS */
|
||||
/* Note, pointers to TAOS_RES, TAOS, are ref.types.void_ptr. The connection._conn buffer is supplied for pointers to TAOS * */
|
||||
this.libtaos = ffi.Library('libtaos', {
|
||||
'taos_options': [ ref.types.int, [ ref.types.int , ref.types.void_ptr ] ],
|
||||
'taos_init': [ ref.types.void, [ ] ],
|
||||
|
@ -211,16 +230,42 @@ function CTaosInterface (config = null, pass = false) {
|
|||
'taos_errno': [ ref.types.int, [ ref.types.void_ptr] ],
|
||||
//char *taos_errstr(TAOS *taos)
|
||||
'taos_errstr': [ ref.types.char, [ ref.types.void_ptr] ],
|
||||
//void taos_stop_query(TAOS_RES *res);
|
||||
'taos_stop_query': [ ref.types.void, [ ref.types.void_ptr] ],
|
||||
//char *taos_get_server_info(TAOS *taos);
|
||||
'taos_get_server_info': [ ref.types.char_ptr, [ ref.types.void_ptr ] ],
|
||||
//char *taos_get_client_info();
|
||||
'taos_get_client_info': [ ref.types.char_ptr, [ ] ],
|
||||
|
||||
// ASYNC
|
||||
// void taos_query_a(TAOS *taos, char *sqlstr, void (*fp)(void *, TAOS_RES *, int), void *param)
|
||||
'taos_query_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.void_ptr ] ],
|
||||
// void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
|
||||
'taos_fetch_rows_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr ]]
|
||||
'taos_fetch_rows_a': [ ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.types.void_ptr ]],
|
||||
|
||||
// Subscription
|
||||
//TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, long time, int mseconds)
|
||||
////TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds);
|
||||
'taos_subscribe': [ ref.types.void_ptr, [ ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.char_ptr, ref.types.int64, ref.types.int] ],
|
||||
//TAOS_ROW taos_consume(TAOS_SUB *tsub);
|
||||
'taos_consume': [ ref.refType(ref.types.void_ptr2), [ref.types.void_ptr] ],
|
||||
//void taos_unsubscribe(TAOS_SUB *tsub);
|
||||
'taos_unsubscribe': [ ref.types.void, [ ref.types.void_ptr ] ],
|
||||
//int taos_subfields_count(TAOS_SUB *tsub);
|
||||
'taos_subfields_count': [ ref.types.int, [ref.types.void_ptr ] ],
|
||||
//TAOS_FIELD *taos_fetch_subfields(TAOS_SUB *tsub);
|
||||
'taos_fetch_subfields': [ ref.refType(TaosField), [ ref.types.void_ptr ] ],
|
||||
|
||||
// Continuous Query
|
||||
//TAOS_STREAM *taos_open_stream(TAOS *taos, char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
|
||||
// int64_t stime, void *param, void (*callback)(void *));
|
||||
'taos_open_stream': [ ref.types.void_ptr, [ ref.types.void_ptr, ref.types.char_ptr, ref.types.void_ptr, ref.types.int64, ref.types.void_ptr, ref.types.void_ptr ] ],
|
||||
//void taos_close_stream(TAOS_STREAM *tstr);
|
||||
'taos_close_stream': [ ref.types.void, [ ref.types.void_ptr ] ]
|
||||
|
||||
});
|
||||
if (pass == false) {
|
||||
if (config == null) {
|
||||
//check this buffer
|
||||
this._config = ref.alloc(ref.types.char_ptr, ref.NULL);
|
||||
}
|
||||
else {
|
||||
|
@ -343,7 +388,7 @@ CTaosInterface.prototype.freeResult = function freeResult(result) {
|
|||
CTaosInterface.prototype.numFields = function numFields(result) {
|
||||
return this.libtaos.taos_num_fields(result);
|
||||
}
|
||||
/** @deprecated */
|
||||
// Fetch fields count by connection, the latest query
|
||||
CTaosInterface.prototype.fieldsCount = function fieldsCount(connection) {
|
||||
return this.libtaos.taos_field_count(connection);
|
||||
}
|
||||
|
@ -375,7 +420,7 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
|
|||
// Data preparation to pass to cursor. Could be bottleneck in query execution callback times.
|
||||
let row = cti.libtaos.taos_fetch_row(result2);
|
||||
let fields = cti.fetchFields_a(result2);
|
||||
let isMicro = (cti.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO);
|
||||
let isMicro = (cti.libtaos.taos_result_precision(result2) == FieldTypes.C_TIMESTAMP_MICRO);
|
||||
let blocks = new Array(fields.length);
|
||||
blocks.fill(null);
|
||||
numOfRows2 = Math.abs(numOfRows2);
|
||||
|
@ -397,7 +442,6 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
|
|||
}
|
||||
// Fetch field meta data by result handle
|
||||
CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) {
|
||||
//
|
||||
let pfields = this.fetchFields(result);
|
||||
let pfieldscount = this.numFields(result);
|
||||
let fields = [];
|
||||
|
@ -414,3 +458,157 @@ CTaosInterface.prototype.fetchFields_a = function fetchFields_a (result) {
|
|||
}
|
||||
return fields;
|
||||
}
|
||||
// Stop a query by result handle
|
||||
CTaosInterface.prototype.stopQuery = function stopQuery(result) {
|
||||
if (result != null){
|
||||
this.libtaos.taos_stop_query(result);
|
||||
}
|
||||
else {
|
||||
throw new errors.ProgrammingError("No result handle passed to stop query");
|
||||
}
|
||||
}
|
||||
CTaosInterface.prototype.getServerInfo = function getServerInfo(connection) {
|
||||
return ref.readCString(this.libtaos.taos_get_server_info(connection));
|
||||
}
|
||||
CTaosInterface.prototype.getClientInfo = function getClientInfo() {
|
||||
return ref.readCString(this.libtaos.taos_get_client_info());
|
||||
}
|
||||
|
||||
// Subscription
|
||||
CTaosInterface.prototype.subscribe = function subscribe(host=null, user="root", password="taosdata", db=null, table=null, time=null, mseconds=null) {
|
||||
let dbOrig = db;
|
||||
let tableOrig = table;
|
||||
try {
|
||||
host = host != null ? ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL);
|
||||
}
|
||||
catch(err) {
|
||||
throw "Attribute Error: host is expected as a str";
|
||||
}
|
||||
try {
|
||||
user = ref.allocCString(user)
|
||||
}
|
||||
catch(err) {
|
||||
throw "Attribute Error: user is expected as a str";
|
||||
}
|
||||
try {
|
||||
password = ref.allocCString(password);
|
||||
}
|
||||
catch(err) {
|
||||
throw "Attribute Error: password is expected as a str";
|
||||
}
|
||||
try {
|
||||
db = db != null ? ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL);
|
||||
}
|
||||
catch(err) {
|
||||
throw "Attribute Error: db is expected as a str";
|
||||
}
|
||||
try {
|
||||
table = table != null ? ref.allocCString(table) : ref.alloc(ref.types.char_ptr, ref.NULL);
|
||||
}
|
||||
catch(err) {
|
||||
throw TypeError("table is expected as a str");
|
||||
}
|
||||
try {
|
||||
mseconds = ref.alloc(ref.types.int, mseconds);
|
||||
}
|
||||
catch(err) {
|
||||
throw TypeError("mseconds is expected as an int");
|
||||
}
|
||||
//TAOS_SUB *taos_subscribe(char *host, char *user, char *pass, char *db, char *table, int64_t time, int mseconds);
|
||||
let subscription = this.libtaos.taos_subscribe(host, user, password, db, table, time, mseconds);
|
||||
if (ref.isNull(subscription)) {
|
||||
throw new errors.TDError('Failed to subscribe to TDengine | Database: ' + dbOrig + ', Table: ' + tableOrig);
|
||||
}
|
||||
else {
|
||||
console.log('Successfully subscribed to TDengine | Database: ' + dbOrig + ', Table: ' + tableOrig);
|
||||
}
|
||||
return subscription;
|
||||
}
|
||||
CTaosInterface.prototype.subFieldsCount = function subFieldsCount(subscription) {
|
||||
return this.libtaos.taos_subfields_count(subscription);
|
||||
}
|
||||
CTaosInterface.prototype.fetchSubFields = function fetchSubFields(subscription) {
|
||||
let pfields = this.libtaos.taos_fetch_subfields(subscription);
|
||||
let pfieldscount = this.subFieldsCount(subscription);
|
||||
let fields = [];
|
||||
if (ref.isNull(pfields) == false) {
|
||||
pfields = ref.reinterpret(pfields, 68 * pfieldscount , 0);
|
||||
for (let i = 0; i < pfields.length; i += 68) {
|
||||
//0 - 63 = name //64 - 65 = bytes, 66 - 67 = type
|
||||
fields.push( {
|
||||
name: ref.readCString(ref.reinterpret(pfields,64,i)),
|
||||
bytes: pfields[i + 64],
|
||||
type: pfields[i + 66]
|
||||
})
|
||||
}
|
||||
}
|
||||
return fields;
|
||||
}
|
||||
CTaosInterface.prototype.consume = function consume(subscription) {
|
||||
let row = this.libtaos.taos_consume(subscription);
|
||||
let fields = this.fetchSubFields(subscription);
|
||||
//let isMicro = (cti.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO);
|
||||
let isMicro = false; //no supported function for determining precision?
|
||||
let blocks = new Array(fields.length);
|
||||
blocks.fill(null);
|
||||
let numOfRows2 = 1; //Math.abs(numOfRows2);
|
||||
let offset = 0;
|
||||
if (numOfRows2 > 0){
|
||||
for (let i = 0; i < fields.length; i++) {
|
||||
if (!convertFunctions[fields[i]['type']] ) {
|
||||
throw new errors.DatabaseError("Invalid data type returned from database");
|
||||
}
|
||||
blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro);
|
||||
offset += fields[i]['bytes'] * numOfRows2;
|
||||
}
|
||||
}
|
||||
return {blocks:blocks, fields:fields};
|
||||
}
|
||||
CTaosInterface.prototype.unsubscribe = function unsubscribe(subscription) {
|
||||
//void taos_unsubscribe(TAOS_SUB *tsub);
|
||||
this.libtaos.taos_unsubscribe(subscription);
|
||||
}
|
||||
|
||||
// Continuous Query
|
||||
CTaosInterface.prototype.openStream = function openStream(connection, sql, callback, stime,stoppingCallback, param = ref.ref(ref.NULL)) {
|
||||
try {
|
||||
sql = ref.allocCString(sql);
|
||||
}
|
||||
catch(err) {
|
||||
throw "Attribute Error: sql string is expected as a str";
|
||||
}
|
||||
var cti = this;
|
||||
let asyncCallbackWrapper = function (param2, result2, row) {
|
||||
let fields = cti.fetchFields_a(result2);
|
||||
let isMicro = (cti.libtaos.taos_result_precision(result2) == FieldTypes.C_TIMESTAMP_MICRO);
|
||||
let blocks = new Array(fields.length);
|
||||
blocks.fill(null);
|
||||
let numOfRows2 = 1;
|
||||
let offset = 0;
|
||||
if (numOfRows2 > 0) {
|
||||
for (let i = 0; i < fields.length; i++) {
|
||||
if (!convertFunctions[fields[i]['type']] ) {
|
||||
throw new errors.DatabaseError("Invalid data type returned from database");
|
||||
}
|
||||
blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro);
|
||||
offset += fields[i]['bytes'] * numOfRows2;
|
||||
}
|
||||
}
|
||||
callback(param2, result2, blocks, fields);
|
||||
}
|
||||
asyncCallbackWrapper = ffi.Callback(ref.types.void, [ ref.types.void_ptr, ref.types.void_ptr, ref.refType(ref.types.void_ptr2) ], asyncCallbackWrapper);
|
||||
asyncStoppingCallbackWrapper = ffi.Callback( ref.types.void, [ ref.types.void_ptr ], stoppingCallback);
|
||||
let streamHandle = this.libtaos.taos_open_stream(connection, sql, asyncCallbackWrapper, stime, param, asyncStoppingCallbackWrapper);
|
||||
if (ref.isNull(streamHandle)) {
|
||||
throw new errors.TDError('Failed to open a stream with TDengine');
|
||||
return false;
|
||||
}
|
||||
else {
|
||||
console.log("Succesfully opened stream");
|
||||
return streamHandle;
|
||||
}
|
||||
}
|
||||
CTaosInterface.prototype.closeStream = function closeStream(stream) {
|
||||
this.libtaos.taos_close_stream(stream);
|
||||
console.log("Closed stream");
|
||||
}
|
||||
|
|
|
@ -38,7 +38,7 @@ module.exports = {
|
|||
C_NCHAR : 10,
|
||||
// NULL value definition
|
||||
// NOTE: These values should change according to C definition in tsdb.h
|
||||
C_BOOL_NULL : 0x02,
|
||||
C_BOOL_NULL : 2,
|
||||
C_TINYINT_NULL : -128,
|
||||
C_SMALLINT_NULL : -32768,
|
||||
C_INT_NULL : -2147483648,
|
||||
|
|
|
@ -7,7 +7,7 @@ const { PerformanceObserver, performance } = require('perf_hooks');
|
|||
module.exports = TDengineCursor;
|
||||
|
||||
/**
|
||||
* @typedef {Object} Buffer - A Node.JS buffer. Please refer to {@link https://nodejs.org/api/buffer.html} for more details
|
||||
* @typedef {Object} Buffer - A Node.js buffer. Please refer to {@link https://nodejs.org/api/buffer.html} for more details
|
||||
* @global
|
||||
*/
|
||||
|
||||
|
@ -24,27 +24,21 @@ module.exports = TDengineCursor;
|
|||
*/
|
||||
function TDengineCursor(connection=null) {
|
||||
//All parameters are store for sync queries only.
|
||||
this._description = null;
|
||||
this._rowcount = -1;
|
||||
this._connection = null;
|
||||
this._result = null;
|
||||
this._fields = null;
|
||||
this.data = [];
|
||||
this.fields = null;
|
||||
this._chandle = new CTaosInterface(null, true); //pass through, just need library loaded.
|
||||
if (connection != null) {
|
||||
this._connection = connection
|
||||
this._chandle = connection._chandle //pass through, just need library loaded.
|
||||
}
|
||||
else {
|
||||
throw new errors.ProgrammingError("A TDengineConnection object is required to be passed to the TDengineCursor");
|
||||
}
|
||||
|
||||
}
|
||||
/**
|
||||
* Get the description of the latest query
|
||||
* @since 1.0.0
|
||||
* @return {string} Description
|
||||
*/
|
||||
TDengineCursor.prototype.description = function description() {
|
||||
return this._description;
|
||||
}
|
||||
/**
|
||||
* Get the row counts of the latest query
|
||||
* @since 1.0.0
|
||||
|
@ -53,9 +47,6 @@ TDengineCursor.prototype.description = function description() {
|
|||
TDengineCursor.prototype.rowcount = function rowcount() {
|
||||
return this._rowcount;
|
||||
}
|
||||
TDengineCursor.prototype.callproc = function callproc() {
|
||||
return;
|
||||
}
|
||||
/**
|
||||
* Close the cursor by setting its connection to null and freeing results from the connection and resetting the results it has stored
|
||||
* @return {boolean} Whether or not the cursor was succesfully closed
|
||||
|
@ -112,6 +103,8 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
|
|||
|
||||
let stmt = operation;
|
||||
let time = 0;
|
||||
let res;
|
||||
if (options['quiet'] != true) {
|
||||
const obs = new PerformanceObserver((items) => {
|
||||
time = items.getEntries()[0].duration;
|
||||
performance.clearMarks();
|
||||
|
@ -121,6 +114,10 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
|
|||
res = this._chandle.query(this._connection._conn, stmt);
|
||||
performance.mark('B');
|
||||
performance.measure('query', 'A', 'B');
|
||||
}
|
||||
else {
|
||||
res = this._chandle.query(this._connection._conn, stmt);
|
||||
}
|
||||
|
||||
if (res == 0) {
|
||||
let fieldCount = this._chandle.fieldsCount(this._connection._conn);
|
||||
|
@ -139,7 +136,7 @@ TDengineCursor.prototype.execute = function execute(operation, options, callback
|
|||
this._fields = resAndField.fields;
|
||||
this.fields = resAndField.fields;
|
||||
wrapCB(callback);
|
||||
return this._handle_result(); //return a pointer to the result
|
||||
return this._result; //return a pointer to the result
|
||||
}
|
||||
}
|
||||
else {
|
||||
|
@ -271,8 +268,7 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
|
|||
if (resCode >= 0) {
|
||||
let fieldCount = cr._chandle.numFields(res2);
|
||||
if (fieldCount == 0) {
|
||||
//get affect fields count
|
||||
cr._chandle.freeResult(res2); //result will no longer be needed
|
||||
cr._chandle.freeResult(res2);
|
||||
}
|
||||
else {
|
||||
return res2;
|
||||
|
@ -280,12 +276,10 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
|
|||
|
||||
}
|
||||
else {
|
||||
//new errors.ProgrammingError(this._chandle.errStr(this._connection._conn))
|
||||
//how to get error by result handle?
|
||||
throw new errors.ProgrammingError("Error occuring with use of execute_a async function. Status code was returned with failure");
|
||||
}
|
||||
}
|
||||
this._connection._clearResultSet();
|
||||
|
||||
let stmt = operation;
|
||||
let time = 0;
|
||||
|
||||
|
@ -313,7 +307,7 @@ TDengineCursor.prototype.execute_a = function execute_a (operation, options, cal
|
|||
* @param {function} callback - callback function that is callbacked on the COMPLETE fetched data (it is calledback only once!).
|
||||
* Must be of form function (param, result, rowCount, rowData)
|
||||
* @param {Object} param - A parameter that is also passed to the main callback function. Important! Param must be an object, and the key "data" cannot be used
|
||||
* @return {{param:Object, result:buffer}} An object with the passed parameters object and the buffer instance that is a pointer to the result handle.
|
||||
* @return {{param:Object, result:Buffer}} An object with the passed parameters object and the buffer instance that is a pointer to the result handle.
|
||||
* @since 1.2.0
|
||||
* @example
|
||||
* cursor.execute('select * from db.table');
|
||||
|
@ -345,15 +339,14 @@ TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callb
|
|||
// object which holds accumulated data in the data key.
|
||||
let asyncCallbackWrapper = function asyncCallbackWrapper(param2, result2, numOfRows2, rowData) {
|
||||
param2 = ref.readObject(param2); //return the object back from the pointer
|
||||
if (numOfRows2 > 0 && rowData.length != 0) {
|
||||
// Keep fetching until now rows left.
|
||||
if (numOfRows2 > 0) {
|
||||
let buf2 = ref.alloc('Object');
|
||||
param2.data.push(rowData);
|
||||
ref.writeObject(buf2, 0, param2);
|
||||
cr._chandle.fetch_rows_a(result2, asyncCallbackWrapper, buf2);
|
||||
}
|
||||
else {
|
||||
|
||||
let finalData = param2.data;
|
||||
let fields = cr._chandle.fetchFields_a(result2);
|
||||
let data = [];
|
||||
|
@ -371,33 +364,124 @@ TDengineCursor.prototype.fetchall_a = function fetchall_a(result, options, callb
|
|||
}
|
||||
cr._chandle.freeResult(result2); // free result, avoid seg faults and mem leaks!
|
||||
callback(param2, result2, numOfRows2, {data:data,fields:fields});
|
||||
|
||||
}
|
||||
}
|
||||
ref.writeObject(buf, 0, param);
|
||||
param = this._chandle.fetch_rows_a(result, asyncCallbackWrapper, buf); //returned param
|
||||
return {param:param,result:result};
|
||||
}
|
||||
TDengineCursor.prototype.nextset = function nextset() {
|
||||
return;
|
||||
}
|
||||
TDengineCursor.prototype.setinputsize = function setinputsize() {
|
||||
return;
|
||||
}
|
||||
TDengineCursor.prototype.setoutputsize = function setoutputsize(size, column=null) {
|
||||
return;
|
||||
/**
|
||||
* Stop a query given the result handle.
|
||||
* @param {Buffer} result - The buffer that acts as the result handle
|
||||
* @since 1.3.0
|
||||
*/
|
||||
TDengineCursor.prototype.stopQuery = function stopQuery(result) {
|
||||
this._chandle.stopQuery(result);
|
||||
}
|
||||
TDengineCursor.prototype._reset_result = function _reset_result() {
|
||||
this._description = null;
|
||||
this._rowcount = -1;
|
||||
this._result = null;
|
||||
this._fields = null;
|
||||
this.data = [];
|
||||
this.fields = null;
|
||||
}
|
||||
TDengineCursor.prototype._handle_result = function _handle_result() {
|
||||
this._description = [];
|
||||
for (let field of this._fields) {
|
||||
this._description.push([field.name, field.type]);
|
||||
/**
|
||||
* Get server info such as version number
|
||||
* @return {string}
|
||||
* @since 1.3.0
|
||||
*/
|
||||
TDengineCursor.prototype.getServerInfo = function getServerInfo() {
|
||||
return this._chandle.getServerInfo(this._connection._conn);
|
||||
}
|
||||
return this._result;
|
||||
/**
|
||||
* Get client info such as version number
|
||||
* @return {string}
|
||||
* @since 1.3.0
|
||||
*/
|
||||
TDengineCursor.prototype.getClientInfo = function getClientInfo() {
|
||||
return this._chandle.getClientInfo();
|
||||
}
|
||||
/**
|
||||
* Subscribe to a table from a database in TDengine.
|
||||
* @param {Object} config - A configuration object containing the configuration options for the subscription
|
||||
* @param {string} config.host - The host to subscribe to
|
||||
* @param {string} config.user - The user to subscribe as
|
||||
* @param {string} config.password - The password for the said user
|
||||
* @param {string} config.db - The db containing the table to subscribe to
|
||||
* @param {string} config.table - The name of the table to subscribe to
|
||||
* @param {number} config.time - The start time to start a subscription session
|
||||
* @param {number} config.mseconds - The pulling period of the subscription session
|
||||
* @return {Buffer} A buffer pointing to the subscription session handle
|
||||
* @since 1.3.0
|
||||
*/
|
||||
TDengineCursor.prototype.subscribe = function subscribe(config) {
|
||||
return this._chandle.subscribe(config.host, config.user, config.password, config.db, config.table, config.time, config.mseconds);
|
||||
};
|
||||
/**
|
||||
* An infinite loop that consumes the latest data and calls a callback function that is provided.
|
||||
* @param {Buffer} subscription - A buffer object pointing to the subscription session handle
|
||||
* @param {function} callback - The callback function that takes the row data, field/column meta data, and the subscription session handle as input
|
||||
* @since 1.3.0
|
||||
*/
|
||||
TDengineCursor.prototype.consumeData = async function consumeData(subscription, callback) {
|
||||
while (true) {
|
||||
let res = this._chandle.consume(subscription);
|
||||
let data = [];
|
||||
let num_of_rows = res.blocks[0].length;
|
||||
for (let j = 0; j < num_of_rows; j++) {
|
||||
data.push([]);
|
||||
let rowBlock = new Array(res.fields.length);
|
||||
for (let k = 0; k < res.fields.length; k++) {
|
||||
rowBlock[k] = res.blocks[k][j];
|
||||
}
|
||||
data[data.length-1] = rowBlock;
|
||||
}
|
||||
callback(data, res.fields, subscription);
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Unsubscribe the provided buffer object pointing to the subscription session handle
|
||||
* @param {Buffer} subscription - A buffer object pointing to the subscription session handle that is to be unsubscribed
|
||||
* @since 1.3.0
|
||||
*/
|
||||
TDengineCursor.prototype.unsubscribe = function unsubscribe(subscription) {
|
||||
this._chandle.unsubscribe(subscription);
|
||||
}
|
||||
/**
|
||||
* Open a stream with TDengine to run the sql query periodically in the background
|
||||
* @param {string} sql - The query to run
|
||||
* @param {function} callback - The callback function to run after each query, accepting inputs as param, result handle, data, fields meta data
|
||||
* @param {number} stime - The time of the stream starts in the form of epoch milliseconds. If 0 is given, the start time is set as the current time.
|
||||
* @param {function} stoppingCallback - The callback function to run when the continuous query stops. It takes no inputs
|
||||
* @param {object} param - A parameter that is passed to the main callback function
|
||||
* @return {Buffer} A buffer pointing to the stream handle
|
||||
* @since 1.3.0
|
||||
*/
|
||||
/**
 * Open a stream with TDengine to run the sql query periodically in the background.
 * @param {string} sql - The query to run
 * @param {function} callback - Called after each query with (param, result handle, row-major data, fields meta data)
 * @param {number} stime - Stream start time in epoch milliseconds; 0 means "now"
 * @param {function} stoppingCallback - Called (with no arguments) when the continuous query stops
 * @param {object} param - Arbitrary value passed through to the main callback
 * @return {Buffer} A buffer pointing to the stream handle
 * @since 1.3.0
 */
TDengineCursor.prototype.openStream = function openStream(sql, callback, stime = 0, stoppingCallback, param = {}) {
  // Box the user param so it can cross the native boundary.
  let buf = ref.alloc('Object');
  ref.writeObject(buf, 0, param);

  let asyncCallbackWrapper = function (param2, result2, blocks, fields) {
    // The C interface delivers column-major blocks; transpose into rows.
    let data = [];
    let num_of_rows = blocks[0].length;
    for (let j = 0; j < num_of_rows; j++) {
      let rowBlock = new Array(fields.length);
      for (let k = 0; k < fields.length; k++) {
        rowBlock[k] = blocks[k][j];
      }
      data.push(rowBlock);
    }
    // Fix: forward the transposed row-major `data` as documented. Previously
    // the raw column `blocks` were passed and `data` was dead code.
    callback(param2, result2, data, fields);
  }
  return this._chandle.openStream(this._connection._conn, sql, asyncCallbackWrapper, stime, stoppingCallback, buf);
}
|
||||
/**
|
||||
* Close a stream
|
||||
 * @param {Buffer} stream - A buffer pointing to the handle of the stream to be closed
|
||||
* @since 1.3.0
|
||||
*/
|
||||
// Thin wrapper over the C interface: closes the continuous-query stream
// referenced by the provided handle buffer.
TDengineCursor.prototype.closeStream = function closeStream(stream) {
  this._chandle.closeStream(stream);
}
|
||||
|
|
|
@ -69,11 +69,9 @@ TaosQuery.prototype.execute_a = async function execute_a(options = {}) {
|
|||
frej = reject;
|
||||
});
|
||||
let asyncCallbackFetchall = async function(param, res, numOfRows, blocks) {
|
||||
//param is expected to be the fetchPromise variable;
|
||||
|
||||
//keep fetching until completion, possibly an issue though
|
||||
if (numOfRows > 0) {
|
||||
frej("cursor.fetchall_a didn't fetch all data properly");
|
||||
// Likely a query like insert
|
||||
fres();
|
||||
}
|
||||
else {
|
||||
fres(new TaosResult(blocks.data, blocks.fields));
|
||||
|
|
|
@ -15,24 +15,17 @@ module.exports = TaosResult;
|
|||
* @since 1.0.6
|
||||
*/
|
||||
function TaosResult(data, fields) {
  // Wrap each raw row / field descriptor in its dedicated class so callers
  // get typed accessors instead of bare arrays.
  const wrappedRows = [];
  for (const row of data) {
    wrappedRows.push(new TaosRow(row));
  }
  this.data = wrappedRows;
  this.rowcount = this.data.length;

  const wrappedFields = [];
  for (const field of fields) {
    wrappedFields.push(new TaosField(field));
  }
  this.fields = wrappedFields;
}
|
||||
|
||||
// Return a shallow copy of the field meta data array (identity transform;
// kept for interface compatibility).
TaosResult.prototype.parseFields = function parseFields(fields) {
  return [...fields];
}
|
||||
/**
|
||||
* Pretty print data and the fields meta data as if you were using the taos shell
|
||||
* @memberof TaosResult
|
||||
* @function pretty
|
||||
* @since 1.0.6
|
||||
*/
|
||||
TaosResult.prototype.pretty = function pretty() {
|
||||
// Pretty print of the fields and the data;
|
||||
let fieldsStr = "";
|
||||
let sizing = [];
|
||||
this.fields.forEach((field,i) => {
|
||||
|
@ -55,10 +48,9 @@ TaosResult.prototype.pretty = function pretty() {
|
|||
entry = entry.toTaosString();
|
||||
}
|
||||
else {
|
||||
entry = entry.toString();
|
||||
entry = entry == null ? 'null' : entry.toString();
|
||||
}
|
||||
rowStr += entry
|
||||
//console.log(this.fields[i]._field.bytes, suggestedWidths[this.fields[i]._field.type]);
|
||||
rowStr += fillEmpty(sizing[i] - entry.length) + " | ";
|
||||
});
|
||||
console.log(rowStr);
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "td-connector",
|
||||
"version": "1.2.1",
|
||||
"version": "1.3.1",
|
||||
"lockfileVersion": 1,
|
||||
"requires": true,
|
||||
"dependencies": {
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "td-connector",
|
||||
"version": "1.2.1",
|
||||
"version": "1.3.1",
|
||||
"description": "A Node.js connector for TDengine.",
|
||||
"main": "tdengine.js",
|
||||
"scripts": {
|
||||
|
|
|
@ -130,9 +130,9 @@ console.log(cursor.data); // Latest query's result data is stored in cursor.data
|
|||
|
||||
### Async functionality
|
||||
|
||||
Async queries can be performed using the same functions such as `cursor.execute`, `cursor.query`, but now with `_a` appended to them.
|
||||
Async queries can be performed using the same functions such as `cursor.execute`, `TaosQuery.execute`, but now with `_a` appended to them.
|
||||
|
||||
Say you want to execute an two async query on two seperate tables, using `cursor.query_a`, you can do that and get a TaosQuery object, which upon executing with the `execute_a` function, returns a promise that resolves with a TaosResult object.
|
||||
Say you want to execute two async queries on two separate tables, using `cursor.query`, you can do that and get a TaosQuery object, which upon executing with the `execute_a` function, returns a promise that resolves with a TaosResult object.
|
||||
|
||||
```javascript
|
||||
var promise1 = cursor.query('select count(*), avg(v1), avg(v2) from meter1;').execute_a()
|
||||
|
@ -145,7 +145,6 @@ promise2.then(function(result) {
|
|||
})
|
||||
```
|
||||
|
||||
|
||||
## Example
|
||||
|
||||
An example of using the NodeJS connector to create a table with weather data and create and execute queries can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js) (The preferred method for using the connector)
|
||||
|
|
|
@ -0,0 +1,89 @@
|
|||
/**
 * Snapshot the current process memory usage with every metric formatted as a
 * human-readable megabyte string (e.g. "12.345MB").
 * @returns {Object} process.memoryUsage() shape with string "X.XXXMB" values
 */
function memoryUsageData() {
  const s = process.memoryUsage();
  // Was `for (key in s)`, which leaked `key` as an implicit global.
  for (const key of Object.keys(s)) {
    s[key] = (s[key] / 1000000).toFixed(3) + "MB";
  }
  return s;
}
|
||||
console.log("initial mem usage:", memoryUsageData());

const { PerformanceObserver, performance } = require('perf_hooks');
const taos = require('../tdengine');
// port:0 lets the connector fall back to the default TDengine port.
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0});
var c1 = conn.cursor();

// Initialize env
c1.execute('create database if not exists td_connector_test;');
c1.execute('use td_connector_test;')
c1.execute('create table if not exists all_types (ts timestamp, _int int, _bigint bigint, _float float, _double double, _binary binary(40), _smallint smallint, _tinyint tinyint, _bool bool, _nchar nchar(40));');
c1.execute('create table if not exists stabletest (ts timestamp, v1 int, v2 int, v3 int, v4 double) tags (id int, location binary(20));')


// Insertion into single table Performance Test
// Aggregate timers (milliseconds) filled in by the PerformanceObserver below.
var dataPrepTime = 0;
var insertTime = 0;
var insertTime5000 = 0;
var avgInsert5ktime = 0;
// Route each 'measure' entry into the matching accumulator; any other
// measure name is just printed.
const obs = new PerformanceObserver((items) => {
  let entry = items.getEntries()[0];

  if (entry.name == 'Data Prep') {
    dataPrepTime += entry.duration;
  }
  else if (entry.name == 'Insert'){
    insertTime += entry.duration
  }
  else {
    console.log(entry.name + ': ' + (entry.duration/1000).toFixed(8) + 's');
  }
  performance.clearMarks();
});
obs.observe({ entryTypes: ['measure'] });
|
||||
|
||||
/**
 * Uniform random number in [l, r).
 * @param {number} l - lower bound (inclusive)
 * @param {number} r - upper bound (exclusive)
 * @returns {number}
 */
function R(l, r) {
  // Was `Math.random() * (r - l) - r`, which is only correct for the
  // symmetric ranges (l === -r) used in this script; `+ l` is the correct
  // general formula and is identical for all existing call sites.
  return Math.random() * (r - l) + l;
}
|
||||
// Fair coin flip: returns true or false with equal probability.
function randomBool() {
  return Math.random() < 0.5;
}
|
||||
// Insert n rows one at a time into all_types, timing data preparation
// ('Data Prep') and statement execution ('Insert') separately via the
// PerformanceObserver accumulators, and logging aggregate timings every
// 5000 inserts.
function insertN(n) {
  for (let i = 0; i < n; i++) {
    performance.mark('A3');
    let insertData = ["now + " + i + "m", // Timestamp
      parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // Int
      parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // BigInt
      parseFloat( R(-3.4E38, 3.4E38) ), // Float
      parseFloat( R(-1.7E308, 1.7E308) ), // Double
      "\"Long Binary\"", // Binary
      parseInt( R(-32767, 32767) ), // Small Int
      parseInt( R(-127, 127) ), // Tiny Int
      randomBool(), // Bool
      "\"Nchars 一些中文字幕\""]; // Nchar
    let query = 'insert into td_connector_test.all_types values(' + insertData.join(',') + ' );';
    performance.mark('B3');
    performance.measure('Data Prep', 'A3', 'B3');
    performance.mark('A2');
    c1.execute(query, {quiet:true});
    performance.mark('B2');
    performance.measure('Insert', 'A2', 'B2');
    // Report once per 5000-row batch.
    if ( i % 5000 == 4999) {
      console.log("Insert # " + (i+1));
      console.log('Insert 5k records: ' + ((insertTime - insertTime5000)/1000).toFixed(8) + 's');
      insertTime5000 = insertTime;
      // NOTE(review): this mixes cumulative time (insertTime5000) with a
      // per-batch running average and divides avgInsert5ktime by 1000 again
      // even though it is already in seconds — looks suspect; verify the
      // intended formula before relying on this number.
      avgInsert5ktime = (avgInsert5ktime/1000 * Math.floor(i / 5000) + insertTime5000/1000) / Math.ceil( i / 5000);
      console.log('DataPrepTime So Far: ' + (dataPrepTime/1000).toFixed(8) + 's | Inserting time So Far: ' + (insertTime/1000).toFixed(8) + 's | Avg. Insert 5k time: ' + avgInsert5ktime.toFixed(8));


    }
  }
}
|
||||
// Timed run: insert 100k rows (with periodic batch logging), then report totals.
performance.mark('insert 1E5')
insertN(1E5);
performance.mark('insert 1E5 2')
performance.measure('Insert With Logs', 'insert 1E5', 'insert 1E5 2');
console.log('DataPrepTime: ' + (dataPrepTime/1000).toFixed(8) + 's | Inserting time: ' + (insertTime/1000).toFixed(8) + 's');
// Reset accumulators for any subsequent measurement phase.
dataPrepTime = 0; insertTime = 0;
//'insert into td_connector_test.all_types values (now, null,null,null,null,null,null,null,null,null);'
|
|
@ -1,5 +1,5 @@
|
|||
const taos = require('../tdengine');
|
||||
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:0});
|
||||
var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:10});
|
||||
var c1 = conn.cursor();
|
||||
let stime = new Date();
|
||||
let interval = 1000;
|
||||
|
@ -23,14 +23,13 @@ function randomBool() {
|
|||
c1.execute('create database if not exists td_connector_test;');
|
||||
c1.execute('use td_connector_test;')
|
||||
c1.execute('create table if not exists all_types (ts timestamp, _int int, _bigint bigint, _float float, _double double, _binary binary(40), _smallint smallint, _tinyint tinyint, _bool bool, _nchar nchar(40));');
|
||||
|
||||
c1.execute('create table if not exists stabletest (ts timestamp, v1 int, v2 int, v3 int, v4 double) tags (id int, location binary(20));')
|
||||
|
||||
// Shell Test : The following uses the cursor to imitate the taos shell
|
||||
|
||||
// Insert
|
||||
for (let i = 0; i < 5000; i++) {
|
||||
stime.setMilliseconds(stime.getMilliseconds() + interval);
|
||||
let insertData = [convertDateToTS(stime), // Timestamp
|
||||
for (let i = 0; i < 10000; i++) {
|
||||
let insertData = ["now+" + i + "s", // Timestamp
|
||||
parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // Int
|
||||
parseInt( R(-Math.pow(2,31) + 1 , Math.pow(2,31) - 1) ), // BigInt
|
||||
parseFloat( R(-3.4E38, 3.4E38) ), // Float
|
||||
|
@ -60,21 +59,78 @@ console.log(d);
|
|||
|
||||
// Immediate Execution like the Shell
|
||||
|
||||
c1.query('select count(*), stddev(_double), min(_tinyint) from all_types where _tinyint > 50 and _int < 0', true).then(function(result){
|
||||
c1.query('select count(*), stddev(_double), min(_tinyint) from all_types where _tinyint > 50 and _int < 0;', true).then(function(result){
|
||||
result.pretty();
|
||||
})
|
||||
c1.query('select _tinyint, _bool from all_types where _tinyint > 50 and _int < 0 limit 50;', true).then(function(result){
|
||||
result.pretty();
|
||||
})
|
||||
c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types', true).then(function(result){
|
||||
c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types;', true).then(function(result){
|
||||
result.pretty();
|
||||
})
|
||||
c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types interval(1m) limit 100;', true).then(function(result){
|
||||
result.pretty();
|
||||
})
|
||||
|
||||
var q = c1.query('select * from td_connector_test.all_types where ts >= ? and _int > ? limit 100 offset 40;').bind(new Date(1231), 100).execute().then(function(r) {
|
||||
// Binding arguments, and then using promise
|
||||
var q = c1.query('select * from td_connector_test.all_types where ts >= ? and _int > ? limit 100 offset 40;').bind(new Date(1231), 100)
|
||||
console.log(q.query);
|
||||
q.execute().then(function(r) {
|
||||
r.pretty();
|
||||
});
|
||||
console.log(q._query);
|
||||
|
||||
c1.execute('drop database td_connector_test;')
|
||||
|
||||
|
||||
// Raw Async Testing (Callbacks, not promises)
|
||||
// Fetch-completion callback for fetchall_a: logs the result handle, the
// number of rows fetched, the pass-through param, and the fields/data.
function cb2(param, result, rowCount, rd) {
  console.log("RES *", result);
  console.log("Async fetched", rowCount, "rows");
  console.log("Passed Param: ", param);
  console.log("Fields", rd.fields);
  console.log("Data", rd.data);

}
|
||||
// Execution callback for execute_a: logs the status code, then chains into
// fetchall_a (with cb2) to retrieve the result set asynchronously.
function cb1(param,result,code) {
  console.log('Callbacked!');
  console.log("RES *", result);
  console.log("Status: ", code);
  console.log("Passed Param", param);
  c1.fetchall_a(result, cb2, param)
}
|
||||
|
||||
// Kick off an async describe; cb1 logs the status and chains into fetchall_a.
c1.execute_a("describe td_connector_test.all_types;", cb1, {myparam:3.141});
|
||||
|
||||
// Fetch-completion callback used by the deferred fetchall_a below; logs the
// result handle, row count, pass-through param, and the fields/data.
function cb4(param, result, rowCount, rd) {
  console.log("RES *", result);
  console.log("Async fetched", rowCount, "rows");
  console.log("Passed Param: ", param);
  console.log("Fields", rd.fields);
  console.log("Data", rd.data);

}
|
||||
// Without directly calling fetchall_a
// Holds the result handle captured by cb3 for the deferred fetchall_a below.
var thisRes;
|
||||
// Execution callback that only stashes the result handle in `thisRes` so
// fetchall_a can be invoked separately later (see the setTimeout below).
function cb3(param,result,code) {
  console.log('Callbacked!');
  console.log("RES *", result);
  console.log("Status: ", code);
  console.log("Passed Param", param);
  thisRes = result;
}
|
||||
//Test calling execute and fetchall separately and not through callbacks
var param = c1.execute_a("describe td_connector_test.all_types;", cb3, {e:2.718});
console.log("Passed Param outside of callback: ", param);
// Give the async execute 100ms to finish before fetching its result.
setTimeout(function(){
  c1.fetchall_a(thisRes, cb4, param);
},100);

// Async through promises
var aq = c1.query('select count(*) from td_connector_test.all_types;')
aq.execute_a().then(function(data) {
  data.pretty();
})
c1.query('describe td_connector_test.stabletest;').execute_a().then(r=> r.pretty());
// Clean up the test database after the async work has had time to finish.
setTimeout(function(){
  c1.query('drop database td_connector_test;');
},2000);
conn.close();
|
||||
|
|
Loading…
Reference in New Issue