diff --git a/cmake/cmake.options b/cmake/cmake.options index 090b5d4135..bec64f7bf0 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -163,18 +163,6 @@ option( ON ) -option( - BUILD_WITH_CRAFT - "If build with canonical-raft" - OFF -) - -option( - BUILD_WITH_TRAFT - "If build with traft" - OFF -) - IF(${TD_LINUX} MATCHES TRUE) option( diff --git a/cmake/craft_CMakeLists.txt.in b/cmake/craft_CMakeLists.txt.in deleted file mode 100644 index f0ec7feaf2..0000000000 --- a/cmake/craft_CMakeLists.txt.in +++ /dev/null @@ -1,14 +0,0 @@ - -# canonical-raft -ExternalProject_Add(craft - GIT_REPOSITORY https://github.com/canonical/raft.git - GIT_TAG v0.11.2 - SOURCE_DIR "${TD_CONTRIB_DIR}/craft" - BINARY_DIR "${TD_CONTRIB_DIR}/craft" - #BUILD_IN_SOURCE TRUE - # https://answers.ros.org/question/333125/how-to-include-external-automakeautoconf-projects-into-ament_cmake/ - CONFIGURE_COMMAND COMMAND autoreconf -i COMMAND ./configure --enable-example - BUILD_COMMAND "$(MAKE)" - INSTALL_COMMAND "" - TEST_COMMAND "" -) diff --git a/cmake/traft_CMakeLists.txt.in b/cmake/traft_CMakeLists.txt.in deleted file mode 100644 index 0934e2c35c..0000000000 --- a/cmake/traft_CMakeLists.txt.in +++ /dev/null @@ -1,14 +0,0 @@ - -# traft -ExternalProject_Add(traft - GIT_REPOSITORY https://github.com/taosdata/traft.git - GIT_TAG for_3.0 - SOURCE_DIR "${TD_CONTRIB_DIR}/traft" - BINARY_DIR "${TD_CONTRIB_DIR}/traft" - #BUILD_IN_SOURCE TRUE - # https://answers.ros.org/question/333125/how-to-include-external-automakeautoconf-projects-into-ament_cmake/ - CONFIGURE_COMMAND COMMAND autoreconf -i COMMAND ./configure - BUILD_COMMAND "$(MAKE)" - INSTALL_COMMAND "" - TEST_COMMAND "" -) diff --git a/docs/examples/go/connect/afconn/main.go b/docs/examples/go/connect/afconn/main.go index 2d36ef03ab..3c16212a45 100644 --- a/docs/examples/go/connect/afconn/main.go +++ b/docs/examples/go/connect/afconn/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" - "github.com/taosdata/driver-go/v2/af" + "github.com/taosdata/driver-go/v3/af" ) func main() { diff --git a/docs/examples/go/connect/cgoexample/main.go b/docs/examples/go/connect/cgoexample/main.go index ba7ed0f728..57b8d5d91f 100644 --- a/docs/examples/go/connect/cgoexample/main.go +++ b/docs/examples/go/connect/cgoexample/main.go @@ -4,7 +4,7 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) func main() { diff --git a/docs/examples/go/connect/restexample/main.go b/docs/examples/go/connect/restexample/main.go index 1efc98b988..f10151f04f 100644 --- a/docs/examples/go/connect/restexample/main.go +++ b/docs/examples/go/connect/restexample/main.go @@ -4,7 +4,7 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func main() { diff --git a/docs/examples/go/connect/wrapper/main.go b/docs/examples/go/connect/wrapper/main.go index d7e71a8baf..a459539db3 100644 --- a/docs/examples/go/connect/wrapper/main.go +++ b/docs/examples/go/connect/wrapper/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" - "github.com/taosdata/driver-go/v2/wrapper" + "github.com/taosdata/driver-go/v3/wrapper" ) func main() { diff --git a/docs/examples/go/go.mod b/docs/examples/go/go.mod index 5945e395e9..2bc1a74cb6 100644 --- a/docs/examples/go/go.mod +++ b/docs/examples/go/go.mod @@ -2,5 +2,5 @@ module goexample go 1.17 -require github.com/taosdata/driver-go/v2 develop +require github.com/taosdata/driver-go/v3 3.0 diff --git 
a/docs/examples/go/insert/json/main.go b/docs/examples/go/insert/json/main.go index 6be375270e..805c123342 100644 --- a/docs/examples/go/insert/json/main.go +++ b/docs/examples/go/insert/json/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" - "github.com/taosdata/driver-go/v2/af" + "github.com/taosdata/driver-go/v3/af" ) func prepareDatabase(conn *af.Connector) { diff --git a/docs/examples/go/insert/line/main.go b/docs/examples/go/insert/line/main.go index c17e1a5270..1a09edc40c 100644 --- a/docs/examples/go/insert/line/main.go +++ b/docs/examples/go/insert/line/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" - "github.com/taosdata/driver-go/v2/af" + "github.com/taosdata/driver-go/v3/af" ) func prepareDatabase(conn *af.Connector) { diff --git a/docs/examples/go/insert/sql/main.go b/docs/examples/go/insert/sql/main.go index 6cd5f860e6..a493ef5fb0 100644 --- a/docs/examples/go/insert/sql/main.go +++ b/docs/examples/go/insert/sql/main.go @@ -4,7 +4,7 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func createStable(taos *sql.DB) { diff --git a/docs/examples/go/insert/stmt/main.go b/docs/examples/go/insert/stmt/main.go index 7093fdf1e5..d23a6e345c 100644 --- a/docs/examples/go/insert/stmt/main.go +++ b/docs/examples/go/insert/stmt/main.go @@ -4,9 +4,9 @@ import ( "fmt" "time" - "github.com/taosdata/driver-go/v2/af" - "github.com/taosdata/driver-go/v2/af/param" - "github.com/taosdata/driver-go/v2/common" + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/param" + "github.com/taosdata/driver-go/v3/common" ) func checkErr(err error, prompt string) { diff --git a/docs/examples/go/insert/telnet/main.go b/docs/examples/go/insert/telnet/main.go index 91fafbe71a..4cf4375db5 100644 --- a/docs/examples/go/insert/telnet/main.go +++ b/docs/examples/go/insert/telnet/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" - "github.com/taosdata/driver-go/v2/af" + "github.com/taosdata/driver-go/v3/af" ) func prepareDatabase(conn *af.Connector) { diff --git a/docs/examples/go/query/sync/main.go b/docs/examples/go/query/sync/main.go index 0326c7a7da..add422e385 100644 --- a/docs/examples/go/query/sync/main.go +++ b/docs/examples/go/query/sync/main.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func main() { diff --git a/docs/examples/go/sub/main.go b/docs/examples/go/sub/main.go index c44521b340..7721eed134 100644 --- a/docs/examples/go/sub/main.go +++ b/docs/examples/go/sub/main.go @@ -1,53 +1,120 @@ package main import ( - "database/sql/driver" + "context" + "encoding/json" "fmt" - "io" - "os" + "strconv" "time" - taos "github.com/taosdata/driver-go/v2/af" + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/tmq" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/errors" + "github.com/taosdata/driver-go/v3/wrapper" ) func main() { - db, err := taos.Open("", "", "", "log", 0) + db, err := af.Open("", "root", "taosdata", "", 0) if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) + panic(err) } defer db.Close() - topic, err := db.Subscribe(false, "taoslogtail", "select ts, level, ipaddr, content from log", time.Second) + _, err = db.Exec("create database if not exists example_tmq") if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(2) + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic with meta as 
DATABASE example_tmq") + if err != nil { + panic(err) + } + config := tmq.NewConfig() + defer config.Destroy() + err = config.SetGroupID("test") + if err != nil { + panic(err) + } + err = config.SetAutoOffsetReset("earliest") + if err != nil { + panic(err) + } + err = config.SetConnectIP("127.0.0.1") + if err != nil { + panic(err) + } + err = config.SetConnectUser("root") + if err != nil { + panic(err) + } + err = config.SetConnectPass("taosdata") + if err != nil { + panic(err) + } + err = config.SetConnectPort("6030") + if err != nil { + panic(err) + } + err = config.SetMsgWithTableName(true) + if err != nil { + panic(err) + } + err = config.EnableHeartBeat() + if err != nil { + panic(err) + } + err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) { + if result.ErrCode != 0 { + errStr := wrapper.TMQErr2Str(result.ErrCode) + err := errors.NewError(int(result.ErrCode), errStr) + panic(err) + } + }) + if err != nil { + panic(err) + } + consumer, err := tmq.NewConsumer(config) + if err != nil { + panic(err) + } + err = consumer.Subscribe([]string{"example_tmq_topic"}) + if err != nil { + panic(err) + } + _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") + if err != nil { + panic(err) } - defer topic.Unsubscribe(true) for { - func() { - rows, err := topic.Consume() - defer func() { rows.Close(); time.Sleep(time.Second) }() - if err != nil { - fmt.Println(err) - os.Exit(3) - } - for { - values := make([]driver.Value, 4) - err := rows.Next(values) - if err == io.EOF { - break - } else if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(4) - } - ts := values[0].(time.Time) - level := values[1].(int8) - ipaddr := values[2].(string) - content := values[3].(string) - fmt.Printf("%s %d %s %s\n", ts.Format(time.StampMilli), level, ipaddr, content) - } - }() + result, err := consumer.Poll(time.Second) + if err != nil { + panic(err) + } + if result.Type != common.TMQ_RES_TABLE_META { + panic("want message type 2 got " + strconv.Itoa(int(result.Type))) + } + data, _ := json.Marshal(result.Meta) + fmt.Println(string(data)) + consumer.Commit(context.Background(), result.Message) + consumer.FreeMessage(result.Message) + break } + _, err = db.Exec("insert into example_tmq.t1 values(now,1)") + if err != nil { + panic(err) + } + for { + result, err := consumer.Poll(time.Second) + if err != nil { + panic(err) + } + if result.Type != common.TMQ_RES_DATA { + panic("want message type 1 got " + strconv.Itoa(int(result.Type))) + } + data, _ := json.Marshal(result.Data) + fmt.Println(string(data)) + consumer.Commit(context.Background(), result.Message) + consumer.FreeMessage(result.Message) + break + } + consumer.Close() } - -// 未完成 diff --git a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx index a8a92606e4..ba43aa30fd 100644 --- a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx @@ -5,7 +5,7 @@ title: REST API 为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 :::note -与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。从 2.2.0.0 版本开始,支持在 RESTful URL 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 URL 中指定的这个 db_name。从 2.4.0.0 版本开始,RESTful 默认由 taosAdapter 提供,要求必须在 URL 中指定 db_name。 +与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。支持在 RESTful URL 
中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 URL 中指定的这个 db_name。 ::: ## 安装 @@ -28,54 +28,204 @@ curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.t ```json { - "status": "succ", - "head": [ - "name", - "created_time", - "ntables", - "vgroups", - "replica", - "quorum", - "days", - "keep1,keep2,keep(D)", - "cache(MB)", - "blocks", - "minrows", - "maxrows", - "wallevel", - "fsync", - "comp", - "precision", - "status" - ], - "data": [ - [ - "log", - "2020-09-02 17:23:00.039", - 4, - 1, - 1, - 1, - 10, - "30,30,30", - 1, - 3, - 100, - 4096, - 1, - 3000, - 2, - "us", - "ready" - ] - ], - "rows": 1 + "code": 0, + "column_meta": [ + [ + "name", + "VARCHAR", + 64 + ], + [ + "create_time", + "TIMESTAMP", + 8 + ], + [ + "vgroups", + "SMALLINT", + 2 + ], + [ + "ntables", + "BIGINT", + 8 + ], + [ + "replica", + "TINYINT", + 1 + ], + [ + "strict", + "VARCHAR", + 4 + ], + [ + "duration", + "VARCHAR", + 10 + ], + [ + "keep", + "VARCHAR", + 32 + ], + [ + "buffer", + "INT", + 4 + ], + [ + "pagesize", + "INT", + 4 + ], + [ + "pages", + "INT", + 4 + ], + [ + "minrows", + "INT", + 4 + ], + [ + "maxrows", + "INT", + 4 + ], + [ + "comp", + "TINYINT", + 1 + ], + [ + "precision", + "VARCHAR", + 2 + ], + [ + "status", + "VARCHAR", + 10 + ], + [ + "retention", + "VARCHAR", + 60 + ], + [ + "single_stable", + "BOOL", + 1 + ], + [ + "cachemodel", + "VARCHAR", + 11 + ], + [ + "cachesize", + "INT", + 4 + ], + [ + "wal_level", + "TINYINT", + 1 + ], + [ + "wal_fsync_period", + "INT", + 4 + ], + [ + "wal_retention_period", + "INT", + 4 + ], + [ + "wal_retention_size", + "BIGINT", + 8 + ], + [ + "wal_roll_period", + "INT", + 4 + ], + [ + "wal_seg_size", + "BIGINT", + 8 + ] + ], + "data": [ + [ + "information_schema", + null, + null, + 14, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + "ready", + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + "performance_schema", + null, + null, + 3, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + "ready", + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ] + ], + "rows": 2 } ``` ## HTTP 请求格式 -``` +```text http://:/rest/sql/[db_name] ``` @@ -83,21 +233,21 @@ http://:/rest/sql/[db_name] - fqnd: 集群中的任一台主机 FQDN 或 IP 地址 - port: 配置文件中 httpPort 配置项,缺省为 6041 -- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。(从 2.2.0.0 版本开始支持) +- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。 例如:`http://h1.taos.com:6041/rest/sql/test` 是指向地址为 `h1.taos.com:6041` 的 URL,并将默认使用的数据库库名设置为 `test`。 HTTP 请求的 Header 里需带有身份认证信息,TDengine 支持 Basic 认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。 -- 自定义身份认证信息如下所示(token 稍后介绍) +- [自定义身份认证信息](#自定义授权码)如下所示 - ``` + ```text Authorization: Taosd ``` - Basic 身份认证信息如下所示 - ``` + ```text Authorization: Basic ``` @@ -119,41 +269,165 @@ curl -L -u username:password -d "" :/rest/sql/[db_name] ## HTTP 返回格式 -返回值为 JSON 格式,如下: +### HTTP 响应码 + +| **response code** | **说明** | +|-------------------|----------------| +| 200 | 正确返回和 C 接口错误返回 | +| 400 | 参数错误返回 | +| 401 | 鉴权失败 | +| 404 | 接口不存在 | +| 500 | 内部错误 | +| 503 | 系统资源不足 | + +### HTTP body 结构 + + + + + + + + + + + + + + + + + + + + + + +
+**正确执行**
+
+- code:(int)0 代表成功
+- column_meta:([][3]any)列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)
+- rows:(int)数据返回行数
+- data:([][]any)具体数据内容
+
+样例:
+
 ```json
 {
-  "status": "succ",
-  "head": ["ts","current", …],
-  "column_meta": [["ts",9,8],["current",6,4], …],
-  "data": [
-    ["2018-10-03 14:38:05.000", 10.3, …],
-    ["2018-10-03 14:38:15.000", 12.6, …]
-  ],
-  "rows": 2
+  "code": 0,
+  "column_meta": [["affected_rows", "INT", 4]],
+  "data": [[0]],
+  "rows": 1
 }
 ```
-
-说明:
+
+**正确查询**
+
+- code:(int)0 代表成功
+- column_meta:([][3]any)列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)
+- rows:(int)数据返回行数
+- data:([][]any)具体数据内容
+
-- status: 告知操作结果是成功还是失败。
-- head: 表的定义,如果不返回结果集,则仅有一列 “affected_rows”。(从 2.0.17.0 版本开始,建议不要依赖 head 返回值来判断数据列类型,而推荐使用 column_meta。在后续版本中,有可能会从返回值中去掉 head 这一项。)
-- column_meta: 从 2.0.17.0 版本开始,返回值中增加这一项来说明 data 里每一列的数据类型。具体每个列会用三个值来说明,分别为:列名、列类型、类型长度。例如`["current",6,4]`表示列名为“current”;列类型为 6,也即 float 类型;类型长度为 4,也即对应 4 个字节表示的 float。如果列类型为 binary 或 nchar,则类型长度表示该列最多可以保存的内容长度,而不是本次返回值中的具体数据长度。当列类型是 nchar 的时候,其类型长度表示可以保存的 unicode 字符数量,而不是 bytes。
-- data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有 [[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。
-- rows: 表明总共多少行数据。
+样例:
+
+```json
+{
+  "code": 0,
+  "column_meta": [
+    ["ts", "TIMESTAMP", 8],
+    ["count", "BIGINT", 8],
+    ["endpoint", "VARCHAR", 45],
+    ["status_code", "INT", 4],
+    ["client_ip", "VARCHAR", 40],
+    ["request_method", "VARCHAR", 15],
+    ["request_uri", "VARCHAR", 128]
+  ],
+  "data": [
+    ["2022-06-29T05:50:55.401Z", 2, "LAPTOP-NNKFTLTG:6041", 200, "172.23.208.1", "POST", "/rest/sql"],
+    ["2022-06-29T05:52:16.603Z", 1, "LAPTOP-NNKFTLTG:6041", 200, "172.23.208.1", "POST", "/rest/sql"],
+    ["2022-06-29T06:28:14.118Z", 1, "LAPTOP-NNKFTLTG:6041", 200, "172.23.208.1", "POST", "/rest/sql"],
+    ["2022-06-29T05:52:16.603Z", 2, "LAPTOP-NNKFTLTG:6041", 401, "172.23.208.1", "POST", "/rest/sql"]
+  ],
+  "rows": 4
+}
+```
+
+**错误**
+
+- code:(int)错误码
+- desc:(string)错误描述
+
-
-column_meta 中的列类型说明:
-
-- 1:BOOL
-- 2:TINYINT
-- 3:SMALLINT
-- 4:INT
-- 5:BIGINT
-- 6:FLOAT
-- 7:DOUBLE
-- 8:BINARY
-- 9:TIMESTAMP
-- 10:NCHAR
+样例:
+
+```json
+{
+  "code": 9728,
+  "desc": "syntax error near \"1\""
+}
+```
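As an editorial illustration of the response body described above, here is a minimal Go sketch that posts one SQL statement to `/rest/sql` and decodes the `code`/`desc`/`column_meta`/`data`/`rows` fields using only the standard library. The endpoint address and the `root`/`taosdata` credentials are placeholder values, and the `restResult` struct is a hypothetical helper for this sketch, not part of driver-go.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

// restResult mirrors the body fields described above:
// code, column_meta, data, rows on success; code and desc on error.
type restResult struct {
	Code       int             `json:"code"`
	Desc       string          `json:"desc"`
	ColumnMeta [][]interface{} `json:"column_meta"`
	Data       [][]interface{} `json:"data"`
	Rows       int             `json:"rows"`
}

func main() {
	// Placeholder endpoint; adjust host/port for your deployment.
	req, err := http.NewRequest("POST", "http://localhost:6041/rest/sql", strings.NewReader("show databases"))
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("root", "taosdata") // sends the "Authorization: Basic <TOKEN>" header

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var result restResult
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	if result.Code != 0 {
		fmt.Printf("error %d: %s\n", result.Code, result.Desc)
		return
	}
	// Each column_meta entry is [name, type, length]; data rows follow the same column order.
	for _, meta := range result.ColumnMeta {
		fmt.Printf("%v(%v) ", meta[0], meta[1])
	}
	fmt.Println()
	for _, row := range result.Data {
		fmt.Println(row)
	}
}
```

On an error response only `code` and `desc` are populated, which is why the sketch checks `Code` before touching the column metadata.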
+ +### 说明 + +- 时间格式仅支持 RFC3339,结果集为 0 时区 +- 列类型使用如下字符串: + > "NULL" + > "BOOL" + > "TINYINT" + > "SMALLINT" + > "INT" + > "BIGINT" + > "FLOAT" + > "DOUBLE" + > "VARCHAR" + > "TIMESTAMP" + > "NCHAR" + > "TINYINT UNSIGNED" + > "SMALLINT UNSIGNED" + > "INT UNSIGNED" + > "BIGINT UNSIGNED" + > "JSON" ## 自定义授权码 @@ -199,19 +473,44 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata ```json { - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - ["2018-10-03 14:38:05.000", 10.3, 219, 0.31], - ["2018-10-03 14:38:15.000", 12.6, 218, 0.33] - ], - "rows": 2 + "code": 0, + "column_meta": [ + [ + "ts", + "TIMESTAMP", + 8 + ], + [ + "current", + "FLOAT", + 4 + ], + [ + "voltage", + "INT", + 4 + ], + [ + "phase", + "FLOAT", + 4 + ] + ], + "data": [ + [ + "2022-07-30T06:44:40.32Z", + 10.3, + 219, + 0.31 + ], + [ + "2022-07-30T06:44:41.32Z", + 12.6, + 218, + 0.33 + ] + ], + "rows": 2 } ``` @@ -225,83 +524,23 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata ```json { - "status": "succ", - "head": ["affected_rows"], - "column_meta": [["affected_rows", 4, 4]], - "data": [[1]], - "rows": 1 + "code": 0, + "column_meta": [ + [ + "affected_rows", + "INT", + 4 + ] + ], + "data": [ + [ + 0 + ] + ], + "rows": 1 } ``` -## 其他用法 +## 参考 -### 结果集采用 Unix 时间戳 - -HTTP 请求 URL 采用 `/rest/sqlt` 时,返回结果集的时间戳将采用 Unix 时间戳格式表示,例如 - -```bash -curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt -``` - -返回结果: - -```json -{ - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - [1538548685000, 10.3, 219, 0.31], - [1538548695000, 12.6, 218, 0.33] - ], - "rows": 2 -} -``` - -### 结果集采用 UTC 时间字符串 - -HTTP 请求 URL 采用 `/rest/sqlutc` 时,返回结果集的时间戳将采用 UTC 时间字符串表示,例如 - -```bash - curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc -``` - -返回值: - -```json -{ - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - ["2018-10-03T14:38:05.000+0800", 10.3, 219, 0.31], - ["2018-10-03T14:38:15.000+0800", 12.6, 218, 0.33] - ], - "rows": 2 -} -``` - -## 重要配置项 - -下面仅列出一些与 RESTful 接口有关的配置参数,其他系统参数请看配置文件里的说明。 - -- 对外提供 RESTful 服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改)。 -- httpMaxThreads: 启动的线程数量,默认为 2(2.0.17.0 版本开始,默认值改为 CPU 核数的一半向下取整)。 -- restfulRowLimit: 返回结果集(JSON 格式)的最大条数,默认值为 10240。 -- httpEnableCompress: 是否支持压缩,默认不支持,目前 TDengine 仅支持 gzip 压缩格式。 -- httpDebugFlag: 日志开关,默认 131。131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息。 -- httpDbNameMandatory: 是否必须在 RESTful URL 中指定默认的数据库名。默认为 0,即关闭此检查。如果设置为 1,那么每个 RESTful URL 中都必须设置一个默认数据库名,否则无论此时执行的 SQL 语句是否需要指定数据库,都会返回一个执行错误,拒绝执行此 SQL 语句。 - -:::note -如果使用 taosd 提供的 REST API, 那么以上配置需要写在 taosd 的配置文件 taos.cfg 中。如果使用 taosAdapter 提供的 REST API, 那么需要参考 taosAdapter [对应的配置方法](/reference/taosadapter/)。 -::: +[taosAdapter](/reference/taosadapter/) diff --git a/docs/zh/14-reference/03-connector/go.mdx b/docs/zh/14-reference/03-connector/go.mdx index 88b09aa5d0..a87c948d4a 100644 --- a/docs/zh/14-reference/03-connector/go.mdx +++ b/docs/zh/14-reference/03-connector/go.mdx @@ -65,7 +65,7 @@ REST 连接支持所有能运行 Go 的平台。 ### 使用 go get 安装 -`go get -u github.com/taosdata/driver-go/v2@develop` +`go get -u 
github.com/taosdata/driver-go/v3@latest` ### 使用 go mod 管理 @@ -80,7 +80,7 @@ REST 连接支持所有能运行 Go 的平台。 ```go import ( "database/sql" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) ``` @@ -132,7 +132,7 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) func main() { @@ -164,7 +164,7 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func main() { @@ -205,14 +205,14 @@ func main() { ### 更多示例程序 -* [示例程序](https://github.com/taosdata/TDengine/tree/develop/examples/go) +* [示例程序](https://github.com/taosdata/driver-go/tree/3.0/examples) * [视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。 ## 使用限制 由于 REST 接口无状态所以 `use db` 语法不会生效,需要将 db 名称放到 SQL 语句中,如:`create table if not exists tb1 (ts timestamp, a int)`改为`create table if not exists test.tb1 (ts timestamp, a int)`否则将报错`[0x217] Database not specified or available`。 -也可以将 db 名称放到 DSN 中,将 `root:taosdata@http(localhost:6041)/` 改为 `root:taosdata@http(localhost:6041)/test`,此方法在 TDengine 2.4.0.5 版本的 taosAdapter 开始支持。当指定的 db 不存在时执行 `create database` 语句不会报错,而执行针对该 db 的其他查询或写入操作会报错。 +也可以将 db 名称放到 DSN 中,将 `root:taosdata@http(localhost:6041)/` 改为 `root:taosdata@http(localhost:6041)/test`。当指定的 db 不存在时执行 `create database` 语句不会报错,而执行针对该 db 的其他查询或写入操作会报错。 完整示例如下: @@ -224,7 +224,7 @@ import ( "fmt" "time" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func main() { @@ -266,35 +266,27 @@ func main() { ## 常见问题 -1. 无法找到包 `github.com/taosdata/driver-go/v2/taosRestful` - - 将 `go.mod` 中 require 块对`github.com/taosdata/driver-go/v2`的引用改为`github.com/taosdata/driver-go/v2 develop`,之后执行 `go mod tidy`。 - -2. database/sql 中 stmt(参数绑定)相关接口崩溃 +1. database/sql 中 stmt(参数绑定)相关接口崩溃 REST 不支持参数绑定相关接口,建议使用`db.Exec`和`db.Query`。 -3. 使用 `use db` 语句后执行其他语句报错 `[0x217] Database not specified or available` +2. 使用 `use db` 语句后执行其他语句报错 `[0x217] Database not specified or available` 在 REST 接口中 SQL 语句的执行无上下文关联,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 -4. 使用 taosSql 不报错使用 taosRestful 报错 `[0x217] Database not specified or available` +3. 使用 taosSql 不报错使用 taosRestful 报错 `[0x217] Database not specified or available` 因为 REST 接口无状态,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 -5. 升级 `github.com/taosdata/driver-go/v2/taosRestful` - - 将 `go.mod` 文件中对 `github.com/taosdata/driver-go/v2` 的引用改为 `github.com/taosdata/driver-go/v2 develop`,之后执行 `go mod tidy`。 - -6. `readBufferSize` 参数调大后无明显效果 +4. `readBufferSize` 参数调大后无明显效果 `readBufferSize` 调大后会减少获取结果时 `syscall` 的调用。如果查询结果的数据量不大,修改该参数不会带来明显提升,如果该参数修改过大,瓶颈会在解析 JSON 数据。如果需要优化查询速度,需要根据实际情况调整该值来达到查询效果最优。 -7. `disableCompression` 参数设置为 `false` 时查询效率降低 +5. `disableCompression` 参数设置为 `false` 时查询效率降低 当 `disableCompression` 参数设置为 `false` 时查询结果会使用 `gzip` 压缩后传输,拿到数据后要先进行 `gzip` 解压。 -8. `go get` 命令无法获取包,或者获取包超时 +6. 
`go get` 命令无法获取包,或者获取包超时 设置 Go 代理 `go env -w GOPROXY=https://goproxy.cn,direct`。 @@ -334,17 +326,33 @@ func main() { #### 订阅 -* `func (conn *Connector) Subscribe(restart bool, topic string, sql string, interval time.Duration) (Subscriber, error)` +* `func NewConsumer(conf *Config) (*Consumer, error)` - 订阅数据。 +创建消费者。 -* `func (s *taosSubscriber) Consume() (driver.Rows, error)` +* `func (c *Consumer) Subscribe(topics []string) error` - 消费订阅数据,返回 `database/sql/driver` 包的 `Rows` 结构。 +订阅主题。 -* `func (s *taosSubscriber) Unsubscribe(keepProgress bool)` +* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)` - 取消订阅数据。 +轮询消息。 + +* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error` + +提交消息。 + +* `func (c *Consumer) FreeMessage(message unsafe.Pointer)` + +释放消息。 + +* `func (c *Consumer) Unsubscribe() error` + +取消订阅。 + +* `func (c *Consumer) Close() error` + +关闭消费者。 #### schemaless @@ -366,10 +374,6 @@ func main() { 参数绑定单行插入。 -* `func (conn *Connector) StmtQuery(sql string, params *param.Param) (rows driver.Rows, err error)` - - 参数绑定查询,返回 `database/sql/driver` 包的 `Rows` 结构。 - * `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` 初始化参数。 @@ -408,4 +412,4 @@ func main() { ## API 参考 -全部 API 见 [driver-go 文档](https://pkg.go.dev/github.com/taosdata/driver-go/v2) +全部 API 见 [driver-go 文档](https://pkg.go.dev/github.com/taosdata/driver-go/v3) diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 3d539ea251..95ce5c8338 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -316,6 +316,7 @@ void doDestroyRequest(void *p) { taosArrayDestroy(pRequest->tableList); taosArrayDestroy(pRequest->dbList); + taosArrayDestroy(pRequest->targetTableList); destroyQueryExecRes(&pRequest->body.resInfo.execRes); diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 4b57f45631..e55c562e38 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -687,6 +687,7 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl int32_t numOfRows = 0; int32_t cols = 0; SConnObj *pConn = NULL; + int32_t keepTime = tsShellActivityTimer * 3; if (pShow->pIter == NULL) { SProfileMgmt *pMgmt = &pMnode->profileMgmt; @@ -700,6 +701,10 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl break; } + if ((taosGetTimestampMs() - pConn->lastAccessTimeMs) > (keepTime * 1000)) { + continue; + } + cols = 0; SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index e61becbe17..bdf60b110b 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -30,12 +30,20 @@ void ctgFreeMsgSendParam(void* param) { taosMemoryFree(param); } +void ctgFreeBatchMsg(void* msg) { + if (NULL == msg) { + return; + } + SBatchMsg* pMsg = (SBatchMsg*)msg; + taosMemoryFree(pMsg->msg); +} + void ctgFreeBatch(SCtgBatch *pBatch) { if (NULL == pBatch) { return; } - taosArrayDestroy(pBatch->pMsgs); + taosArrayDestroyEx(pBatch->pMsgs, ctgFreeBatchMsg); taosArrayDestroy(pBatch->pTaskIds); } diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 37d80917b3..abab2126c0 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -745,6 +745,7 @@ void nodesDestroyNode(SNode* pNode) { } taosArrayDestroy(pQuery->pDbList); 
taosArrayDestroy(pQuery->pTableList); + taosArrayDestroy(pQuery->pTargetTableList); taosArrayDestroy(pQuery->pPlaceholderValues); nodesDestroyNode(pQuery->pPrepareRoot); break; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 02a7ca0fbb..5d1889194e 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -398,6 +398,7 @@ static void destroyTranslateContext(STranslateContext* pCxt) { taosHashCleanup(pCxt->pDbs); taosHashCleanup(pCxt->pTables); + taosHashCleanup(pCxt->pTargetTables); } static bool isSelectStmt(SNode* pCurrStmt) { diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index cfeac04bbd..d0c5a76f4b 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -140,13 +140,23 @@ int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type) { SCL_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); } + colDataDestroy(out.columnData); + taosMemoryFreeClear(out.columnData); + out.columnData = taosMemoryCalloc(1, sizeof(SColumnInfoData)); + cell = cell->pNext; } *data = pObj; + + colDataDestroy(out.columnData); + taosMemoryFreeClear(out.columnData); return TSDB_CODE_SUCCESS; _return: + + colDataDestroy(out.columnData); + taosMemoryFreeClear(out.columnData); taosHashCleanup(pObj); SCL_RET(code); } diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index 3bec91226c..544ecce175 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -92,7 +92,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa switch (msgType) { case TDMT_VND_COMMIT_RSP: { SCH_ERR_JRET(rspCode); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } case TDMT_VND_CREATE_TABLE_RSP: { @@ -118,7 +118,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa SCH_ERR_JRET(rspCode); taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } case TDMT_VND_DROP_TABLE_RSP: { @@ -144,7 +144,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa SCH_ERR_JRET(rspCode); taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } case TDMT_VND_ALTER_TABLE_RSP: { @@ -169,7 +169,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } case TDMT_VND_SUBMIT_RSP: { @@ -218,7 +218,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -238,7 +238,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -263,7 +263,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -379,13 +379,15 @@ int32_t schHandleCallback(void *param, 
SDataBuf *pMsg, int32_t rspCode) { qDebug("begin to handle rsp msg, type:%s, handle:%p, code:%s", TMSG_INFO(pMsg->msgType), pMsg->handle, tstrerror(rspCode)); - SCH_ERR_RET(schProcessOnCbBegin(&pJob, &pTask, pParam->queryId, pParam->refId, pParam->taskId)); + SCH_ERR_JRET(schProcessOnCbBegin(&pJob, &pTask, pParam->queryId, pParam->refId, pParam->taskId)); code = schHandleResponseMsg(pJob, pTask, pParam->execId, pMsg, rspCode); pMsg->pData = NULL; schProcessOnCbEnd(pJob, pTask, code); +_return: + taosMemoryFreeClear(pMsg->pData); qDebug("end to handle rsp msg, type:%s, handle:%p, code:%s", TMSG_INFO(pMsg->msgType), pMsg->handle, @@ -398,6 +400,9 @@ int32_t schHandleDropCallback(void *param, SDataBuf *pMsg, int32_t code) { SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param; qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 " drop task rsp received, code:0x%x", pParam->queryId, pParam->taskId, code); + if (pMsg) { + taosMemoryFree(pMsg->pData); + } return TSDB_CODE_SUCCESS; } @@ -408,6 +413,8 @@ int32_t schHandleLinkBrokenCallback(void *param, SDataBuf *pMsg, int32_t code) { qDebug("handle %p is broken", pMsg->handle); if (head->isHbParam) { + taosMemoryFree(pMsg->pData); + SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param; SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL}; SCH_ERR_RET(schUpdateHbConnection(&hbParam->nodeEpId, &trans)); diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 025891a26c..ecd0253e1c 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -424,10 +424,15 @@ int32_t schHandleRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32 } } - SCH_RET(schDoTaskRedirect(pJob, pTask, pData, rspCode)); + code = schDoTaskRedirect(pJob, pTask, pData, rspCode); + taosMemoryFree(pData->pData); + + SCH_RET(code); _return: + taosMemoryFree(pData->pData); + SCH_RET(schProcessOnTaskFailure(pJob, pTask, code)); } diff --git a/source/libs/sync/test/sh/auto_bench.sh b/source/libs/sync/test/sh/auto_bench.sh index 32dc071018..06af59da34 100644 --- a/source/libs/sync/test/sh/auto_bench.sh +++ b/source/libs/sync/test/sh/auto_bench.sh @@ -1,7 +1,7 @@ #!/bin/bash -if [ $# != 5 ] ; then - echo "Uasge: $0 instances vgroups replica ctables rows" +if [ $# != 6 ] ; then + echo "Uasge: $0 instances vgroups replica ctables rows weak" echo "" exit 1 fi @@ -11,11 +11,12 @@ vgroups=$2 replica=$3 ctables=$4 rows=$5 +weak=$6 echo "params: instances:${instances}, vgroups:${vgroups}, replica:${replica}, ctables:${ctables}, rows:${rows}" dt=`date "+%Y-%m-%d-%H-%M-%S"` -casedir=instances_${instances}_vgroups_${vgroups}_replica_${replica}_ctables_${ctables}_rows_${rows}_${dt} +casedir=instances_${instances}_vgroups_${vgroups}_replica_${replica}_ctables_${ctables}_rows_${rows}_weak_${weak}_${dt} mkdir ${casedir} cp ./insert.tpl.json ${casedir} cd ${casedir} diff --git a/source/libs/sync/test/sh/insert.tpl.json b/source/libs/sync/test/sh/insert.tpl.json index 633dd70a24..e68ff3c5ff 100644 --- a/source/libs/sync/test/sh/insert.tpl.json +++ b/source/libs/sync/test/sh/insert.tpl.json @@ -16,7 +16,10 @@ { "dbinfo": { "name": "db1", - "drop": "yes", + "drop": "no", + "wal_retention_period": -1, + "wal_retention_size": -1, + "drop": "no", "vgroups": tpl_vgroups_tpl, "replica": tpl_replica_tpl }, diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 462cf8cfc6..0b47c0894c 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -179,7 +179,6 @@ 
./test.sh -f tsim/query/scalarFunction.sim ./test.sh -f tsim/query/scalarNull.sim ./test.sh -f tsim/query/session.sim -./test.sh -f tsim/query/udf.sim # ---- qnode ./test.sh -f tsim/qnode/basic1.sim @@ -316,6 +315,7 @@ ./test.sh -f tsim/valgrind/checkError5.sim ./test.sh -f tsim/valgrind/checkError6.sim ./test.sh -f tsim/valgrind/checkError7.sim +./test.sh -f tsim/valgrind/checkUdf.sim # --- vnode # unsupport ./test.sh -f tsim/vnode/replica3_basic.sim @@ -339,7 +339,7 @@ # --- catalog ---- ./test.sh -f tsim/catalog/alterInCurrent.sim -# --- scalar +# --- scalar ---- ./test.sh -f tsim/scalar/in.sim ./test.sh -f tsim/scalar/scalar.sim @@ -384,7 +384,7 @@ ./test.sh -f tsim/compute/sum.sim ./test.sh -f tsim/compute/top.sim -# ---- field +# ---- field ---- ./test.sh -f tsim/field/2.sim ./test.sh -f tsim/field/3.sim ./test.sh -f tsim/field/4.sim diff --git a/tests/script/tsim/valgrind/basic2.sim b/tests/script/tsim/valgrind/basic2.sim index 7c905209ee..522070f487 100644 --- a/tests/script/tsim/valgrind/basic2.sim +++ b/tests/script/tsim/valgrind/basic2.sim @@ -1,7 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c debugflag -v 131 -system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode1 -s start -v sql connect print =============== step1: create drop show dnodes @@ -42,53 +41,16 @@ while $i < $tbNum sql insert into $tb values ($ms , $x , $x , $x ) $x = $x + 1 endw - - $cc = $x * 60000 - $ms = 1601481600000 + $cc - sql insert into $tb values ($ms , NULL , NULL , NULL ) $i = $i + 1 endw -system sh/exec.sh -n dnode1 -s stop -x SIGINT -system sh/exec.sh -n dnode1 -s start -v - print =============== step3: tb -sql select avg(tbcol) from tb1 -sql select avg(tbcol) from tb1 where ts <= 1601481840000 -sql select avg(tbcol) as b from tb1 -sql select avg(tbcol) as b from tb1 interval(1d) -sql select avg(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m) -sql select bottom(tbcol, 2) from tb1 where ts <= 1601481840000 -sql select top(tbcol, 2) from tb1 where ts <= 1601481840000 -sql select percentile(tbcol, 2) from tb1 where ts <= 1601481840000 -sql select leastsquares(tbcol, 1, 1) as b from tb1 where ts <= 1601481840000 -sql show table distributed tb1 -sql select count(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m) -sql select diff(tbcol) from tb1 where ts <= 1601481840000 -sql select diff(tbcol) from tb1 where tbcol > 5 and tbcol < 20 -sql select first(tbcol), last(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m) -sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol interval(1m) -#sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) -sql select last_row(*) from tb1 where tbcol > 5 and tbcol < 20 - -print =============== step4: stb -sql select avg(tbcol) as c from stb -sql select avg(tbcol) as c from stb where ts <= 1601481840000 -sql select avg(tbcol) as c from stb where tgcol < 5 and ts <= 1601481840000 -sql select avg(tbcol) as c from stb interval(1m) -sql select avg(tbcol) as c from stb interval(1d) -sql select avg(tbcol) as b from stb where ts <= 1601481840000 interval(1m) -sql select avg(tbcol) as c from stb group by tgcol -sql select avg(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m) -sql show table distributed stb -sql select count(tbcol) as b from stb where ts <= 
1601481840000 partition by tgcol interval(1m) -sql select diff(tbcol) from stb where ts <= 1601481840000 -sql select first(tbcol), last(tbcol) as c from stb group by tgcol -sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 and tbcol2 is null partition by tgcol interval(1m) -sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m) -sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from stb where ts <= 1601481840000 partition by tgcol interval(1m) -#sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) -sql select last_row(tbcol), stddev(tbcol) from stb where tbcol > 5 and tbcol < 20 group by tgcol +sql select * from tb1 where ts in ('2018-07-10 16:31:01', '2022-07-10 16:31:03', 1657441865000); +sql select * from tb1 where tbcol2 in (257); +sql select * from tb1 where tbcol3 in (2, 257); +sql select * from stb where ts in ('2018-07-10 16:31:01', '2022-07-10 16:31:03', 1657441865000); +sql select * from stb where tbcol2 in (257); +sql select * from stb where tbcol3 in (2, 257); _OVER: system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/valgrind/checkError6.sim b/tests/script/tsim/valgrind/checkError6.sim index eccf5b31f3..df5e46da8b 100644 --- a/tests/script/tsim/valgrind/checkError6.sim +++ b/tests/script/tsim/valgrind/checkError6.sim @@ -104,6 +104,7 @@ sql select length("abcd1234"), char_length("abcd1234=-+*") from stb sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4), rtrim(tbcol4), concat(tbcol4, tbcol5), concat_ws('_', tbcol4, tbcol5), substr(tbcol4, 1, 4) from stb sql select * from stb where tbcol not in (1,2,3,null); sql select * from stb where tbcol + 3 <> null; +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb where tbcol = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol interval(1d) print =============== step5: explain sql explain analyze select ts from stb where -2; @@ -118,6 +119,11 @@ sql explain analyze verbose true select * from information_schema.user_stables w sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) +print =============== step6: in cast +sql select 1+1n; +sql select cast(1 as timestamp)+1n; +sql select cast(1 as timestamp)+1y; + print =============== check $null= diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/valgrind/checkUdf.sim similarity index 91% rename from tests/script/tsim/query/udf.sim rename to tests/script/tsim/valgrind/checkUdf.sim index 5d69887c86..1026bad3d0 100644 --- a/tests/script/tsim/query/udf.sim +++ b/tests/script/tsim/valgrind/checkUdf.sim @@ -1,8 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c udf -v 1 - -print ========= start dnode1 as leader system sh/exec.sh -n dnode1 -s start sql connect @@ -149,4 +147,18 @@ if $rows != 0 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGTERM +_OVER: +system sh/exec.sh -n dnode1 -s stop -x SIGINT +print =============== check +$null= + 
+system_content sh/checkValgrind.sh -n dnode1 +print cmd return result ----> [ $system_content ] +if $system_content > 0 then + return -1 +endi + +if $system_content == $null then + return -1 +endi +
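To complement the driver-go v3 subscription API listed in the go.mdx changes above (`NewConsumer`, `Subscribe`, `Poll`, `Commit`, `FreeMessage`, `Close`), here is a condensed, editor-added sketch of the consumer flow. Connection settings and the topic name are placeholder values, most config options are left at their defaults, and the full version is the rewritten docs/examples/go/sub/main.go earlier in this diff.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/taosdata/driver-go/v3/af/tmq"
)

func main() {
	// Placeholder consumer configuration; the topic is assumed to exist already.
	config := tmq.NewConfig()
	defer config.Destroy()
	if err := config.SetGroupID("example_group"); err != nil {
		panic(err)
	}
	if err := config.SetConnectUser("root"); err != nil {
		panic(err)
	}
	if err := config.SetConnectPass("taosdata"); err != nil {
		panic(err)
	}

	consumer, err := tmq.NewConsumer(config)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()

	if err := consumer.Subscribe([]string{"example_tmq_topic"}); err != nil {
		panic(err)
	}

	// Poll once, print whatever arrived, then commit and release the message.
	result, err := consumer.Poll(time.Second)
	if err != nil {
		panic(err)
	}
	fmt.Println(result.Type, result.Data)
	if err := consumer.Commit(context.Background(), result.Message); err != nil {
		panic(err)
	}
	consumer.FreeMessage(result.Message)
}
```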