Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/TS-4994-3.0

Commit 64e9fd96e8

@@ -212,3 +212,18 @@ SHOW db_name.ALIVE;
```

Queries the availability status of the database db_name. The return value is 0 (unavailable), 1 (fully available), or 2 (partially available, meaning some of the database's VNODEs are available while others are not).
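
As an illustration (not part of this change), a minimal Go sketch for checking this status through the RESTful driver that taoskeeper itself imports might look as follows; the DSN format and the single integer result column are assumptions to adapt to your deployment.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/taosdata/driver-go/v3/taosRestful"
)

func main() {
	// Assumed RESTful DSN; adjust credentials, host and port for your deployment.
	db, err := sql.Open("taosRestful", "root:taosdata@http(127.0.0.1:6041)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// SHOW db_name.ALIVE is assumed to return a single row with the status code
	// described above; replace db_name with a real database.
	var status int
	if err := db.QueryRow("SHOW db_name.ALIVE;").Scan(&status); err != nil {
		log.Fatal(err)
	}

	switch status {
	case 0:
		fmt.Println("db_name is unavailable")
	case 1:
		fmt.Println("db_name is fully available")
	case 2:
		fmt.Println("db_name is partially available (some VNODEs are down)")
	}
}
```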

## View DB Disk Usage

```sql
select * from INFORMATION_SCHEMA.INS_DISK_USAGE where db_name = 'db_name'
```

Shows the disk space used by each module of the database.

```sql
SHOW db_name.disk_info;
```

Shows the compression ratio of the database db_name and the size its data occupies on disk.

This command is essentially equivalent to `select sum(data1 + data2 + data3)/sum(raw_data), sum(data1 + data2 + data3) from information_schema.ins_disk_usage where db_name="dbname"`.
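
To make the equivalence concrete, here is a minimal Go sketch (illustrative only, using the same assumed RESTful DSN as above) that runs the aggregate directly and prints the two values `SHOW db_name.disk_info` reports.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/taosdata/driver-go/v3/taosRestful"
)

func main() {
	db, err := sql.Open("taosRestful", "root:taosdata@http(127.0.0.1:6041)/") // assumed DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The same aggregate that SHOW db_name.disk_info boils down to.
	row := db.QueryRow(`select sum(data1 + data2 + data3)/sum(raw_data),
		sum(data1 + data2 + data3)
		from information_schema.ins_disk_usage where db_name = 'db_name'`)

	var ratio float64 // compressed size divided by the estimated raw size
	var sizeKB int64  // total data-file size across the three storage tiers, in KB
	if err := row.Scan(&ratio, &sizeKB); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("compression ratio: %.2f, data size on disk: %d KB\n", ratio, sizeKB)
}
```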

@@ -318,13 +318,29 @@ Configuration parameters for each dnode in the system. Users with SYSINFO set to

Note: Users with SYSINFO set to 0 cannot view this table.

| #   | **Column Name** | **Data Type**  | **Description**                      |
| --- | :-------------: | -------------- | ------------------------------------ |
| 1   | user_name       | VARCHAR(24)    | Username                             |
| 2   | privilege       | VARCHAR(10)    | Permission description               |
| 3   | db_name         | VARCHAR(65)    | Database name                        |
| 4   | table_name      | VARCHAR(193)   | Table name                           |
| 5   | condition       | VARCHAR(49152) | Subtable permission filter condition |

## INS_DISK_USAGE

| #   | **Column Name** | **Data Type** | **Description**                            |
| --- | :-------------: | ------------- | ------------------------------------------ |
| 1   | db_name         | VARCHAR(32)   | Database name                              |
| 2   | vgroup_id       | INT           | vgroup ID                                  |
| 3   | wal             | BIGINT        | WAL file size, in KB                       |
| 4   | data1           | BIGINT        | Data file size on primary storage, in KB   |
| 5   | data2           | BIGINT        | Data file size on secondary storage, in KB |
| 6   | data3           | BIGINT        | Data file size on tertiary storage, in KB  |
| 7   | cache_rdb       | BIGINT        | Size of last/last_row files, in KB         |
| 8   | table_meta      | BIGINT        | Size of meta files, in KB                  |
| 9   | s3              | BIGINT        | Size occupied on S3, in KB                 |
| 10  | raw_data        | BIGINT        | Estimated size of raw data, in KB          |
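
As a hypothetical usage example (not part of taoskeeper), the helper below walks the per-vgroup rows of this view and prints how the space splits across the modules listed above; it assumes a `*sql.DB` opened with the taosRestful driver as in the earlier sketches, plus the `database/sql` and `fmt` imports.

```go
// printDiskUsageByVgroup is an illustrative helper, not part of taoskeeper:
// it reads INS_DISK_USAGE for one database and prints how the space splits
// across the modules documented above. All sizes are reported in KB.
func printDiskUsageByVgroup(db *sql.DB) error {
	rows, err := db.Query(`select vgroup_id, wal, data1, data2, data3, cache_rdb, table_meta, s3
		from information_schema.ins_disk_usage where db_name = 'db_name'`)
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var vg int32
		var wal, d1, d2, d3, cache, meta, s3 int64
		if err := rows.Scan(&vg, &wal, &d1, &d2, &d3, &cache, &meta, &s3); err != nil {
			return err
		}
		fmt.Printf("vgroup %d: wal=%d KB, data=%d KB, cache_rdb=%d KB, meta=%d KB, s3=%d KB\n",
			vg, wal, d1+d2+d3, cache, meta, s3)
	}
	return rows.Err()
}
```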

## INS_FILESETS

@@ -217,3 +217,21 @@ SHOW db_name.ALIVE;
```

Queries the availability status of the database db_name. The return value is 0 (unavailable), 1 (fully available), or 2 (partially available, meaning some of the database's VNODEs are available while others are not).

## View DB Disk Usage

```sql
select * from INFORMATION_SCHEMA.INS_DISK_USAGE where db_name = 'db_name'
```

Shows the disk space used by each module of the database.

```sql
SHOW db_name.disk_info;
```

Shows the data compression ratio of the database db_name and the size its data occupies on disk.

This command is essentially equivalent to `select sum(data1 + data2 + data3)/sum(raw_data), sum(data1 + data2 + data3) from information_schema.ins_disk_usage where db_name="dbname"`.

@@ -319,11 +319,34 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to …

Note: Users with SYSINFO set to 0 cannot view this table.

| #   | **Column Name** | **Data Type**  | **Description**                      |
| --- | :-------------: | -------------- | ------------------------------------ |
| 1   | user_name       | VARCHAR(24)    | Username                             |
| 2   | privilege       | VARCHAR(10)    | Permission description               |
| 3   | db_name         | VARCHAR(65)    | Database name                        |
| 4   | table_name      | VARCHAR(193)   | Table name                           |
| 5   | condition       | VARCHAR(49152) | Subtable permission filter condition |

## INS_DISK_USAGE

| #   | **Column Name** | **Data Type** | **Description**                            |
| --- | :-------------: | ------------- | ------------------------------------------ |
| 1   | db_name         | VARCHAR(32)   | Database name                              |
| 2   | vgroup_id       | INT           | vgroup ID                                  |
| 3   | wal             | BIGINT        | WAL file size, in KB                       |
| 4   | data1           | BIGINT        | Data file size on primary storage, in KB   |
| 5   | data2           | BIGINT        | Data file size on secondary storage, in KB |
| 6   | data3           | BIGINT        | Data file size on tertiary storage, in KB  |
| 7   | cache_rdb       | BIGINT        | Size of last/last_row files, in KB         |
| 8   | table_meta      | BIGINT        | Size of meta files, in KB                  |
| 9   | s3              | BIGINT        | Size occupied on S3, in KB                 |
| 10  | raw_data        | BIGINT        | Estimated size of raw data, in KB          |

## INS_FILESETS

Provides information about the file sets in which the data is currently stored.

| #   | **Column Name** | **Data Type** | **Description** |
| --- | :-------------: | ------------- | --------------- |
| 1   | db_name         | VARCHAR(65)   | Database name   |

@@ -333,4 +356,4 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to …

| 5   | end_time      | TIMESTAMP | End time of the data covered by the file set                  |
| 6   | total_size    | BIGINT    | Total size of the file set                                    |
| 7   | last_compact  | TIMESTAMP | Time of the last compaction                                   |
| 8   | shold_compact | bool      | Whether compaction is needed; true: needed, false: not needed |
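
For illustration, the following sketch queries this view for file sets currently flagged for compaction; the RESTful DSN, the filter on `shold_compact`, and the Go types used for scanning (in particular `time.Time` for the TIMESTAMP columns) are assumptions, not part of this change.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/taosdata/driver-go/v3/taosRestful"
)

func main() {
	// Assumed RESTful DSN; adjust for your deployment.
	db, err := sql.Open("taosRestful", "root:taosdata@http(127.0.0.1:6041)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// List file sets that the server currently flags as needing compaction.
	rows, err := db.Query(`select db_name, end_time, total_size, last_compact
		from information_schema.ins_filesets where shold_compact = true`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var dbName string
		var endTime, lastCompact time.Time
		var totalSize int64
		if err := rows.Scan(&dbName, &endTime, &totalSize, &lastCompact); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: fileset ending at %s, total size %d, last compacted %s\n",
			dbName, endTime, totalSize, lastCompact)
	}
}
```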

@@ -23,8 +23,8 @@ var adapterLog = log.GetLogger("ADP")

type adapterReqType int

const (
	rest adapterReqType = iota // 0 - rest
	ws                         // 1 - ws
	rest adapterReqType = iota
	ws
)

type Adapter struct {

@@ -210,7 +210,7 @@ var adapterTableSql = "create stable if not exists `adapter_requests` (" +

	"`other_fail` int unsigned, " +
	"`query_in_process` int unsigned, " +
	"`write_in_process` int unsigned ) " +
	"tags (`endpoint` varchar(32), `req_type` tinyint unsigned )"
	"tags (`endpoint` varchar(255), `req_type` tinyint unsigned )"

func (a *Adapter) createTable() error {
	if a.conn == nil {

@@ -2,6 +2,7 @@ package api

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"

@@ -96,3 +97,38 @@ func TestAdapter2(t *testing.T) {

	conn.Exec(context.Background(), "drop database "+c.Metrics.Database.Name, util.GetQidOwn())
}

func Test_adapterTableSql(t *testing.T) {
	conn, _ := db.NewConnector("root", "taosdata", "127.0.0.1", 6041, false)
	defer conn.Close()

	dbName := "db_202412031446"
	conn.Exec(context.Background(), "create database "+dbName, util.GetQidOwn())
	defer conn.Exec(context.Background(), "drop database "+dbName, util.GetQidOwn())

	conn, _ = db.NewConnectorWithDb("root", "taosdata", "127.0.0.1", 6041, dbName, false)
	defer conn.Close()

	conn.Exec(context.Background(), adapterTableSql, util.GetQidOwn())

	testCases := []struct {
		ep      string
		wantErr bool
	}{
		{"", false},
		{"hello", false},
		{strings.Repeat("a", 128), false},
		{strings.Repeat("a", 255), false},
		{strings.Repeat("a", 256), true},
	}

	for i, tc := range testCases {
		// The `endpoint` tag is now varchar(255): values up to 255 bytes succeed, 256 bytes fail.
		sql := fmt.Sprintf("create table d%d using adapter_requests tags ('%s', 0)", i, tc.ep)
		_, err := conn.Exec(context.Background(), sql, util.GetQidOwn())
		if tc.wantErr {
			assert.Error(t, err) // [0x2653] Value too long for column/tag: endpoint
		} else {
			assert.NoError(t, err)
		}
	}
}

@@ -17,10 +17,7 @@ var commonLogger = log.GetLogger("CMN")

func CreateDatabase(username string, password string, host string, port int, usessl bool, dbname string, databaseOptions map[string]interface{}) {
	qid := util.GetQidOwn()

	commonLogger := commonLogger.WithFields(
		logrus.Fields{config.ReqIDKey: qid},
	)
	commonLogger := commonLogger.WithFields(logrus.Fields{config.ReqIDKey: qid})

	ctx := context.Background()

@@ -43,7 +40,6 @@ func CreateDatabase(username string, password string, host string, port int, use

		}
		return
	}
	panic(err)
}

func generateCreateDBSql(dbname string, databaseOptions map[string]interface{}) string {

@@ -748,20 +748,21 @@ func (gm *GeneralMetric) initColumnSeqMap() error {

}

func (gm *GeneralMetric) createSTables() error {
	var createTableSql = "create stable if not exists taosd_cluster_basic " +
		"(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " +
		"tags (cluster_id varchar(50))"

	if gm.conn == nil {
		return errNoConnection
	}

	createTableSql := "create stable if not exists taosd_cluster_basic " +
		"(ts timestamp, first_ep varchar(255), first_ep_dnode_id INT, cluster_version varchar(20)) " +
		"tags (cluster_id varchar(50))"

	_, err := gm.conn.Exec(context.Background(), createTableSql, util.GetQidOwn())
	if err != nil {
		return err
	}

	createTableSql = "create stable if not exists taos_slow_sql_detail" +
		" (start_ts TIMESTAMP, request_id BIGINT UNSIGNED PRIMARY KEY, query_time INT, code INT, error_info varchar(128), " +
	createTableSql = "create stable if not exists taos_slow_sql_detail " +
		"(start_ts TIMESTAMP, request_id BIGINT UNSIGNED PRIMARY KEY, query_time INT, code INT, error_info varchar(128), " +
		"type TINYINT, rows_num BIGINT, sql varchar(16384), process_name varchar(32), process_id varchar(32)) " +
		"tags (db varchar(1024), `user` varchar(32), ip varchar(32), cluster_id varchar(32))"

@@ -7,6 +7,7 @@ import (

	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/taosdata/taoskeeper/db"

@@ -255,6 +256,7 @@ func TestGenMetric(t *testing.T) {

		}
	})
}

func TestGetSubTableName(t *testing.T) {
	tests := []struct {
		stbName string

@@ -356,3 +358,42 @@ func TestGetSubTableName(t *testing.T) {

		})
	}
}

func Test_createSTables(t *testing.T) {
	conn, _ := db.NewConnector("root", "taosdata", "127.0.0.1", 6041, false)
	defer conn.Close()

	dbName := "db_202412031527"
	conn.Exec(context.Background(), "create database "+dbName, util.GetQidOwn())
	defer conn.Exec(context.Background(), "drop database "+dbName, util.GetQidOwn())

	conn, _ = db.NewConnectorWithDb("root", "taosdata", "127.0.0.1", 6041, dbName, false)
	defer conn.Close()

	gm := GeneralMetric{conn: conn}
	gm.createSTables()

	testCases := []struct {
		ep      string
		wantErr bool
	}{
		{"", false},
		{"hello", false},
		{strings.Repeat("a", 128), false},
		{strings.Repeat("a", 255), false},
		{strings.Repeat("a", 256), true},
	}

	conn.Exec(context.Background(),
		"create table d0 using taosd_cluster_basic tags('cluster_id')", util.GetQidOwn())

	for _, tc := range testCases {
		// first_ep is now varchar(255): inserts up to 255 bytes succeed, 256 bytes fail.
		sql := fmt.Sprintf("insert into d0 (ts, first_ep) values(%d, '%s')", time.Now().UnixMilli(), tc.ep)
		_, err := conn.Exec(context.Background(), sql, util.GetQidOwn())
		if tc.wantErr {
			assert.Error(t, err) // [0x2653] Value too long for column/tag: endpoint
		} else {
			assert.NoError(t, err)
		}
	}
}

@@ -384,7 +384,7 @@ func insertClusterInfoSql(info ClusterInfo, ClusterID string, protocol int, ts s

		sqls = append(sqls, fmt.Sprintf("insert into d_info_%s using d_info tags (%d, '%s', '%s') values ('%s', '%s')",
			ClusterID+strconv.Itoa(dnode.DnodeID), dnode.DnodeID, dnode.DnodeEp, ClusterID, ts, dnode.Status))
		dtotal++
		if "ready" == dnode.Status {
		if dnode.Status == "ready" {
			dalive++
		}
	}

@@ -393,8 +393,8 @@ func insertClusterInfoSql(info ClusterInfo, ClusterID string, protocol int, ts s

		sqls = append(sqls, fmt.Sprintf("insert into m_info_%s using m_info tags (%d, '%s', '%s') values ('%s', '%s')",
			ClusterID+strconv.Itoa(mnode.MnodeID), mnode.MnodeID, mnode.MnodeEp, ClusterID, ts, mnode.Role))
		mtotal++
		//LEADER FOLLOWER CANDIDATE ERROR
		if "ERROR" != mnode.Role {
		// LEADER FOLLOWER CANDIDATE ERROR
		if mnode.Role != "ERROR" {
			malive++
		}
	}

@@ -45,7 +45,7 @@ var dnodeEpLen = strconv.Itoa(255)

var CreateClusterInfoSql = "create table if not exists cluster_info (" +
	"ts timestamp, " +
	"first_ep binary(134), " +
	"first_ep binary(255), " +
	"first_ep_dnode_id int, " +
	"version binary(12), " +
	"master_uptime float, " +

@@ -0,0 +1,51 @@

package api

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/taosdata/taoskeeper/db"
	"github.com/taosdata/taoskeeper/util"
)

func TestCreateClusterInfoSql(t *testing.T) {
	conn, _ := db.NewConnector("root", "taosdata", "127.0.0.1", 6041, false)
	defer conn.Close()

	dbName := "db_202412031539"
	conn.Exec(context.Background(), "create database "+dbName, util.GetQidOwn())
	defer conn.Exec(context.Background(), "drop database "+dbName, util.GetQidOwn())

	conn, _ = db.NewConnectorWithDb("root", "taosdata", "127.0.0.1", 6041, dbName, false)
	defer conn.Close()

	conn.Exec(context.Background(), CreateClusterInfoSql, util.GetQidOwn())

	testCases := []struct {
		ep      string
		wantErr bool
	}{
		{"", false},
		{"hello", false},
		{strings.Repeat("a", 128), false},
		{strings.Repeat("a", 255), false},
		{strings.Repeat("a", 256), true},
	}

	conn.Exec(context.Background(),
		"create table d0 using cluster_info tags('cluster_id')", util.GetQidOwn())

	for _, tc := range testCases {
		sql := fmt.Sprintf("insert into d0 (ts, first_ep) values(%d, '%s')", time.Now().UnixMilli(), tc.ep)
		_, err := conn.Exec(context.Background(), sql, util.GetQidOwn())
		if tc.wantErr {
			assert.Error(t, err) // [0x2653] Value too long for column/tag: endpoint
		} else {
			assert.NoError(t, err)
		}
	}
}

@@ -315,14 +315,13 @@ func (cmd *Command) TransferDataToDest(data *db.Data, dstTable string, tagNum in

// cluster_info
func (cmd *Command) TransferTaosdClusterBasicInfo() error {

	ctx := context.Background()

	endTime := time.Now()
	delta := time.Hour * 24 * 10

	var createTableSql = "create stable if not exists taosd_cluster_basic " +
		"(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " +
		"(ts timestamp, first_ep varchar(255), first_ep_dnode_id INT, cluster_version varchar(20)) " +
		"tags (cluster_id varchar(50))"

	if _, err := cmd.conn.Exec(ctx, createTableSql, util.GetQidOwn()); err != nil {

@@ -0,0 +1,55 @@

package cmd

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/taosdata/taoskeeper/db"
	"github.com/taosdata/taoskeeper/infrastructure/config"
	"github.com/taosdata/taoskeeper/util"
)

func TestTransferTaosdClusterBasicInfo(t *testing.T) {
	config.InitConfig()

	conn, _ := db.NewConnector("root", "taosdata", "127.0.0.1", 6041, false)
	defer conn.Close()

	dbName := "db_202412031539"
	conn.Exec(context.Background(), "create database "+dbName, util.GetQidOwn())
	defer conn.Exec(context.Background(), "drop database "+dbName, util.GetQidOwn())

	conn, _ = db.NewConnectorWithDb("root", "taosdata", "127.0.0.1", 6041, dbName, false)
	defer conn.Close()

	cmd := Command{conn: conn, fromTime: time.Now().Add(time.Duration(1 * time.Hour))}
	cmd.TransferTaosdClusterBasicInfo()

	testCases := []struct {
		ep      string
		wantErr bool
	}{
		{"", false},
		{"hello", false},
		{strings.Repeat("a", 128), false},
		{strings.Repeat("a", 255), false},
		{strings.Repeat("a", 256), true},
	}

	conn.Exec(context.Background(),
		"create table d0 using taosd_cluster_basic tags('cluster_id')", util.GetQidOwn())

	for _, tc := range testCases {
		sql := fmt.Sprintf("insert into d0 (ts, first_ep) values(%d, '%s')", time.Now().UnixMilli(), tc.ep)
		_, err := conn.Exec(context.Background(), sql, util.GetQidOwn())
		if tc.wantErr {
			assert.Error(t, err) // [0x2653] Value too long for column/tag: endpoint
		} else {
			assert.NoError(t, err)
		}
	}
}

@@ -1,8 +0,0 @@

package cmd

import (
	"testing"
)

func TestEmpty(t *testing.T) {
}

@@ -10,7 +10,6 @@ import (

	"time"

	"github.com/sirupsen/logrus"
	"github.com/taosdata/driver-go/v3/common"

	_ "github.com/taosdata/driver-go/v3/taosRestful"
	"github.com/taosdata/taoskeeper/infrastructure/config"

@@ -70,9 +69,13 @@ func NewConnectorWithDb(username, password, host string, port int, dbname string

	return &Connector{db: db}, nil
}

type ReqIDKeyTy string

const ReqIDKey ReqIDKeyTy = "taos_req_id"

func (c *Connector) Exec(ctx context.Context, sql string, qid uint64) (int64, error) {
	dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: qid})
	ctx = context.WithValue(ctx, common.ReqIDKey, int64(qid))
	ctx = context.WithValue(ctx, ReqIDKey, int64(qid))

	dbLogger.Tracef("call adapter to execute sql:%s", sql)
	startTime := time.Now()

@@ -120,7 +123,7 @@ func logData(data *Data, logger *logrus.Entry) {

func (c *Connector) Query(ctx context.Context, sql string, qid uint64) (*Data, error) {
	dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: qid})
	ctx = context.WithValue(ctx, common.ReqIDKey, int64(qid))
	ctx = context.WithValue(ctx, ReqIDKey, int64(qid))

	dbLogger.Tracef("call adapter to execute query, sql:%s", sql)

@@ -1,8 +0,0 @@

package log

import (
	"testing"
)

func TestEmpty(t *testing.T) {
}

@@ -13,7 +13,6 @@ import (

	"github.com/sirupsen/logrus"
	rotatelogs "github.com/taosdata/file-rotatelogs/v2"
	"github.com/taosdata/taoskeeper/infrastructure/config"

	"github.com/taosdata/taoskeeper/version"
)

@@ -1,8 +0,0 @@

package monitor

import (
	"testing"
)

func TestEmpty(t *testing.T) {
}

@@ -11,10 +11,9 @@ import (

	"github.com/taosdata/go-utils/web"
	"github.com/taosdata/taoskeeper/api"
	"github.com/taosdata/taoskeeper/db"
	"github.com/taosdata/taoskeeper/util"

	"github.com/taosdata/taoskeeper/infrastructure/config"
	"github.com/taosdata/taoskeeper/infrastructure/log"
	"github.com/taosdata/taoskeeper/util"
)

func TestStart(t *testing.T) {

@@ -35,7 +34,7 @@ func TestStart(t *testing.T) {

	conf.RotationInterval = "1s"
	StartMonitor("", conf, reporter)
	time.Sleep(2 * time.Second)
	for k, _ := range SysMonitor.outputs {
	for k := range SysMonitor.outputs {
		SysMonitor.Deregister(k)
	}

@@ -1,8 +0,0 @@

package system

import (
	"testing"
)

func TestEmpty(t *testing.T) {
}

@@ -1,8 +1,6 @@

package pool

import (
	"github.com/panjf2000/ants/v2"
)
import "github.com/panjf2000/ants/v2"

var GoroutinePool *ants.Pool

@@ -118,9 +118,7 @@ func GetQid(qidStr string) uint64 {

}

func GetQidOwn() uint64 {

	id := atomic.AddUint64(&globalCounter64, 1)

	if id > 0x00ffffffffffffff {
		atomic.StoreUint64(&globalCounter64, 1)
		id = 1
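
The wrap above keeps the request-id counter within the low 56 bits. A minimal test sketch of that behaviour could look like the following; it is illustrative only and not part of this commit, and it assumes it sits in the same `util` package so it can reach the unexported `globalCounter64`.

```go
package util

import (
	"sync/atomic"
	"testing"
)

// Sketch only: push the counter just past the 56-bit limit and check that the
// next call to GetQidOwn resets it to 1, as in the snippet above.
func TestGetQidOwnWrap(t *testing.T) {
	atomic.StoreUint64(&globalCounter64, 0x00ffffffffffffff)

	_ = GetQidOwn() // this call exceeds the limit and should trigger the reset

	if got := atomic.LoadUint64(&globalCounter64); got != 1 {
		t.Fatalf("expected counter to wrap to 1, got %d", got)
	}
}
```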