Merge branch 'develop' into docs/Update-Latest-Feature
Commit 5545e058c0
@ -69,7 +69,6 @@ IF (TD_LINUX_32)
ENDIF ()

IF (TD_ARM_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)

@ -86,17 +85,19 @@ IF (TD_ARM_32)
ENDIF ()

IF (TD_MIPS_64)
ADD_DEFINITIONS(-D_TD_MIPS_64_)
ADD_DEFINITIONS(-D_TD_MIPS_)
ADD_DEFINITIONS(-D_TD_MIPS_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()

IF (TD_MIPS_32)
ADD_DEFINITIONS(-D_TD_MIPS_32_)
ADD_DEFINITIONS(-D_TD_MIPS_)
ADD_DEFINITIONS(-D_TD_MIPS_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()

IF (TD_APLHINE)
@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.26-dist.jar DESTINATION connector/jdbc)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.28-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.19.0")
SET(TD_VER_NUMBER "2.0.20.0")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
@ -97,7 +97,7 @@ go build -o bin/taosimport app/main.go

Whether to save statistics into TDengine's statistic table: 1 yes, 0 no; default 0.

* -savetb int
* -savetb string

Name of the table that stores the statistics when save is 1; default statistic.
@ -7,7 +7,6 @@ import (
"encoding/json"
"flag"
"fmt"
"hash/crc32"
"io"
"log"
"os"
@ -17,47 +16,55 @@ import (
"sync"
"time"

dataimport "github.com/taosdata/TDengine/importSampleData/import"
dataImport "github.com/taosdata/TDengine/importSampleData/import"

_ "github.com/taosdata/driver-go/taosSql"
)

const (
TIMESTAMP = "timestamp"
DATETIME = "datetime"
MILLISECOND = "millisecond"
DEFAULT_STARTTIME int64 = -1
DEFAULT_INTERVAL int64 = 1 * 1000
DEFAULT_DELAY int64 = -1
DEFAULT_STATISTIC_TABLE = "statistic"
// the primary key type must be timestamp
TIMESTAMP = "timestamp"

JSON_FORMAT = "json"
CSV_FORMAT = "csv"
SUPERTABLE_PREFIX = "s_"
SUBTABLE_PREFIX = "t_"
// whether the primary-key time field in the sample data is in millisecond or dateTime format
DATETIME = "datetime"
MILLISECOND = "millisecond"

DRIVER_NAME = "taosSql"
STARTTIME_LAYOUT = "2006-01-02 15:04:05.000"
INSERT_PREFIX = "insert into "
DefaultStartTime int64 = -1
DefaultInterval int64 = 1 * 1000 // interval between imported records; only takes effect when auto=1, otherwise the interval is computed from the sample data. Unit: millisecond, default 1000.
DefaultDelay int64 = -1 //

// name of the table that stores the statistics when save is 1; default statistic.
DefaultStatisticTable = "statistic"

// sample data file format, either json or csv
JsonFormat = "json"
CsvFormat = "csv"

SuperTablePrefix = "s_" // super table prefix
SubTablePrefix = "t_" // sub table prefix

DriverName = "taosSql"
StartTimeLayout = "2006-01-02 15:04:05.000"
InsertPrefix = "insert into "
)
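A minimal standalone sketch of how these prefix constants combine into the table names and SQL the importer generates. The use-case name sensor_info is the tool's documented default case; the suffix 1 and the row values are invented for illustration, and this is not the tool's actual code:

package main

import "fmt"

const (
	SuperTablePrefix = "s_"           // super table prefix, as defined above
	SubTablePrefix   = "t_"           // sub table prefix, as defined above
	InsertPrefix     = "insert into " // prefix of every generated insert statement
)

func main() {
	stName := "sensor_info"                             // use-case stname (documented default case)
	superTable := SuperTablePrefix + stName             // "s_sensor_info"
	subTable := SubTablePrefix + "1" + "_" + superTable // "t_1_s_sensor_info"; the suffix 1 is made up

	// shape of a single-row statement like the ones insertData builds
	sql := InsertPrefix + subTable + " values(1575129600000, 23, 16.002889)"
	fmt.Println(superTable)
	fmt.Println(sql)
}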

var (
cfg string
cases string
hnum int
vnum int
thread int
batch int
auto int
starttimestr string
interval int64
host string
port int
user string
password string
dropdb int
db string
dbparam string
cfg string // path of the import configuration file, which describes the sample data files and the corresponding TDengine settings; default config/cfg.toml
cases string // names of the use cases to import, as listed under [usecase] in the -cfg file; multiple cases can be imported at once, separated by commas, e.g. sensor_info,camera_detection; default sensor_info
hnum int // horizontal scaling factor for the sample data; e.g. if the sample data contains one sub table t_0, setting hnum to 2 creates two sub tables, t and t_1, based on the original table name; default 100
vnum int // number of times the sample data is replicated along the time axis; 0 means that after the historical data reaches the current time, importing keeps going at the given interval; default 1000, i.e. the sample data is copied 1000 times
thread int // number of threads used for importing; default 10
batch int // number of records per insert batch, i.e. how many rows one write carries; default 100
auto int // whether to generate the primary-key timestamps of the sample data automatically: 1 yes, 0 no; default 0
startTimeStr string // start time of the imported records, in the format "yyyy-MM-dd HH:mm:ss.SSS"; if unset, the minimum time in the sample data is used; if set, the primary-key times in the sample data are ignored and importing starts from the given start; must be set when auto is 1; default empty
interval int64 // interval between imported records; only takes effect when auto=1, otherwise the interval is computed from the sample data; unit millisecond, default 1000
host string // IP of the TDengine server to import into; default 127.0.0.1
port int // port of the TDengine server; default 6030
user string // TDengine user name; default root
password string // TDengine password; default taosdata
dropdb int // whether to drop the database before importing: 1 yes, 0 no; default 0
db string // name of the TDengine database to import into; default test_yyyyMMdd
dbparam string // optional parameters applied when the specified database does not exist and is created automatically, e.g. days 10 cache 16000 ablocks 4; default empty

dataSourceName string
startTime int64
@ -72,10 +79,10 @@ var (
lastStaticTime time.Time
lastTotalRows int64
timeTicker *time.Ticker
delay int64 // default 10 milliseconds
tick int64
save int
saveTable string
delay int64 // when vnum is 0, the interval at which importing keeps going; defaults to half of the smallest record interval across all use cases, unit ms.
tick int64 // interval at which statistics are printed; default 2000 ms.
save int // whether to save statistics into TDengine's statistic table: 1 yes, 0 no; default 0.
saveTable string // name of the table that stores the statistics when save is 1; default statistic.
)

type superTableConfig struct {
@ -83,7 +90,7 @@ type superTableConfig struct {
endTime int64
cycleTime int64
avgInterval int64
config dataimport.CaseConfig
config dataImport.CaseConfig
}

type scaleTableInfo struct {

@ -92,14 +99,14 @@ type scaleTableInfo struct {
insertRows int64
}

type tableRows struct {
tableName string // tableName
value string // values(...)
}
//type tableRows struct {
// tableName string // tableName
// value string // values(...)
//}

type dataRows struct {
rows []map[string]interface{}
config dataimport.CaseConfig
config dataImport.CaseConfig
}

func (rows dataRows) Len() int {
@ -107,9 +114,9 @@ func (rows dataRows) Len() int {
}

func (rows dataRows) Less(i, j int) bool {
itime := getPrimaryKey(rows.rows[i][rows.config.Timestamp])
jtime := getPrimaryKey(rows.rows[j][rows.config.Timestamp])
return itime < jtime
iTime := getPrimaryKey(rows.rows[i][rows.config.Timestamp])
jTime := getPrimaryKey(rows.rows[j][rows.config.Timestamp])
return iTime < jTime
}

func (rows dataRows) Swap(i, j int) {
@ -123,26 +130,26 @@ func getPrimaryKey(value interface{}) int64 {
}

func init() {
parseArg() //parse argument
parseArg() // parse argument

if db == "" {
//db = "go"
// name of the TDengine database to import into; default test_yyyyMMdd
db = fmt.Sprintf("test_%s", time.Now().Format("20060102"))
}

if auto == 1 && len(starttimestr) == 0 {
if auto == 1 && len(startTimeStr) == 0 {
log.Fatalf("startTime must be set when auto is 1, the format is \"yyyy-MM-dd HH:mm:ss.SSS\" ")
}

if len(starttimestr) != 0 {
t, err := time.ParseInLocation(STARTTIME_LAYOUT, strings.TrimSpace(starttimestr), time.Local)
if len(startTimeStr) != 0 {
t, err := time.ParseInLocation(StartTimeLayout, strings.TrimSpace(startTimeStr), time.Local)
if err != nil {
log.Fatalf("param startTime %s error, %s\n", starttimestr, err)
log.Fatalf("param startTime %s error, %s\n", startTimeStr, err)
}

startTime = t.UnixNano() / 1e6 // as millisecond
} else {
startTime = DEFAULT_STARTTIME
startTime = DefaultStartTime
}

dataSourceName = fmt.Sprintf("%s:%s@/tcp(%s:%d)/", user, password, host, port)
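For reference, this DSN is what getConnection() later hands to sql.Open together with the taosSql driver. A minimal sketch of that usage; the host, port and credentials below are the importer's documented default flag values, not settings from a real deployment:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/taosdata/driver-go/taosSql" // registers the "taosSql" driver
)

func main() {
	// root/taosdata on 127.0.0.1:6030 are the importer's documented defaults
	dsn := fmt.Sprintf("%s:%s@/tcp(%s:%d)/", "root", "taosdata", "127.0.0.1", 6030)
	db, err := sql.Open("taosSql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}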
@ -154,9 +161,9 @@ func init() {
|
|||
|
||||
func main() {
|
||||
|
||||
importConfig := dataimport.LoadConfig(cfg)
|
||||
importConfig := dataImport.LoadConfig(cfg)
|
||||
|
||||
var caseMinumInterval int64 = -1
|
||||
var caseMinInterval int64 = -1
|
||||
|
||||
for _, userCase := range strings.Split(cases, ",") {
|
||||
caseConfig, ok := importConfig.UserCases[userCase]
|
||||
|
@ -168,7 +175,7 @@ func main() {
|
|||
|
||||
checkUserCaseConfig(userCase, &caseConfig)
|
||||
|
||||
//read file as map array
|
||||
// read file as map array
|
||||
fileRows := readFile(caseConfig)
|
||||
log.Printf("case [%s] sample data file contains %d rows.\n", userCase, len(fileRows.rows))
|
||||
|
||||
|
@ -177,31 +184,31 @@ func main() {
|
|||
continue
|
||||
}
|
||||
|
||||
_, exists := superTableConfigMap[caseConfig.Stname]
|
||||
_, exists := superTableConfigMap[caseConfig.StName]
|
||||
if !exists {
|
||||
superTableConfigMap[caseConfig.Stname] = &superTableConfig{config: caseConfig}
|
||||
superTableConfigMap[caseConfig.StName] = &superTableConfig{config: caseConfig}
|
||||
} else {
|
||||
log.Fatalf("the stname of case %s already exist.\n", caseConfig.Stname)
|
||||
log.Fatalf("the stname of case %s already exist.\n", caseConfig.StName)
|
||||
}
|
||||
|
||||
var start, cycleTime, avgInterval int64 = getSuperTableTimeConfig(fileRows)
|
||||
|
||||
// set super table's startTime, cycleTime and avgInterval
|
||||
superTableConfigMap[caseConfig.Stname].startTime = start
|
||||
superTableConfigMap[caseConfig.Stname].avgInterval = avgInterval
|
||||
superTableConfigMap[caseConfig.Stname].cycleTime = cycleTime
|
||||
superTableConfigMap[caseConfig.StName].startTime = start
|
||||
superTableConfigMap[caseConfig.StName].cycleTime = cycleTime
|
||||
superTableConfigMap[caseConfig.StName].avgInterval = avgInterval
|
||||
|
||||
if caseMinumInterval == -1 || caseMinumInterval > avgInterval {
|
||||
caseMinumInterval = avgInterval
|
||||
if caseMinInterval == -1 || caseMinInterval > avgInterval {
|
||||
caseMinInterval = avgInterval
|
||||
}
|
||||
|
||||
startStr := time.Unix(0, start*int64(time.Millisecond)).Format(STARTTIME_LAYOUT)
|
||||
startStr := time.Unix(0, start*int64(time.Millisecond)).Format(StartTimeLayout)
|
||||
log.Printf("case [%s] startTime %s(%d), average dataInterval %d ms, cycleTime %d ms.\n", userCase, startStr, start, avgInterval, cycleTime)
|
||||
}
|
||||
|
||||
if DEFAULT_DELAY == delay {
|
||||
if DefaultDelay == delay {
|
||||
// default delay
|
||||
delay = caseMinumInterval / 2
|
||||
delay = caseMinInterval / 2
|
||||
if delay < 1 {
|
||||
delay = 1
|
||||
}
|
||||
|
@ -218,7 +225,7 @@ func main() {
|
|||
createSuperTable(superTableConfigMap)
|
||||
log.Printf("create %d superTable ,used %d ms.\n", superTableNum, time.Since(start)/1e6)
|
||||
|
||||
//create sub table
|
||||
// create sub table
|
||||
start = time.Now()
|
||||
createSubTable(subTableMap)
|
||||
log.Printf("create %d times of %d subtable ,all %d tables, used %d ms.\n", hnum, len(subTableMap), len(scaleTableMap), time.Since(start)/1e6)
|
||||
|
@ -278,7 +285,7 @@ func staticSpeed() {
|
|||
defer connection.Close()
|
||||
|
||||
if save == 1 {
|
||||
connection.Exec("use " + db)
|
||||
_, _ = connection.Exec("use " + db)
|
||||
_, err := connection.Exec("create table if not exists " + saveTable + "(ts timestamp, speed int)")
|
||||
if err != nil {
|
||||
log.Fatalf("create %s Table error: %s\n", saveTable, err)
|
||||
|
@ -294,12 +301,12 @@ func staticSpeed() {
|
|||
total := getTotalRows(successRows)
|
||||
currentSuccessRows := total - lastTotalRows
|
||||
|
||||
speed := currentSuccessRows * 1e9 / int64(usedTime)
|
||||
speed := currentSuccessRows * 1e9 / usedTime
|
||||
log.Printf("insert %d rows, used %d ms, speed %d rows/s", currentSuccessRows, usedTime/1e6, speed)
|
||||
|
||||
if save == 1 {
|
||||
insertSql := fmt.Sprintf("insert into %s values(%d, %d)", saveTable, currentTime.UnixNano()/1e6, speed)
|
||||
connection.Exec(insertSql)
|
||||
_, _ = connection.Exec(insertSql)
|
||||
}
|
||||
|
||||
lastStaticTime = currentTime
|
||||
|
@ -327,12 +334,13 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i
|
|||
} else {
|
||||
|
||||
// use the sample data primary timestamp
|
||||
sort.Sort(fileRows) // sort the file data by the primarykey
|
||||
sort.Sort(fileRows) // sort the file data by the primaryKey
|
||||
minTime := getPrimaryKey(fileRows.rows[0][fileRows.config.Timestamp])
|
||||
maxTime := getPrimaryKey(fileRows.rows[len(fileRows.rows)-1][fileRows.config.Timestamp])
|
||||
|
||||
start = minTime // default startTime use the minTime
|
||||
if DEFAULT_STARTTIME != startTime {
// if a start time was specified, use it instead of the sample minimum
if DefaultStartTime != startTime {
start = startTime
}
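The branch above assumes the sample rows were already ordered by their primary-key timestamp via sort.Sort(fileRows) a few lines earlier, with dataRows supplying Len/Less/Swap. A minimal sketch of the same ordering idea on made-up rows, using sort.Slice instead of the sort.Interface implementation shown earlier:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// each row is a map keyed by column name, as in dataRows.rows
	rows := []map[string]interface{}{
		{"ts": int64(1575129602000), "temperature": 29},
		{"ts": int64(1575129600000), "temperature": 23},
		{"ts": int64(1575129601000), "temperature": 26},
	}
	// order rows by the primary-key timestamp, oldest first
	sort.Slice(rows, func(i, j int) bool {
		return rows[i]["ts"].(int64) < rows[j]["ts"].(int64)
	})
	fmt.Println(rows[0]["ts"]) // 1575129600000: the minimum time, used as the default start
}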
@ -350,31 +358,21 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i
|
|||
return
|
||||
}
|
||||
|
||||
func createStatisticTable() {
|
||||
connection := getConnection()
|
||||
defer connection.Close()
|
||||
|
||||
_, err := connection.Exec("create table if not exist " + db + "." + saveTable + "(ts timestamp, speed int)")
|
||||
if err != nil {
|
||||
log.Fatalf("createStatisticTable error: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
func createSubTable(subTableMaps map[string]*dataRows) {
|
||||
|
||||
connection := getConnection()
|
||||
defer connection.Close()
|
||||
|
||||
connection.Exec("use " + db)
|
||||
_, _ = connection.Exec("use " + db)
|
||||
|
||||
createTablePrefix := "create table if not exists "
|
||||
var buffer bytes.Buffer
|
||||
for subTableName := range subTableMaps {
|
||||
|
||||
superTableName := getSuperTableName(subTableMaps[subTableName].config.Stname)
|
||||
tagValues := subTableMaps[subTableName].rows[0] // the first rows values as tags
|
||||
superTableName := getSuperTableName(subTableMaps[subTableName].config.StName)
|
||||
firstRowValues := subTableMaps[subTableName].rows[0] // the first rows values as tags
|
||||
|
||||
buffers := bytes.Buffer{}
|
||||
// create table t using supertTable tags(...);
|
||||
// create table t using superTable tags(...);
|
||||
for i := 0; i < hnum; i++ {
|
||||
tableName := getScaleSubTableName(subTableName, i)
|
||||
|
||||
|
@ -384,21 +382,21 @@ func createSubTable(subTableMaps map[string]*dataRows) {
|
|||
}
|
||||
scaleTableNames = append(scaleTableNames, tableName)
|
||||
|
||||
buffers.WriteString(createTablePrefix)
|
||||
buffers.WriteString(tableName)
|
||||
buffers.WriteString(" using ")
|
||||
buffers.WriteString(superTableName)
|
||||
buffers.WriteString(" tags(")
|
||||
buffer.WriteString(createTablePrefix)
|
||||
buffer.WriteString(tableName)
|
||||
buffer.WriteString(" using ")
|
||||
buffer.WriteString(superTableName)
|
||||
buffer.WriteString(" tags(")
|
||||
for _, tag := range subTableMaps[subTableName].config.Tags {
|
||||
tagValue := fmt.Sprintf("%v", tagValues[strings.ToLower(tag.Name)])
|
||||
buffers.WriteString("'" + tagValue + "'")
|
||||
buffers.WriteString(",")
|
||||
tagValue := fmt.Sprintf("%v", firstRowValues[strings.ToLower(tag.Name)])
|
||||
buffer.WriteString("'" + tagValue + "'")
|
||||
buffer.WriteString(",")
|
||||
}
|
||||
buffers.Truncate(buffers.Len() - 1)
|
||||
buffers.WriteString(")")
|
||||
buffer.Truncate(buffer.Len() - 1)
|
||||
buffer.WriteString(")")
|
||||
|
||||
createTableSql := buffers.String()
|
||||
buffers.Reset()
|
||||
createTableSql := buffer.String()
|
||||
buffer.Reset()
|
||||
|
||||
//log.Printf("create table: %s\n", createTableSql)
|
||||
_, err := connection.Exec(createTableSql)
|
||||
|
@ -420,7 +418,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
|
|||
if err != nil {
|
||||
log.Fatalf("drop database error: %s\n", err)
|
||||
}
|
||||
log.Printf("dropDb: %s\n", dropDbSql)
|
||||
log.Printf("dropdb: %s\n", dropDbSql)
|
||||
}
|
||||
|
||||
createDbSql := "create database if not exists " + db + " " + dbparam
|
||||
|
@ -431,7 +429,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
|
|||
}
|
||||
log.Printf("createDb: %s\n", createDbSql)
|
||||
|
||||
connection.Exec("use " + db)
|
||||
_, _ = connection.Exec("use " + db)
|
||||
|
||||
prefix := "create table if not exists "
|
||||
var buffer bytes.Buffer
|
||||
|
@ -464,7 +462,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
|
|||
createSql := buffer.String()
|
||||
buffer.Reset()
|
||||
|
||||
//log.Printf("supertable: %s\n", createSql)
|
||||
//log.Printf("superTable: %s\n", createSql)
|
||||
_, err = connection.Exec(createSql)
|
||||
if err != nil {
|
||||
log.Fatalf("create supertable error: %s\n", err)
|
||||
|
@ -473,15 +471,15 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
|
|||
|
||||
}
|
||||
|
||||
func getScaleSubTableName(subTableName string, hnum int) string {
|
||||
if hnum == 0 {
|
||||
func getScaleSubTableName(subTableName string, hNum int) string {
|
||||
if hNum == 0 {
|
||||
return subTableName
|
||||
}
|
||||
return fmt.Sprintf("%s_%d", subTableName, hnum)
|
||||
return fmt.Sprintf("%s_%d", subTableName, hNum)
|
||||
}
|
||||
|
||||
func getSuperTableName(stname string) string {
|
||||
return SUPERTABLE_PREFIX + stname
|
||||
func getSuperTableName(stName string) string {
|
||||
return SuperTablePrefix + stName
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -499,7 +497,7 @@ func normalizationData(fileRows dataRows, minTime int64) int64 {
|
|||
|
||||
row[fileRows.config.Timestamp] = getPrimaryKey(row[fileRows.config.Timestamp]) - minTime
|
||||
|
||||
subTableName := getSubTableName(tableValue, fileRows.config.Stname)
|
||||
subTableName := getSubTableName(tableValue, fileRows.config.StName)
|
||||
|
||||
value, ok := subTableMap[subTableName]
|
||||
if !ok {
|
||||
|
@ -527,7 +525,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
|
|||
continue
|
||||
}
|
||||
|
||||
subTableName := getSubTableName(tableValue, fileRows.config.Stname)
|
||||
subTableName := getSubTableName(tableValue, fileRows.config.StName)
|
||||
|
||||
value, ok := currSubTableMap[subTableName]
|
||||
if !ok {
|
||||
|
@ -543,7 +541,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
|
|||
|
||||
}
|
||||
|
||||
var maxRows, tableRows int = 0, 0
|
||||
var maxRows, tableRows = 0, 0
|
||||
for tableName := range currSubTableMap {
|
||||
tableRows = len(currSubTableMap[tableName].rows)
|
||||
subTableMap[tableName] = currSubTableMap[tableName] // add to global subTableMap
|
||||
|
@ -556,7 +554,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
|
|||
}
|
||||
|
||||
func getSubTableName(subTableValue string, superTableName string) string {
|
||||
return SUBTABLE_PREFIX + subTableValue + "_" + superTableName
|
||||
return SubTablePrefix + subTableValue + "_" + superTableName
|
||||
}
|
||||
|
||||
func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []int64) {
|
||||
|
@ -564,25 +562,25 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
|
|||
defer connection.Close()
|
||||
defer wg.Done()
|
||||
|
||||
connection.Exec("use " + db) // use db
|
||||
_, _ = connection.Exec("use " + db) // use db
|
||||
|
||||
log.Printf("thread-%d start insert into [%d, %d) subtables.\n", threadIndex, start, end)
|
||||
|
||||
num := 0
|
||||
subTables := scaleTableNames[start:end]
|
||||
var buffer bytes.Buffer
|
||||
for {
|
||||
var currSuccessRows int64
|
||||
var appendRows int
|
||||
var lastTableName string
|
||||
|
||||
buffers := bytes.Buffer{}
|
||||
buffers.WriteString(INSERT_PREFIX)
|
||||
buffer.WriteString(InsertPrefix)
|
||||
|
||||
for _, tableName := range subTables {
|
||||
|
||||
subTableInfo := subTableMap[scaleTableMap[tableName].subTableName]
|
||||
subTableRows := int64(len(subTableInfo.rows))
|
||||
superTableConf := superTableConfigMap[subTableInfo.config.Stname]
|
||||
superTableConf := superTableConfigMap[subTableInfo.config.StName]
|
||||
|
||||
tableStartTime := superTableConf.startTime
|
||||
var tableEndTime int64
|
||||
|
@ -605,40 +603,35 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
|
|||
// append
|
||||
|
||||
if lastTableName != tableName {
|
||||
buffers.WriteString(tableName)
|
||||
buffers.WriteString(" values")
|
||||
buffer.WriteString(tableName)
|
||||
buffer.WriteString(" values")
|
||||
}
|
||||
lastTableName = tableName
|
||||
|
||||
buffers.WriteString("(")
|
||||
buffers.WriteString(fmt.Sprintf("%v", currentTime))
|
||||
buffers.WriteString(",")
|
||||
buffer.WriteString("(")
|
||||
buffer.WriteString(fmt.Sprintf("%v", currentTime))
|
||||
buffer.WriteString(",")
|
||||
|
||||
// fieldNum := len(subTableInfo.config.Fields)
|
||||
for _, field := range subTableInfo.config.Fields {
|
||||
buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
|
||||
buffers.WriteString(",")
|
||||
// if( i != fieldNum -1){
|
||||
|
||||
// }
|
||||
buffer.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
|
||||
buffer.WriteString(",")
|
||||
}
|
||||
|
||||
buffers.Truncate(buffers.Len() - 1)
|
||||
buffers.WriteString(") ")
|
||||
buffer.Truncate(buffer.Len() - 1)
|
||||
buffer.WriteString(") ")
|
||||
|
||||
appendRows++
|
||||
insertRows++
|
||||
if appendRows == batch {
|
||||
// executebatch
|
||||
insertSql := buffers.String()
|
||||
connection.Exec("use " + db)
|
||||
// executeBatch
|
||||
insertSql := buffer.String()
|
||||
affectedRows := executeBatchInsert(insertSql, connection)
|
||||
|
||||
successRows[threadIndex] += affectedRows
|
||||
currSuccessRows += affectedRows
|
||||
|
||||
buffers.Reset()
|
||||
buffers.WriteString(INSERT_PREFIX)
|
||||
buffer.Reset()
|
||||
buffer.WriteString(InsertPrefix)
|
||||
lastTableName = ""
|
||||
appendRows = 0
|
||||
}
|
||||
|
@ -654,15 +647,14 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
|
|||
|
||||
// left := len(rows)
|
||||
if appendRows > 0 {
|
||||
// executebatch
|
||||
insertSql := buffers.String()
|
||||
connection.Exec("use " + db)
|
||||
// executeBatch
|
||||
insertSql := buffer.String()
|
||||
affectedRows := executeBatchInsert(insertSql, connection)
|
||||
|
||||
successRows[threadIndex] += affectedRows
|
||||
currSuccessRows += affectedRows
|
||||
|
||||
buffers.Reset()
|
||||
buffer.Reset()
|
||||
}
|
||||
|
||||
// log.Printf("thread-%d finished insert %d rows, used %d ms.", threadIndex, currSuccessRows, time.Since(threadStartTime)/1e6)
|
||||
|
@ -688,65 +680,10 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i
|
|||
|
||||
}
|
||||
|
||||
func buildSql(rows []tableRows) string {
|
||||
|
||||
var lastTableName string
|
||||
|
||||
buffers := bytes.Buffer{}
|
||||
|
||||
for i, row := range rows {
|
||||
if i == 0 {
|
||||
lastTableName = row.tableName
|
||||
buffers.WriteString(INSERT_PREFIX)
|
||||
buffers.WriteString(row.tableName)
|
||||
buffers.WriteString(" values")
|
||||
buffers.WriteString(row.value)
|
||||
continue
|
||||
}
|
||||
|
||||
if lastTableName == row.tableName {
|
||||
buffers.WriteString(row.value)
|
||||
} else {
|
||||
buffers.WriteString(" ")
|
||||
buffers.WriteString(row.tableName)
|
||||
buffers.WriteString(" values")
|
||||
buffers.WriteString(row.value)
|
||||
lastTableName = row.tableName
|
||||
}
|
||||
}
|
||||
|
||||
inserSql := buffers.String()
|
||||
return inserSql
|
||||
}
|
||||
|
||||
func buildRow(tableName string, currentTime int64, subTableInfo *dataRows, currentRow map[string]interface{}) tableRows {
|
||||
|
||||
tableRows := tableRows{tableName: tableName}
|
||||
|
||||
buffers := bytes.Buffer{}
|
||||
|
||||
buffers.WriteString("(")
|
||||
buffers.WriteString(fmt.Sprintf("%v", currentTime))
|
||||
buffers.WriteString(",")
|
||||
|
||||
for _, field := range subTableInfo.config.Fields {
|
||||
buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
|
||||
buffers.WriteString(",")
|
||||
}
|
||||
|
||||
buffers.Truncate(buffers.Len() - 1)
|
||||
buffers.WriteString(")")
|
||||
|
||||
insertSql := buffers.String()
|
||||
tableRows.value = insertSql
|
||||
|
||||
return tableRows
|
||||
}
|
||||
|
||||
func executeBatchInsert(insertSql string, connection *sql.DB) int64 {
result, error := connection.Exec(insertSql)
if error != nil {
log.Printf("execute insertSql %s error, %s\n", insertSql, error)
result, err := connection.Exec(insertSql)
if err != nil {
log.Printf("execute insertSql %s error, %s\n", insertSql, err)
return 0
}
affected, _ := result.RowsAffected()

@ -754,7 +691,6 @@ func executeBatchInsert(insertSql string, connection *sql.DB) int64 {
affected = 0
}
return affected
// return 0
}
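executeBatchInsert is the flush step of a buffer-and-flush loop: insertData accumulates value tuples until appendRows reaches the -batch size, executes the statement, and sums RowsAffected. A minimal standalone sketch of that pattern; demo_table, the tuple strings and the exec stub are invented for illustration and stand in for the real connection.Exec call:

package main

import (
	"bytes"
	"fmt"
)

// flushInBatches mirrors the buffer-and-flush loop of insertData: collect value
// tuples, execute once per `batch` rows, and sum what exec reports as written.
func flushInBatches(rows []string, batch int, exec func(sql string) int64) int64 {
	var total int64
	var buf bytes.Buffer
	pending := 0
	for _, r := range rows {
		if pending == 0 {
			buf.WriteString("insert into demo_table values") // demo_table is made up
		}
		buf.WriteString(r) // r is a "(ts, value)" tuple string
		pending++
		if pending == batch {
			total += exec(buf.String())
			buf.Reset()
			pending = 0
		}
	}
	if pending > 0 { // flush the remainder, like the appendRows > 0 branch above
		total += exec(buf.String())
	}
	return total
}

func main() {
	rows := []string{"(1575129600000, 23)", "(1575129601000, 26)", "(1575129602000, 29)"}
	_ = flushInBatches(rows, 2, func(sql string) int64 {
		fmt.Println(sql) // the real tool calls connection.Exec(sql) and returns RowsAffected
		return 0
	})
}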
func getFieldValue(fieldValue interface{}) string {
|
||||
|
@ -762,7 +698,7 @@ func getFieldValue(fieldValue interface{}) string {
|
|||
}
|
||||
|
||||
func getConnection() *sql.DB {
|
||||
db, err := sql.Open(DRIVER_NAME, dataSourceName)
|
||||
db, err := sql.Open(DriverName, dataSourceName)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -773,19 +709,11 @@ func getSubTableNameValue(suffix interface{}) string {
|
|||
return fmt.Sprintf("%v", suffix)
|
||||
}
|
||||
|
||||
func hash(s string) int {
|
||||
v := int(crc32.ChecksumIEEE([]byte(s)))
|
||||
if v < 0 {
|
||||
return -v
|
||||
}
|
||||
return v
|
||||
}
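hash reduces a string to a non-negative int via CRC-32. Its use is not shown in this hunk; one plausible use, sketched below purely as an assumption, is spreading table names evenly across the -thread import workers (10 is the documented default thread count):

package main

import (
	"fmt"
	"hash/crc32"
)

func hash(s string) int {
	v := int(crc32.ChecksumIEEE([]byte(s)))
	if v < 0 {
		return -v
	}
	return v
}

func main() {
	threads := 10 // the importer's default -thread value
	for _, name := range []string{"t_0_s_sensor_info", "t_1_s_sensor_info"} { // made-up table names
		fmt.Printf("%s -> thread %d\n", name, hash(name)%threads)
	}
}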
|
||||
|
||||
func readFile(config dataimport.CaseConfig) dataRows {
|
||||
func readFile(config dataImport.CaseConfig) dataRows {
|
||||
fileFormat := strings.ToLower(config.Format)
|
||||
if fileFormat == JSON_FORMAT {
|
||||
if fileFormat == JsonFormat {
|
||||
return readJSONFile(config)
|
||||
} else if fileFormat == CSV_FORMAT {
|
||||
} else if fileFormat == CsvFormat {
|
||||
return readCSVFile(config)
|
||||
}
|
||||
|
||||
|
@ -793,7 +721,7 @@ func readFile(config dataimport.CaseConfig) dataRows {
|
|||
return dataRows{}
|
||||
}
|
||||
|
||||
func readCSVFile(config dataimport.CaseConfig) dataRows {
|
||||
func readCSVFile(config dataImport.CaseConfig) dataRows {
|
||||
var rows dataRows
|
||||
f, err := os.Open(config.FilePath)
|
||||
if err != nil {
|
||||
|
@ -813,7 +741,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
|
|||
line := strings.ToLower(string(lineBytes))
|
||||
titles := strings.Split(line, config.Separator)
|
||||
if len(titles) < 3 {
|
||||
// need suffix、 primarykey and at least one other field
|
||||
// need suffix、 primaryKey and at least one other field
|
||||
log.Printf("the first line of file %s should be title row, and at least 3 field.\n", config.FilePath)
|
||||
return rows
|
||||
}
|
||||
|
@ -848,7 +776,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
|
|||
}
|
||||
|
||||
// if the primary key valid
|
||||
primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap)
|
||||
primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap)
|
||||
if primaryKeyValue == -1 {
|
||||
log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
|
||||
continue
|
||||
|
@ -861,7 +789,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows {
|
|||
return rows
|
||||
}
|
||||
|
||||
func readJSONFile(config dataimport.CaseConfig) dataRows {
|
||||
func readJSONFile(config dataImport.CaseConfig) dataRows {
|
||||
|
||||
var rows dataRows
|
||||
f, err := os.Open(config.FilePath)
|
||||
|
@ -899,7 +827,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows {
|
|||
continue
|
||||
}
|
||||
|
||||
primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line)
|
||||
primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line)
|
||||
if primaryKeyValue == -1 {
|
||||
log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum)
|
||||
continue
|
||||
|
@ -916,7 +844,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows {
|
|||
/**
|
||||
* get primary key as millisecond , otherwise return -1
|
||||
*/
|
||||
func getPrimaryKeyMillisec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 {
|
||||
func getPrimaryKeyMilliSec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 {
|
||||
if !existMapKeyAndNotEmpty(key, line) {
|
||||
return -1
|
||||
}
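The renamed getPrimaryKeyMilliSec normalizes the sample primary key to milliseconds and returns -1 for anything unusable. A minimal sketch of the two obvious input shapes — a datetime string in the importer's "2006-01-02 15:04:05.000" layout, or a value that is already in milliseconds — written as an illustration, not as the function's actual body:

package main

import (
	"fmt"
	"time"
)

const startTimeLayout = "2006-01-02 15:04:05.000" // same layout the importer uses

// toMillisec returns the timestamp as milliseconds since the epoch, or -1 if it cannot be parsed.
func toMillisec(value interface{}) int64 {
	switch v := value.(type) {
	case string:
		t, err := time.ParseInLocation(startTimeLayout, v, time.Local)
		if err != nil {
			return -1
		}
		return t.UnixNano() / 1e6
	case int64:
		return v // already milliseconds
	default:
		return -1
	}
}

func main() {
	fmt.Println(toMillisec("2019-12-01 00:00:00.000"))
	fmt.Println(toMillisec(int64(1575129600000)))
	fmt.Println(toMillisec(nil)) // -1
}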
|
||||
|
@ -971,13 +899,13 @@ func existMapKeyAndNotEmpty(key string, maps map[string]interface{}) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) {
|
||||
func checkUserCaseConfig(caseName string, caseConfig *dataImport.CaseConfig) {
|
||||
|
||||
if len(caseConfig.Stname) == 0 {
|
||||
if len(caseConfig.StName) == 0 {
|
||||
log.Fatalf("the stname of case %s can't be empty\n", caseName)
|
||||
}
|
||||
|
||||
caseConfig.Stname = strings.ToLower(caseConfig.Stname)
|
||||
caseConfig.StName = strings.ToLower(caseConfig.StName)
|
||||
|
||||
if len(caseConfig.Tags) == 0 {
|
||||
log.Fatalf("the tags of case %s can't be empty\n", caseName)
|
||||
|
@ -1029,24 +957,24 @@ func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) {
|
|||
}
|
||||
|
||||
func parseArg() {
|
||||
flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes usecase and data format.")
|
||||
flag.StringVar(&cases, "cases", "sensor_info", "usecase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.")
|
||||
flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes useCase and data format.")
|
||||
flag.StringVar(&cases, "cases", "sensor_info", "useCase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.")
|
||||
flag.IntVar(&hnum, "hnum", 100, "magnification factor of the sample tables. For example, if hnum is 100 and in the sample data there are 10 tables, then 10x100=1000 tables will be created in the database.")
|
||||
flag.IntVar(&vnum, "vnum", 1000, "copies of the sample records in each table. If set to 0,this program will never stop simulating and importing data even if the timestamp has passed current time.")
|
||||
flag.Int64Var(&delay, "delay", DEFAULT_DELAY, "the delay time interval(millisecond) to continue generating data when vnum set 0.")
|
||||
flag.Int64Var(&delay, "delay", DefaultDelay, "the delay time interval(millisecond) to continue generating data when vnum set 0.")
|
||||
flag.Int64Var(&tick, "tick", 2000, "the tick time interval(millisecond) to print statistic info.")
|
||||
flag.IntVar(&save, "save", 0, "whether to save the statistical info into 'statistic' table. 0 is disabled and 1 is enabled.")
|
||||
flag.StringVar(&saveTable, "savetb", DEFAULT_STATISTIC_TABLE, "the table to save 'statistic' info when save set 1.")
|
||||
flag.StringVar(&saveTable, "savetb", DefaultStatisticTable, "the table to save 'statistic' info when save set 1.")
|
||||
flag.IntVar(&thread, "thread", 10, "number of threads to import data.")
|
||||
flag.IntVar(&batch, "batch", 100, "rows of records in one import batch.")
|
||||
flag.IntVar(&auto, "auto", 0, "whether to use the starttime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.")
|
||||
flag.StringVar(&starttimestr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the ealiest timestamp in the sample data will be set as the starttime.")
|
||||
flag.Int64Var(&interval, "interval", DEFAULT_INTERVAL, "time inteval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.")
|
||||
flag.IntVar(&auto, "auto", 0, "whether to use the startTime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.")
|
||||
flag.StringVar(&startTimeStr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the earliest timestamp in the sample data will be set as the startTime.")
|
||||
flag.Int64Var(&interval, "interval", DefaultInterval, "time interval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.")
|
||||
flag.StringVar(&host, "host", "127.0.0.1", "tdengine server ip.")
|
||||
flag.IntVar(&port, "port", 6030, "tdengine server port.")
|
||||
flag.StringVar(&user, "user", "root", "user name to login into the database.")
|
||||
flag.StringVar(&password, "password", "taosdata", "the import tdengine user password")
|
||||
flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing datbase. 1 is yes and 0 otherwise.")
|
||||
flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing database. 1 is yes and 0 otherwise.")
|
||||
flag.StringVar(&db, "db", "", "name of the database to store data.")
|
||||
flag.StringVar(&dbparam, "dbparam", "", "database configurations when it is created.")
|
||||
|
||||
|
@ -1066,7 +994,7 @@ func printArg() {
|
|||
fmt.Println("-thread:", thread)
|
||||
fmt.Println("-batch:", batch)
|
||||
fmt.Println("-auto:", auto)
|
||||
fmt.Println("-start:", starttimestr)
|
||||
fmt.Println("-start:", startTimeStr)
|
||||
fmt.Println("-interval:", interval)
|
||||
fmt.Println("-host:", host)
|
||||
fmt.Println("-port", port)
|
||||
|
|
|
@ -899,103 +899,103 @@ devid,location,color,devgroup,ts,temperature,humidity
|
|||
8, haerbing, yellow, 2, 1575129697000, 31, 16.321497
|
||||
8, haerbing, yellow, 2, 1575129698000, 25, 15.864515
|
||||
8, haerbing, yellow, 2, 1575129699000, 25, 16.492443
|
||||
9, sijiazhuang, blue, 0, 1575129600000, 23, 16.002889
|
||||
9, sijiazhuang, blue, 0, 1575129601000, 26, 17.034610
|
||||
9, sijiazhuang, blue, 0, 1575129602000, 29, 12.892319
|
||||
9, sijiazhuang, blue, 0, 1575129603000, 34, 15.321807
|
||||
9, sijiazhuang, blue, 0, 1575129604000, 29, 12.562642
|
||||
9, sijiazhuang, blue, 0, 1575129605000, 32, 17.190246
|
||||
9, sijiazhuang, blue, 0, 1575129606000, 19, 15.361774
|
||||
9, sijiazhuang, blue, 0, 1575129607000, 26, 15.022364
|
||||
9, sijiazhuang, blue, 0, 1575129608000, 31, 14.837084
|
||||
9, sijiazhuang, blue, 0, 1575129609000, 25, 11.554289
|
||||
9, sijiazhuang, blue, 0, 1575129610000, 21, 15.313973
|
||||
9, sijiazhuang, blue, 0, 1575129611000, 27, 18.621783
|
||||
9, sijiazhuang, blue, 0, 1575129612000, 31, 18.018101
|
||||
9, sijiazhuang, blue, 0, 1575129613000, 23, 14.421450
|
||||
9, sijiazhuang, blue, 0, 1575129614000, 28, 10.833142
|
||||
9, sijiazhuang, blue, 0, 1575129615000, 33, 18.169837
|
||||
9, sijiazhuang, blue, 0, 1575129616000, 21, 18.772730
|
||||
9, sijiazhuang, blue, 0, 1575129617000, 24, 18.893146
|
||||
9, sijiazhuang, blue, 0, 1575129618000, 24, 10.290187
|
||||
9, sijiazhuang, blue, 0, 1575129619000, 23, 17.393345
|
||||
9, sijiazhuang, blue, 0, 1575129620000, 30, 12.949215
|
||||
9, sijiazhuang, blue, 0, 1575129621000, 19, 19.267621
|
||||
9, sijiazhuang, blue, 0, 1575129622000, 33, 14.831735
|
||||
9, sijiazhuang, blue, 0, 1575129623000, 21, 14.711125
|
||||
9, sijiazhuang, blue, 0, 1575129624000, 16, 17.168485
|
||||
9, sijiazhuang, blue, 0, 1575129625000, 17, 16.426433
|
||||
9, sijiazhuang, blue, 0, 1575129626000, 19, 13.879050
|
||||
9, sijiazhuang, blue, 0, 1575129627000, 21, 18.308168
|
||||
9, sijiazhuang, blue, 0, 1575129628000, 17, 10.845681
|
||||
9, sijiazhuang, blue, 0, 1575129629000, 20, 10.238272
|
||||
9, sijiazhuang, blue, 0, 1575129630000, 19, 19.424976
|
||||
9, sijiazhuang, blue, 0, 1575129631000, 31, 13.885909
|
||||
9, sijiazhuang, blue, 0, 1575129632000, 15, 19.264740
|
||||
9, sijiazhuang, blue, 0, 1575129633000, 30, 12.460645
|
||||
9, sijiazhuang, blue, 0, 1575129634000, 27, 17.608036
|
||||
9, sijiazhuang, blue, 0, 1575129635000, 25, 13.493812
|
||||
9, sijiazhuang, blue, 0, 1575129636000, 19, 10.955939
|
||||
9, sijiazhuang, blue, 0, 1575129637000, 24, 11.956587
|
||||
9, sijiazhuang, blue, 0, 1575129638000, 15, 19.141381
|
||||
9, sijiazhuang, blue, 0, 1575129639000, 24, 14.801530
|
||||
9, sijiazhuang, blue, 0, 1575129640000, 17, 14.347318
|
||||
9, sijiazhuang, blue, 0, 1575129641000, 29, 14.803237
|
||||
9, sijiazhuang, blue, 0, 1575129642000, 28, 10.342297
|
||||
9, sijiazhuang, blue, 0, 1575129643000, 29, 19.368282
|
||||
9, sijiazhuang, blue, 0, 1575129644000, 31, 17.491654
|
||||
9, sijiazhuang, blue, 0, 1575129645000, 18, 13.161736
|
||||
9, sijiazhuang, blue, 0, 1575129646000, 17, 16.067354
|
||||
9, sijiazhuang, blue, 0, 1575129647000, 18, 13.736465
|
||||
9, sijiazhuang, blue, 0, 1575129648000, 23, 19.103276
|
||||
9, sijiazhuang, blue, 0, 1575129649000, 29, 16.075892
|
||||
9, sijiazhuang, blue, 0, 1575129650000, 21, 10.728566
|
||||
9, sijiazhuang, blue, 0, 1575129651000, 15, 18.921849
|
||||
9, sijiazhuang, blue, 0, 1575129652000, 24, 16.914709
|
||||
9, sijiazhuang, blue, 0, 1575129653000, 19, 13.501651
|
||||
9, sijiazhuang, blue, 0, 1575129654000, 19, 13.538347
|
||||
9, sijiazhuang, blue, 0, 1575129655000, 16, 13.261095
|
||||
9, sijiazhuang, blue, 0, 1575129656000, 32, 16.315746
|
||||
9, sijiazhuang, blue, 0, 1575129657000, 27, 16.400939
|
||||
9, sijiazhuang, blue, 0, 1575129658000, 24, 13.321819
|
||||
9, sijiazhuang, blue, 0, 1575129659000, 27, 19.070181
|
||||
9, sijiazhuang, blue, 0, 1575129660000, 27, 13.040922
|
||||
9, sijiazhuang, blue, 0, 1575129661000, 32, 10.872530
|
||||
9, sijiazhuang, blue, 0, 1575129662000, 28, 16.428657
|
||||
9, sijiazhuang, blue, 0, 1575129663000, 32, 13.883854
|
||||
9, sijiazhuang, blue, 0, 1575129664000, 33, 14.299554
|
||||
9, sijiazhuang, blue, 0, 1575129665000, 30, 16.445130
|
||||
9, sijiazhuang, blue, 0, 1575129666000, 15, 18.059404
|
||||
9, sijiazhuang, blue, 0, 1575129667000, 21, 12.348847
|
||||
9, sijiazhuang, blue, 0, 1575129668000, 32, 13.315378
|
||||
9, sijiazhuang, blue, 0, 1575129669000, 17, 15.689507
|
||||
9, sijiazhuang, blue, 0, 1575129670000, 22, 15.591808
|
||||
9, sijiazhuang, blue, 0, 1575129671000, 27, 16.386065
|
||||
9, sijiazhuang, blue, 0, 1575129672000, 25, 10.564803
|
||||
9, sijiazhuang, blue, 0, 1575129673000, 20, 12.276544
|
||||
9, sijiazhuang, blue, 0, 1575129674000, 26, 15.828786
|
||||
9, sijiazhuang, blue, 0, 1575129675000, 18, 12.236420
|
||||
9, sijiazhuang, blue, 0, 1575129676000, 15, 19.439522
|
||||
9, sijiazhuang, blue, 0, 1575129677000, 19, 19.831531
|
||||
9, sijiazhuang, blue, 0, 1575129678000, 22, 17.115744
|
||||
9, sijiazhuang, blue, 0, 1575129679000, 29, 19.879456
|
||||
9, sijiazhuang, blue, 0, 1575129680000, 34, 10.207136
|
||||
9, sijiazhuang, blue, 0, 1575129681000, 16, 17.633523
|
||||
9, sijiazhuang, blue, 0, 1575129682000, 15, 14.227873
|
||||
9, sijiazhuang, blue, 0, 1575129683000, 34, 12.027768
|
||||
9, sijiazhuang, blue, 0, 1575129684000, 22, 11.376610
|
||||
9, sijiazhuang, blue, 0, 1575129685000, 21, 11.711299
|
||||
9, sijiazhuang, blue, 0, 1575129686000, 33, 14.281126
|
||||
9, sijiazhuang, blue, 0, 1575129687000, 31, 10.895302
|
||||
9, sijiazhuang, blue, 0, 1575129688000, 31, 13.971350
|
||||
9, sijiazhuang, blue, 0, 1575129689000, 15, 15.262790
|
||||
9, sijiazhuang, blue, 0, 1575129690000, 23, 12.440568
|
||||
9, sijiazhuang, blue, 0, 1575129691000, 32, 19.731267
|
||||
9, sijiazhuang, blue, 0, 1575129692000, 22, 10.518092
|
||||
9, sijiazhuang, blue, 0, 1575129693000, 34, 17.863021
|
||||
9, sijiazhuang, blue, 0, 1575129694000, 28, 11.478909
|
||||
9, sijiazhuang, blue, 0, 1575129695000, 16, 15.075524
|
||||
9, sijiazhuang, blue, 0, 1575129696000, 16, 10.292127
|
||||
9, sijiazhuang, blue, 0, 1575129697000, 22, 13.716012
|
||||
9, sijiazhuang, blue, 0, 1575129698000, 32, 10.906551
|
||||
9, sijiazhuang, blue, 0, 1575129699000, 19, 18.386868
|
||||
9, shijiazhuang, blue, 0, 1575129600000, 23, 16.002889
|
||||
9, shijiazhuang, blue, 0, 1575129601000, 26, 17.034610
|
||||
9, shijiazhuang, blue, 0, 1575129602000, 29, 12.892319
|
||||
9, shijiazhuang, blue, 0, 1575129603000, 34, 15.321807
|
||||
9, shijiazhuang, blue, 0, 1575129604000, 29, 12.562642
|
||||
9, shijiazhuang, blue, 0, 1575129605000, 32, 17.190246
|
||||
9, shijiazhuang, blue, 0, 1575129606000, 19, 15.361774
|
||||
9, shijiazhuang, blue, 0, 1575129607000, 26, 15.022364
|
||||
9, shijiazhuang, blue, 0, 1575129608000, 31, 14.837084
|
||||
9, shijiazhuang, blue, 0, 1575129609000, 25, 11.554289
|
||||
9, shijiazhuang, blue, 0, 1575129610000, 21, 15.313973
|
||||
9, shijiazhuang, blue, 0, 1575129611000, 27, 18.621783
|
||||
9, shijiazhuang, blue, 0, 1575129612000, 31, 18.018101
|
||||
9, shijiazhuang, blue, 0, 1575129613000, 23, 14.421450
|
||||
9, shijiazhuang, blue, 0, 1575129614000, 28, 10.833142
|
||||
9, shijiazhuang, blue, 0, 1575129615000, 33, 18.169837
|
||||
9, shijiazhuang, blue, 0, 1575129616000, 21, 18.772730
|
||||
9, shijiazhuang, blue, 0, 1575129617000, 24, 18.893146
|
||||
9, shijiazhuang, blue, 0, 1575129618000, 24, 10.290187
|
||||
9, shijiazhuang, blue, 0, 1575129619000, 23, 17.393345
|
||||
9, shijiazhuang, blue, 0, 1575129620000, 30, 12.949215
|
||||
9, shijiazhuang, blue, 0, 1575129621000, 19, 19.267621
|
||||
9, shijiazhuang, blue, 0, 1575129622000, 33, 14.831735
|
||||
9, shijiazhuang, blue, 0, 1575129623000, 21, 14.711125
|
||||
9, shijiazhuang, blue, 0, 1575129624000, 16, 17.168485
|
||||
9, shijiazhuang, blue, 0, 1575129625000, 17, 16.426433
|
||||
9, shijiazhuang, blue, 0, 1575129626000, 19, 13.879050
|
||||
9, shijiazhuang, blue, 0, 1575129627000, 21, 18.308168
|
||||
9, shijiazhuang, blue, 0, 1575129628000, 17, 10.845681
|
||||
9, shijiazhuang, blue, 0, 1575129629000, 20, 10.238272
|
||||
9, shijiazhuang, blue, 0, 1575129630000, 19, 19.424976
|
||||
9, shijiazhuang, blue, 0, 1575129631000, 31, 13.885909
|
||||
9, shijiazhuang, blue, 0, 1575129632000, 15, 19.264740
|
||||
9, shijiazhuang, blue, 0, 1575129633000, 30, 12.460645
|
||||
9, shijiazhuang, blue, 0, 1575129634000, 27, 17.608036
|
||||
9, shijiazhuang, blue, 0, 1575129635000, 25, 13.493812
|
||||
9, shijiazhuang, blue, 0, 1575129636000, 19, 10.955939
|
||||
9, shijiazhuang, blue, 0, 1575129637000, 24, 11.956587
|
||||
9, shijiazhuang, blue, 0, 1575129638000, 15, 19.141381
|
||||
9, shijiazhuang, blue, 0, 1575129639000, 24, 14.801530
|
||||
9, shijiazhuang, blue, 0, 1575129640000, 17, 14.347318
|
||||
9, shijiazhuang, blue, 0, 1575129641000, 29, 14.803237
|
||||
9, shijiazhuang, blue, 0, 1575129642000, 28, 10.342297
|
||||
9, shijiazhuang, blue, 0, 1575129643000, 29, 19.368282
|
||||
9, shijiazhuang, blue, 0, 1575129644000, 31, 17.491654
|
||||
9, shijiazhuang, blue, 0, 1575129645000, 18, 13.161736
|
||||
9, shijiazhuang, blue, 0, 1575129646000, 17, 16.067354
|
||||
9, shijiazhuang, blue, 0, 1575129647000, 18, 13.736465
|
||||
9, shijiazhuang, blue, 0, 1575129648000, 23, 19.103276
|
||||
9, shijiazhuang, blue, 0, 1575129649000, 29, 16.075892
|
||||
9, shijiazhuang, blue, 0, 1575129650000, 21, 10.728566
|
||||
9, shijiazhuang, blue, 0, 1575129651000, 15, 18.921849
|
||||
9, shijiazhuang, blue, 0, 1575129652000, 24, 16.914709
|
||||
9, shijiazhuang, blue, 0, 1575129653000, 19, 13.501651
|
||||
9, shijiazhuang, blue, 0, 1575129654000, 19, 13.538347
|
||||
9, shijiazhuang, blue, 0, 1575129655000, 16, 13.261095
|
||||
9, shijiazhuang, blue, 0, 1575129656000, 32, 16.315746
|
||||
9, shijiazhuang, blue, 0, 1575129657000, 27, 16.400939
|
||||
9, shijiazhuang, blue, 0, 1575129658000, 24, 13.321819
|
||||
9, shijiazhuang, blue, 0, 1575129659000, 27, 19.070181
|
||||
9, shijiazhuang, blue, 0, 1575129660000, 27, 13.040922
|
||||
9, shijiazhuang, blue, 0, 1575129661000, 32, 10.872530
|
||||
9, shijiazhuang, blue, 0, 1575129662000, 28, 16.428657
|
||||
9, shijiazhuang, blue, 0, 1575129663000, 32, 13.883854
|
||||
9, shijiazhuang, blue, 0, 1575129664000, 33, 14.299554
|
||||
9, shijiazhuang, blue, 0, 1575129665000, 30, 16.445130
|
||||
9, shijiazhuang, blue, 0, 1575129666000, 15, 18.059404
|
||||
9, shijiazhuang, blue, 0, 1575129667000, 21, 12.348847
|
||||
9, shijiazhuang, blue, 0, 1575129668000, 32, 13.315378
|
||||
9, shijiazhuang, blue, 0, 1575129669000, 17, 15.689507
|
||||
9, shijiazhuang, blue, 0, 1575129670000, 22, 15.591808
|
||||
9, shijiazhuang, blue, 0, 1575129671000, 27, 16.386065
|
||||
9, shijiazhuang, blue, 0, 1575129672000, 25, 10.564803
|
||||
9, shijiazhuang, blue, 0, 1575129673000, 20, 12.276544
|
||||
9, shijiazhuang, blue, 0, 1575129674000, 26, 15.828786
|
||||
9, shijiazhuang, blue, 0, 1575129675000, 18, 12.236420
|
||||
9, shijiazhuang, blue, 0, 1575129676000, 15, 19.439522
|
||||
9, shijiazhuang, blue, 0, 1575129677000, 19, 19.831531
|
||||
9, shijiazhuang, blue, 0, 1575129678000, 22, 17.115744
|
||||
9, shijiazhuang, blue, 0, 1575129679000, 29, 19.879456
|
||||
9, shijiazhuang, blue, 0, 1575129680000, 34, 10.207136
|
||||
9, shijiazhuang, blue, 0, 1575129681000, 16, 17.633523
|
||||
9, shijiazhuang, blue, 0, 1575129682000, 15, 14.227873
|
||||
9, shijiazhuang, blue, 0, 1575129683000, 34, 12.027768
|
||||
9, shijiazhuang, blue, 0, 1575129684000, 22, 11.376610
|
||||
9, shijiazhuang, blue, 0, 1575129685000, 21, 11.711299
|
||||
9, shijiazhuang, blue, 0, 1575129686000, 33, 14.281126
|
||||
9, shijiazhuang, blue, 0, 1575129687000, 31, 10.895302
|
||||
9, shijiazhuang, blue, 0, 1575129688000, 31, 13.971350
|
||||
9, shijiazhuang, blue, 0, 1575129689000, 15, 15.262790
|
||||
9, shijiazhuang, blue, 0, 1575129690000, 23, 12.440568
|
||||
9, shijiazhuang, blue, 0, 1575129691000, 32, 19.731267
|
||||
9, shijiazhuang, blue, 0, 1575129692000, 22, 10.518092
|
||||
9, shijiazhuang, blue, 0, 1575129693000, 34, 17.863021
|
||||
9, shijiazhuang, blue, 0, 1575129694000, 28, 11.478909
|
||||
9, shijiazhuang, blue, 0, 1575129695000, 16, 15.075524
|
||||
9, shijiazhuang, blue, 0, 1575129696000, 16, 10.292127
|
||||
9, shijiazhuang, blue, 0, 1575129697000, 22, 13.716012
|
||||
9, shijiazhuang, blue, 0, 1575129698000, 32, 10.906551
|
||||
9, shijiazhuang, blue, 0, 1575129699000, 19, 18.386868
|
|
|
@ -0,0 +1,8 @@
|
|||
module github.com/taosdata/TDengine/importSampleData
|
||||
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/pelletier/go-toml v1.9.0 // indirect
|
||||
github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 // indirect
|
||||
)
|
|
@ -14,23 +14,23 @@ var (
|
|||
once sync.Once
|
||||
)
|
||||
|
||||
// Config inclue all scene import config
|
||||
// Config include all scene import config
|
||||
type Config struct {
|
||||
UserCases map[string]CaseConfig
|
||||
}
|
||||
|
||||
// CaseConfig include the sample data config and tdengine config
|
||||
type CaseConfig struct {
|
||||
Format string
|
||||
FilePath string
|
||||
Separator string
|
||||
Stname string
|
||||
SubTableName string
|
||||
Timestamp string
|
||||
TimestampType string
|
||||
TimestampTypeFormat string
|
||||
Tags []FieldInfo
|
||||
Fields []FieldInfo
|
||||
Format string
|
||||
FilePath string
|
||||
Separator string
|
||||
StName string
|
||||
SubTableName string
|
||||
Timestamp string
|
||||
TimestampType string
|
||||
TimestampTypeFormat string
|
||||
Tags []FieldInfo
|
||||
Fields []FieldInfo
|
||||
}
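With Stname now StName, a populated case definition would look roughly like the fragment below. It assumes it sits inside a function in the same package with these types in scope; the concrete field values are invented to match the devid/location/color/devgroup/ts/temperature/humidity columns of the sample CSV, and FieldInfo is assumed to expose at least a Name field, as main.go relies on:

// a hypothetical CaseConfig value, mirroring the struct above
cfg := CaseConfig{
	Format:        "csv",
	FilePath:      "data/sensor_info.csv", // made-up path
	Separator:     ",",
	StName:        "sensor_info",
	SubTableName:  "devid",
	Timestamp:     "ts",
	TimestampType: "millisecond",
	Tags:          []FieldInfo{{Name: "location"}, {Name: "color"}, {Name: "devgroup"}},
	Fields:        []FieldInfo{{Name: "temperature"}, {Name: "humidity"}},
}
_ = cfg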
|
||||
|
||||
// FieldInfo is field or tag info
|
||||
|
|
|
@ -64,7 +64,7 @@
|
|||
# monitorInterval 30
|
||||
|
||||
# number of seconds allowed for a dnode to be offline, for cluster only
|
||||
# offlineThreshold 8640000
|
||||
# offlineThreshold 864000
|
||||
|
||||
# RPC re-try timer, millisecond
|
||||
# rpcTimer 300
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
name: tdengine
|
||||
base: core18
|
||||
version: '2.0.19.0'
|
||||
version: '2.0.20.0'
|
||||
icon: snap/gui/t-dengine.svg
|
||||
summary: an open-source big data platform designed and optimized for IoT.
|
||||
description: |
|
||||
|
@ -72,7 +72,7 @@ parts:
|
|||
- usr/bin/taosd
|
||||
- usr/bin/taos
|
||||
- usr/bin/taosdemo
|
||||
- usr/lib/libtaos.so.2.0.19.0
|
||||
- usr/lib/libtaos.so.2.0.20.0
|
||||
- usr/lib/libtaos.so.1
|
||||
- usr/lib/libtaos.so
|
||||
|
||||
|
|
|
@ -44,23 +44,15 @@ typedef struct SLocalMerger {
|
|||
int32_t numOfCompleted;
|
||||
int32_t numOfVnode;
|
||||
SLoserTreeInfo * pLoserTree;
|
||||
char * prevRowOfInput;
|
||||
tFilePage * pResultBuf;
|
||||
int32_t nResultBufSize;
|
||||
tFilePage * pTempBuffer;
|
||||
struct SQLFunctionCtx *pCtx;
|
||||
int32_t rowSize; // size of each intermediate result.
|
||||
bool hasPrevRow; // cannot be released
|
||||
bool hasUnprocessedRow;
|
||||
tOrderDescriptor * pDesc;
|
||||
SColumnModel * resColModel;
|
||||
SColumnModel* finalModel;
|
||||
tExtMemBuffer ** pExtMemBuffer; // disk-based buffer
|
||||
SFillInfo* pFillInfo; // interpolation support structure
|
||||
char* pFinalRes; // result data after interpo
|
||||
tFilePage* discardData;
|
||||
bool discard;
|
||||
int32_t offset; // limit offset value
|
||||
bool orderPrjOnSTable; // projection query on stable
|
||||
} SLocalMerger;
|
||||
|
||||
|
@ -94,7 +86,7 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde
|
|||
|
||||
void tscDestroyLocalMerger(SSqlObj *pSql);
|
||||
|
||||
int32_t tscDoLocalMerge(SSqlObj *pSql);
|
||||
//int32_t tscDoLocalMerge(SSqlObj *pSql);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -85,15 +85,13 @@ typedef struct SMergeTsCtx {
|
|||
int8_t compared;
|
||||
}SMergeTsCtx;
|
||||
|
||||
|
||||
typedef struct SVgroupTableInfo {
|
||||
SVgroupInfo vgInfo;
|
||||
SArray* itemList; //SArray<STableIdInfo>
|
||||
SArray *itemList; // SArray<STableIdInfo>
|
||||
} SVgroupTableInfo;
|
||||
|
||||
static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex) {
|
||||
static FORCE_INLINE SQueryInfo* tscGetQueryInfo(SSqlCmd* pCmd, int32_t subClauseIndex) {
|
||||
assert(pCmd != NULL && subClauseIndex >= 0);
|
||||
|
||||
if (pCmd->pQueryInfo == NULL || subClauseIndex >= pCmd->numOfClause) {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -101,6 +99,8 @@ static FORCE_INLINE SQueryInfo* tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t sub
|
|||
return pCmd->pQueryInfo[subClauseIndex];
|
||||
}
|
||||
|
||||
SQueryInfo* tscGetActiveQueryInfo(SSqlCmd* pCmd);
|
||||
|
||||
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
|
||||
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
|
||||
void tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf);
|
||||
|
@ -129,7 +129,13 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
|
|||
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
|
||||
bool tscIsTopbotQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
|
||||
bool hasTagValOutput(SQueryInfo* pQueryInfo);
|
||||
bool timeWindowInterpoRequired(SQueryInfo *pQueryInfo);
|
||||
bool isStabledev(SQueryInfo* pQueryInfo);
|
||||
bool isTsCompQuery(SQueryInfo* pQueryInfo);
|
||||
bool isSimpleAggregate(SQueryInfo* pQueryInfo);
|
||||
bool isBlockDistQuery(SQueryInfo* pQueryInfo);
|
||||
int32_t tscGetTopbotQueryParam(SQueryInfo* pQueryInfo);
|
||||
|
||||
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
|
||||
|
@ -143,7 +149,7 @@ bool tscQueryTags(SQueryInfo* pQueryInfo);
|
|||
bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
|
||||
bool tscQueryBlockInfo(SQueryInfo* pQueryInfo);
|
||||
|
||||
SSqlExpr* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
|
||||
SExprInfo* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
|
||||
SColumnIndex* pIndex, SSchema* pColSchema, int16_t colType);
|
||||
|
||||
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql);
|
||||
|
@ -174,27 +180,29 @@ void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes)
|
|||
|
||||
int32_t tscGetResRowLength(SArray* pExprList);
|
||||
|
||||
SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
|
||||
SExprInfo* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
|
||||
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
|
||||
|
||||
SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
|
||||
SExprInfo* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
|
||||
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
|
||||
|
||||
SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
|
||||
SExprInfo* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
|
||||
int16_t size);
|
||||
size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo);
|
||||
void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
|
||||
void tscInsertPrimaryTsSourceColumn(SQueryInfo* pQueryInfo, uint64_t uid);
|
||||
|
||||
SSqlExpr* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index);
|
||||
SExprInfo* tscSqlExprGet(SQueryInfo* pQueryInfo, int32_t index);
|
||||
int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy);
|
||||
void tscSqlExprAssign(SExprInfo* dst, const SExprInfo* src);
|
||||
void tscSqlExprInfoDestroy(SArray* pExprInfo);
|
||||
|
||||
SColumn* tscColumnClone(const SColumn* src);
|
||||
bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex);
|
||||
SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
|
||||
SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
|
||||
bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid);
|
||||
SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t uid, SSchema* pSchema);
|
||||
void tscColumnListDestroy(SArray* pColList);
|
||||
|
||||
void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo);
|
||||
|
||||
void tscDequoteAndTrimToken(SStrToken* pToken);
|
||||
int32_t tscValidateName(SStrToken* pToken);
|
||||
|
||||
|
@ -216,8 +224,11 @@ bool tscShouldBeFreed(SSqlObj* pSql);
|
|||
STableMetaInfo* tscGetTableMetaInfoFromCmd(SSqlCmd *pCmd, int32_t subClauseIndex, int32_t tableIndex);
|
||||
STableMetaInfo* tscGetMetaInfo(SQueryInfo *pQueryInfo, int32_t tableIndex);
|
||||
|
||||
SQueryInfo *tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex);
|
||||
SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
|
||||
void tscInitQueryInfo(SQueryInfo* pQueryInfo);
|
||||
void tscClearSubqueryInfo(SSqlCmd* pCmd);
|
||||
int32_t tscAddQueryInfo(SSqlCmd *pCmd);
|
||||
SQueryInfo *tscGetQueryInfo(SSqlCmd* pCmd, int32_t subClauseIndex);
|
||||
SQueryInfo *tscGetQueryInfoS(SSqlCmd *pCmd, int32_t subClauseIndex);
|
||||
|
||||
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo);
|
||||
|
||||
|
@ -225,11 +236,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM
|
|||
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
|
||||
|
||||
STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo *pQueryInfo);
|
||||
int32_t tscAddSubqueryInfo(SSqlCmd *pCmd);
|
||||
|
||||
void tscInitQueryInfo(SQueryInfo* pQueryInfo);
|
||||
|
||||
void tscClearSubqueryInfo(SSqlCmd* pCmd);
|
||||
void tscFreeVgroupTableInfo(SArray* pVgroupTables);
|
||||
SArray* tscVgroupTableInfoDup(SArray* pVgroupTables);
|
||||
void tscRemoveVgroupTableGroup(SArray* pVgroupTable, int32_t index);
|
||||
|
@ -241,6 +248,8 @@ int tscGetTableMetaEx(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, bool creat
|
|||
|
||||
void tscResetForNextRetrieve(SSqlRes* pRes);
|
||||
void tscDoQuery(SSqlObj* pSql);
|
||||
void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo);
|
||||
void doExecuteQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo);
|
||||
|
||||
SVgroupsInfo* tscVgroupInfoClone(SVgroupsInfo *pInfo);
|
||||
void* tscVgroupInfoClear(SVgroupsInfo *pInfo);
|
||||
|
@ -274,7 +283,7 @@ void doAddGroupColumnForSubquery(SQueryInfo* pQueryInfo, int32_t tagIndex);
|
|||
int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid);
|
||||
int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId);
|
||||
|
||||
void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex);
|
||||
void tscPrintSelNodeList(SSqlObj* pSql, int32_t subClauseIndex);
|
||||
|
||||
bool hasMoreVnodesToTry(SSqlObj *pSql);
|
||||
bool hasMoreClauseToTry(SSqlObj* pSql);
|
||||
|
@ -299,7 +308,10 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
|
|||
uint32_t tscGetTableMetaMaxSize();
|
||||
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
|
||||
STableMeta* tscTableMetaDup(STableMeta* pTableMeta);
|
||||
int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr);
|
||||
|
||||
void tsCreateSQLFunctionCtx(SQueryInfo* pQueryInfo, SQLFunctionCtx* pCtx, SSchema* pSchema);
|
||||
void* createQueryInfoFromQueryNode(SQueryInfo* pQueryInfo, SExprInfo* pExprs, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage);
|
||||
|
||||
void* malloc_throw(size_t size);
|
||||
void* calloc_throw(size_t nmemb, size_t size);
|
||||
|
|
|
@ -35,6 +35,7 @@ extern "C" {
|
|||
#include "qExecutor.h"
|
||||
#include "qSqlparser.h"
|
||||
#include "qTsbuf.h"
|
||||
#include "qUtil.h"
|
||||
#include "tcmdtype.h"
|
||||
|
||||
// forward declaration
|
||||
|
@ -96,12 +97,22 @@ typedef struct STableMetaInfo {
|
|||
SArray *tagColList; // SArray<SColumn*>, involved tag columns
|
||||
} STableMetaInfo;
|
||||
|
||||
|
||||
typedef struct SColumnIndex {
  int16_t tableIndex;
  int16_t columnIndex;
} SColumnIndex;

typedef struct SColumn {
  uint64_t    tableUid;
  int32_t     columnIndex;
  SColumnInfo info;
} SColumn;

typedef struct SInternalField {
  TAOS_FIELD field;
  bool       visible;
  SExprInfo *pExpr;
} SInternalField;

typedef struct SFieldInfo {
  int16_t numOfOutput;   // number of column in result

@ -109,43 +120,6 @@
  SArray *internalField; // SArray<SInternalField>
} SFieldInfo;
typedef struct SColumn {
  SColumnIndex       colIndex;
  int32_t            numOfFilters;
  SColumnFilterInfo *filterInfo;
} SColumn;

/* the structure for sql function in select clause */
typedef struct SSqlExpr {
  char      aliasName[TSDB_COL_NAME_LEN];  // as aliasName
  SColIndex colInfo;
  uint64_t  uid;            // refactor use the pointer
  int16_t   functionId;     // function id in aAgg array
  int16_t   resType;        // return value type
  int16_t   resBytes;       // length of return value
  int32_t   interBytes;     // inter result buffer size
  int16_t   numOfParams;    // argument value of each function
  tVariant  param[3];       // parameters are not more than 3
  int32_t   offset;         // sub result column value of arithmetic expression.
  int16_t   resColId;       // result column id
  SColumn  *pFilter;        // expr filter
} SSqlExpr;

typedef struct SExprFilter {
  tSqlExpr *pExpr;          // used for having parse
  SSqlExpr *pSqlExpr;
  SArray   *fp;
  SColumn  *pFilters;       // having filter info
} SExprFilter;

typedef struct SInternalField {
  TAOS_FIELD   field;
  bool         visible;
  SExprInfo   *pArithExprInfo;
  SSqlExpr    *pSqlExpr;
  SExprFilter *pFieldFilters;
} SInternalField;

typedef struct SCond {
  uint64_t uid;
  int32_t  len;  // length of tag query condition data
|
||||
|
@ -160,7 +134,7 @@ typedef struct SJoinNode {
|
|||
} SJoinNode;

typedef struct SJoinInfo {
  bool hasJoin;
  bool       hasJoin;
  SJoinNode* joinTables[TSDB_MAX_JOIN_TABLE_NUM];
} SJoinInfo;
|
||||
|
||||
|
@ -229,13 +203,14 @@ typedef struct SQueryInfo {
|
|||
SInterval interval; // tumble time window
|
||||
SSessionWindow sessionWindow; // session time window
|
||||
|
||||
SSqlGroupbyExpr groupbyExpr; // group by tags info
|
||||
SSqlGroupbyExpr groupbyExpr; // groupby tags info
|
||||
SArray * colList; // SArray<SColumn*>
|
||||
SFieldInfo fieldsInfo;
|
||||
SArray * exprList; // SArray<SSqlExpr*>
|
||||
SArray * exprList; // SArray<SExprInfo*>
|
||||
SLimitVal limit;
|
||||
SLimitVal slimit;
|
||||
STagCond tagCond;
|
||||
|
||||
SOrderVal order;
|
||||
int16_t fillType; // final result fill type
|
||||
int16_t numOfTables;
|
||||
|
@ -254,7 +229,15 @@ typedef struct SQueryInfo {
|
|||
int32_t round; // 0/1/....
|
||||
int32_t bufLen;
|
||||
char* buf;
|
||||
int32_t havingFieldNum;
|
||||
SQInfo* pQInfo; // global merge operator
|
||||
SArray* pDSOperator; // data source operator
|
||||
SArray* pPhyOperator; // physical query execution plan
|
||||
SQueryAttr* pQueryAttr; // query object
|
||||
|
||||
struct SQueryInfo *sibling; // sibling
|
||||
SArray *pUpstream; // SArray<struct SQueryInfo>
|
||||
struct SQueryInfo *pDownstream;
|
||||
int32_t havingFieldNum;
|
||||
} SQueryInfo;
|
||||
|
||||
typedef struct {
|
||||
|
@ -269,8 +252,6 @@ typedef struct {
|
|||
};
|
||||
|
||||
uint32_t insertType; // TODO remove it
|
||||
int32_t clauseIndex; // index of multiple subclause query
|
||||
|
||||
char * curSql; // current sql, resume position of sql after parsing paused
|
||||
int8_t parseFinished;
|
||||
char reserve2[3]; // fix bus error on arm32
|
||||
|
@ -280,22 +261,26 @@ typedef struct {
|
|||
uint32_t allocSize;
|
||||
char * payload;
|
||||
int32_t payloadLen;
|
||||
|
||||
SQueryInfo **pQueryInfo;
|
||||
int32_t numOfClause;
|
||||
int32_t clauseIndex; // index of multiple subclause query
|
||||
SQueryInfo *active; // current active query info
|
||||
|
||||
int32_t batchSize; // for parameter ('?') binding and batch processing
|
||||
int32_t numOfParams;
|
||||
|
||||
int8_t dataSourceType; // load data from file or not
|
||||
char reserve4[3]; // fix bus error on arm32
|
||||
char reserve4[3]; // fix bus error on arm32
|
||||
int8_t submitSchema; // submit block is built with table schema
|
||||
char reserve5[3]; // fix bus error on arm32
|
||||
char reserve5[3]; // fix bus error on arm32
|
||||
STagData tagData; // NOTE: pTagData->data is used as a variant length array
|
||||
|
||||
SName **pTableNameList; // all involved tableMeta list of current insert sql statement.
|
||||
int32_t numOfTables;
|
||||
|
||||
SHashObj *pTableBlockHashList; // data block for each table
|
||||
SArray *pDataBlocks; // SArray<STableDataBlocks*>. Merged submit block for each vgroup
|
||||
SArray *pDataBlocks; // SArray<STableDataBlocks*>. Merged submit block for each vgroup
|
||||
} SSqlCmd;
|
||||
|
||||
typedef struct SResRec {
|
||||
|
@ -438,7 +423,7 @@ void tscInitMsgsFp();
|
|||
int tsParseSql(SSqlObj *pSql, bool initial);
|
||||
|
||||
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet);
|
||||
int tscProcessSql(SSqlObj *pSql);
|
||||
int tscBuildAndSendRequest(SSqlObj *pSql, SQueryInfo* pQueryInfo);
|
||||
|
||||
int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex);
|
||||
void tscAsyncResultOnError(SSqlObj *pSql);
|
||||
|
@ -453,6 +438,9 @@ void tscRestoreFuncForSTableQuery(SQueryInfo *pQueryInfo);
|
|||
|
||||
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
|
||||
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
|
||||
void tscSetResRawPtrRv(SSqlRes* pRes, SQueryInfo* pQueryInfo, SSDataBlock* pBlock);
|
||||
|
||||
void handleDownstreamOperator(SSqlRes* pRes, SQueryInfo* pQueryInfo);
|
||||
void destroyTableNameList(SSqlCmd* pCmd);
|
||||
|
||||
void tscResetSqlCmd(SSqlCmd *pCmd, bool removeMeta);
|
||||
|
@ -500,47 +488,6 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
|
|||
|
||||
int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
|
||||
|
||||
static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex, int32_t offset) {
  SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pFieldInfo->internalField, columnIndex);

  int32_t type  = pInfo->field.type;
  int32_t bytes = pInfo->field.bytes;

  char* pData = pRes->data + (int32_t)(offset * pRes->numOfRows + bytes * pRes->row);
  UNUSED(pData);

  // user defined constant value output columns
  if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
    if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
      pData = pInfo->pSqlExpr->param[1].pz;
      pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
      pRes->tsrow[columnIndex] = (pInfo->pSqlExpr->param[1].nType == TSDB_DATA_TYPE_NULL) ? NULL : (unsigned char*)pData;
    } else {
      assert(bytes == tDataTypes[type].bytes);

      pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)&pInfo->pSqlExpr->param[1].i64;
      pRes->length[columnIndex] = bytes;
    }
  } else {
    if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
      int32_t realLen = varDataLen(pData);
      assert(realLen <= bytes - VARSTR_HEADER_SIZE);

      pRes->tsrow[columnIndex] = (isNull(pData, type)) ? NULL : (unsigned char*)((tstr *)pData)->data;
      if (realLen < pInfo->pSqlExpr->resBytes - VARSTR_HEADER_SIZE) { // todo refactor
        *(pData + realLen + VARSTR_HEADER_SIZE) = 0;
      }

      pRes->length[columnIndex] = realLen;
    } else {
      assert(bytes == tDataTypes[type].bytes);

      pRes->tsrow[columnIndex] = isNull(pData, type) ? NULL : (unsigned char*)pData;
      pRes->length[columnIndex] = bytes;
    }
  }
}
|
||||
|
||||
extern int32_t sentinel;
|
||||
extern SHashObj *tscVgroupMap;
|
||||
extern SHashObj *tscTableMetaInfo;
|
||||
|
|
|
@ -49,6 +49,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions
|
|||
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset
|
||||
(JNIEnv *, jclass);
|
||||
|
||||
/*
 * Class:     com_taosdata_jdbc_TSDBJNIConnector
 * Method:    getResultTimePrecision
 * Signature: (J)J
 */
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrecision
  (JNIEnv *, jobject, jlong, jlong);
|
||||
|
||||
/*
|
||||
* Class: com_taosdata_jdbc_TSDBJNIConnector
|
||||
* Method: connectImp
|
||||
|
|
|
@ -671,3 +671,20 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab
|
|||
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(JNIEnv *env, jobject jobj) {
  return (*env)->NewStringUTF(env, (const char *)tsCharset);
}

JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TDDBJNIConnector_getResultTimePrecision(JNIEnv *env, jobject jobj, jlong con,
                                                                                      jlong res) {
  TAOS *tscon = (TAOS *)con;
  if (tscon == NULL) {
    jniError("jobj:%p, connection is closed", jobj);
    return JNI_CONNECTION_NULL;
  }

  TAOS_RES *result = (TAOS_RES *)res;
  if (result == NULL) {
    jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
    return JNI_RESULT_SET_NULL;
  }

  return taos_result_precision(result);
}
|
|
@ -49,7 +49,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
|
|||
|
||||
pSql->sqlstr = calloc(1, sqlLen + 1);
|
||||
if (pSql->sqlstr == NULL) {
|
||||
tscError("%p failed to malloc sql string buffer", pSql);
|
||||
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
|
||||
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
tscAsyncResultOnError(pSql);
|
||||
return;
|
||||
|
@ -57,7 +57,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
|
|||
|
||||
strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen);
|
||||
|
||||
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
|
||||
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
|
||||
pCmd->curSql = pSql->sqlstr;
|
||||
|
||||
int32_t code = tsParseSql(pSql, true);
|
||||
|
@ -69,7 +69,8 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
|
|||
return;
|
||||
}
|
||||
|
||||
tscDoQuery(pSql);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex);
|
||||
executeQuery(pSql, pQueryInfo);
|
||||
}
|
||||
|
||||
// TODO return the correct error code to client in tscQueueAsyncError
|
||||
|
@ -80,7 +81,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa
|
|||
TAOS_RES * taos_query_ra(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *param) {
|
||||
STscObj *pObj = (STscObj *)taos;
|
||||
if (pObj == NULL || pObj->signature != pObj) {
|
||||
tscError("bug!!! pObj:%p", pObj);
|
||||
tscError("pObj:%p is NULL or freed", pObj);
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
|
||||
return NULL;
|
||||
|
@ -179,7 +180,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
|
|||
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
|
||||
tscFetchDatablockForSubquery(pSql);
|
||||
} else {
|
||||
tscProcessSql(pSql);
|
||||
tscBuildAndSendRequest(pSql, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -193,8 +194,8 @@ static void tscAsyncQueryRowsForNextVnode(void *param, TAOS_RES *tres, int numOf
|
|||
tscProcessAsyncRetrieveImpl(param, tres, numOfRows, tscAsyncFetchRowsProxy);
|
||||
}
|
||||
|
||||
void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
|
||||
SSqlObj *pSql = (SSqlObj *)taosa;
|
||||
void taos_fetch_rows_a(TAOS_RES *tres, __async_cb_func_t fp, void *param) {
|
||||
SSqlObj *pSql = (SSqlObj *)tres;
|
||||
if (pSql == NULL || pSql->signature != pSql) {
|
||||
tscError("sql object is NULL");
|
||||
tscQueueAsyncError(fp, param, TSDB_CODE_TSC_DISCONNECTED);
|
||||
|
@ -206,18 +207,16 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
|
|||
|
||||
// user-defined callback function is stored in fetchFp
|
||||
pSql->fetchFp = fp;
|
||||
pSql->fp = tscAsyncFetchRowsProxy;
|
||||
pSql->fp = tscAsyncFetchRowsProxy;
|
||||
pSql->param = param;
|
||||
|
||||
if (pRes->qId == 0) {
|
||||
tscError("qhandle is NULL");
|
||||
tscError("qhandle is invalid");
|
||||
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
|
||||
pSql->param = param;
|
||||
|
||||
tscAsyncResultOnError(pSql);
|
||||
return;
|
||||
}
|
||||
|
||||
pSql->param = param;
|
||||
tscResetForNextRetrieve(pRes);
|
||||
|
||||
// handle the sub queries of join query
|
||||
|
@ -255,8 +254,9 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
|
|||
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
|
||||
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
|
||||
}
|
||||
|
||||
tscProcessSql(pSql);
|
||||
|
||||
SQueryInfo* pQueryInfo1 = tscGetActiveQueryInfo(&pSql->cmd);
|
||||
tscBuildAndSendRequest(pSql, pQueryInfo1);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -283,12 +283,12 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
|
|||
static void tscAsyncResultCallback(SSchedMsg *pMsg) {
|
||||
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)pMsg->ahandle);
|
||||
if (pSql == NULL || pSql->signature != pSql) {
|
||||
tscDebug("%p SqlObj is freed, not add into queue async res", pSql);
|
||||
tscDebug("%p SqlObj is freed, not add into queue async res", pMsg->ahandle);
|
||||
return;
|
||||
}
|
||||
|
||||
assert(pSql->res.code != TSDB_CODE_SUCCESS);
|
||||
tscError("%p invoke user specified function due to error occurred, code:%s", pSql, tstrerror(pSql->res.code));
|
||||
tscError("0x%"PRIx64" async result callback, code:%s", pSql->self, tstrerror(pSql->res.code));
|
||||
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
if (pSql->fp == NULL || pSql->fetchFp == NULL){
|
||||
|
@ -323,7 +323,7 @@ static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableM
|
|||
|
||||
SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
|
||||
for (int32_t i = 0; i < numOfExprs; ++i) {
|
||||
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
|
||||
SSqlExpr *pExpr = &(tscSqlExprGet(pQueryInfo, i)->base);
|
||||
pExpr->uid = pTableMetaInfo->pTableMeta->id.uid;
|
||||
|
||||
if (pExpr->colInfo.colIndex >= 0) {
|
||||
|
@ -344,7 +344,7 @@ static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableM
|
|||
// validate the table columns information
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pQueryInfo->colList); ++i) {
|
||||
SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i);
|
||||
if (pCol->colIndex.columnIndex >= numOfCols) {
|
||||
if (pCol->columnIndex >= numOfCols) {
|
||||
return pSql->retryReason;
|
||||
}
|
||||
}
|
||||
|
@ -368,17 +368,17 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
|
|||
SSqlObj *sub = (SSqlObj*) res;
|
||||
const char* msg = (sub->cmd.command == TSDB_SQL_STABLEVGROUP)? "vgroup-list":"table-meta";
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code));
|
||||
tscError("0x%"PRIx64" get %s failed, code:%s", pSql->self, msg, tstrerror(code));
|
||||
goto _error;
|
||||
}
|
||||
|
||||
tscDebug("%p get %s successfully", pSql, msg);
|
||||
tscDebug("0x%"PRIx64" get %s successfully", pSql->self, msg);
|
||||
if (pSql->pStream == NULL) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex);
|
||||
|
||||
// check if it is a sub-query of super table query first, if true, enter another routine
|
||||
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
|
||||
tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql);
|
||||
tscDebug("0x%"PRIx64" update local table meta, continue to process sql and send the corresponding query", pSql->self);
|
||||
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
|
||||
|
@ -396,13 +396,13 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
|
|||
goto _error;
|
||||
}
|
||||
|
||||
// tscProcessSql can add error into async res
|
||||
tscProcessSql(pSql);
|
||||
// tscBuildAndSendRequest can add error into async res
|
||||
tscBuildAndSendRequest(pSql, NULL);
|
||||
taosReleaseRef(tscObjRef, pSql->self);
|
||||
return;
|
||||
} else { // continue to process normal async query
|
||||
if (pCmd->parseFinished) {
|
||||
tscDebug("%p update local table meta, continue to process sql and send corresponding query", pSql);
|
||||
tscDebug("0x%"PRIx64" update local table meta, continue to process sql and send corresponding query", pSql->self);
|
||||
|
||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
|
||||
code = tscGetTableMeta(pSql, pTableMetaInfo);
|
||||
|
@ -416,7 +416,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
|
|||
assert(pCmd->command != TSDB_SQL_INSERT);
|
||||
|
||||
if (pCmd->command == TSDB_SQL_SELECT) {
|
||||
tscDebug("%p redo parse sql string and proceed", pSql);
|
||||
tscDebug("0x%"PRIx64" redo parse sql string and proceed", pSql->self);
|
||||
pCmd->parseFinished = false;
|
||||
tscResetSqlCmd(pCmd, true);
|
||||
|
||||
|
@ -428,15 +428,15 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
|
|||
goto _error;
|
||||
}
|
||||
|
||||
tscProcessSql(pSql);
|
||||
tscBuildAndSendRequest(pSql, NULL);
|
||||
} else { // in all other cases, simple retry
|
||||
tscProcessSql(pSql);
|
||||
tscBuildAndSendRequest(pSql, NULL);
|
||||
}
|
||||
|
||||
taosReleaseRef(tscObjRef, pSql->self);
|
||||
return;
|
||||
} else {
|
||||
tscDebug("%p continue parse sql after get table meta", pSql);
|
||||
tscDebug("0x%"PRIx64" continue parse sql after get table meta", pSql->self);
|
||||
|
||||
code = tsParseSql(pSql, false);
|
||||
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||
|
@ -447,21 +447,29 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
|
|||
}
|
||||
|
||||
if (pCmd->insertType == TSDB_QUERY_TYPE_STMT_INSERT) {
|
||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
|
||||
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
|
||||
code = tscGetTableMeta(pSql, pTableMetaInfo);
|
||||
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||
taosReleaseRef(tscObjRef, pSql->self);
|
||||
return;
|
||||
} else {
|
||||
assert(code == TSDB_CODE_SUCCESS);
|
||||
assert(code == TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
(*pSql->fp)(pSql->param, pSql, code);
|
||||
taosReleaseRef(tscObjRef, pSql->self);
|
||||
return;
|
||||
} else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT)) {
|
||||
if (pCmd->dataSourceType == DATA_FROM_DATA_FILE) {
|
||||
tscImportDataFromFile(pSql);
|
||||
} else {
|
||||
tscHandleMultivnodeInsert(pSql);
|
||||
}
|
||||
} else {
|
||||
SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pCmd, pCmd->clauseIndex);
|
||||
executeQuery(pSql, pQueryInfo1);
|
||||
}
|
||||
|
||||
// proceed to invoke the tscDoQuery();
|
||||
taosReleaseRef(tscObjRef, pSql->self);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -486,7 +494,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
|
|||
}
|
||||
}
|
||||
|
||||
tscDebug("%p stream:%p meta is updated, start new query, command:%d", pSql, pSql->pStream, pSql->cmd.command);
|
||||
tscDebug("0x%"PRIx64" stream:%p meta is updated, start new query, command:%d", pSql->self, pSql->pStream, pSql->cmd.command);
|
||||
if (!pSql->cmd.parseFinished) {
|
||||
tsParseSql(pSql, false);
|
||||
}
|
||||
|
@ -498,7 +506,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
|
|||
return;
|
||||
}
|
||||
|
||||
tscDoQuery(pSql);
|
||||
// tscDoQuery(pSql);
|
||||
|
||||
taosReleaseRef(tscObjRef, pSql->self);
|
||||
|
||||
|
|
|
@ -53,7 +53,7 @@ static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) {
|
|||
SSqlRes *pRes = &pSql->res;
|
||||
|
||||
// one column for each row
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
STableMeta * pMeta = pTableMetaInfo->pTableMeta;
|
||||
|
@ -154,14 +154,14 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
|
|||
|
||||
pSql->cmd.numOfCols = numOfCols;
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
pQueryInfo->order.order = TSDB_ORDER_ASC;
|
||||
|
||||
TAOS_FIELD f = {.type = TSDB_DATA_TYPE_BINARY, .bytes = (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE};
|
||||
tstrncpy(f.name, "Field", sizeof(f.name));
|
||||
|
||||
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
|
||||
pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
|
||||
(TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false);
|
||||
|
||||
rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE);
|
||||
|
@ -171,7 +171,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
|
|||
tstrncpy(f.name, "Type", sizeof(f.name));
|
||||
|
||||
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE),
|
||||
pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE),
|
||||
-1000, typeColLength, false);
|
||||
|
||||
rowLen += typeColLength + VARSTR_HEADER_SIZE;
|
||||
|
@ -181,7 +181,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
|
|||
tstrncpy(f.name, "Length", sizeof(f.name));
|
||||
|
||||
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t),
|
||||
pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t),
|
||||
-1000, sizeof(int32_t), false);
|
||||
|
||||
rowLen += sizeof(int32_t);
|
||||
|
@ -191,7 +191,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
|
|||
tstrncpy(f.name, "Note", sizeof(f.name));
|
||||
|
||||
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE),
|
||||
pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE),
|
||||
-1000, noteColLength, false);
|
||||
|
||||
rowLen += noteColLength + VARSTR_HEADER_SIZE;
|
||||
|
@ -199,7 +199,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
|
|||
}
|
||||
|
||||
static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
|
||||
assert(tscGetMetaInfo(pQueryInfo, 0)->pTableMeta != NULL);
|
||||
|
||||
|
@ -389,7 +389,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
|
|||
SColumnIndex index = {0};
|
||||
pSql->cmd.numOfCols = 2;
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
pQueryInfo->order.order = TSDB_ORDER_ASC;
|
||||
|
||||
TAOS_FIELD f;
|
||||
|
@ -404,7 +404,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
|
|||
}
|
||||
|
||||
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false);
|
||||
pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false);
|
||||
|
||||
rowLen += f.bytes;
|
||||
|
||||
|
@ -417,7 +417,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
|
|||
}
|
||||
|
||||
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
|
||||
pInfo->pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
|
||||
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false);
|
||||
|
||||
rowLen += ddlLen + VARSTR_HEADER_SIZE;
|
||||
|
@ -427,7 +427,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
|
|||
static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const char *tableName, const char *ddl) {
|
||||
SSqlRes *pRes = &pSql->res;
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
int32_t numOfRows = 1;
|
||||
if (strlen(ddl) == 0) {
|
||||
|
||||
|
@ -444,7 +444,7 @@ static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const c
|
|||
return 0;
|
||||
}
|
||||
static int32_t tscSCreateBuildResult(SSqlObj *pSql, BuildType type, const char *str, const char *result) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
int32_t rowLen = tscSCreateBuildResultFields(pSql, type, result);
|
||||
|
||||
tscFieldInfoUpdateOffset(pQueryInfo);
|
||||
|
@ -552,7 +552,7 @@ static int32_t tscGetTableTagColumnName(SSqlObj *pSql, char **result) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, char *ddl) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
STableMeta * pMeta = pTableMetaInfo->pTableMeta;
|
||||
|
@ -606,7 +606,7 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch
|
|||
}
|
||||
|
||||
static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, char *ddl) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
STableMeta * pMeta = pTableMetaInfo->pTableMeta;
|
||||
|
||||
|
@ -633,7 +633,7 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
|
|||
}
|
||||
static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, char *ddl) {
|
||||
char *result = ddl;
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
STableMeta * pMeta = pTableMetaInfo->pTableMeta;
|
||||
|
||||
|
@ -674,7 +674,7 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
|
|||
}
|
||||
|
||||
static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
assert(pTableMetaInfo->pTableMeta != NULL);
|
||||
|
||||
|
@ -700,7 +700,7 @@ static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
|
|||
}
|
||||
|
||||
static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
|
||||
|
@ -727,7 +727,7 @@ static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
|
|||
return TSDB_CODE_TSC_ACTION_IN_PROGRESS;
|
||||
}
|
||||
static int32_t tscProcessCurrentUser(SSqlObj *pSql) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
|
||||
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
|
||||
pExpr->resBytes = TSDB_USER_LEN + TSDB_DATA_TYPE_BINARY;
|
||||
|
@ -754,7 +754,7 @@ static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
|
|||
extractDBName(pSql->pTscObj->db, db);
|
||||
pthread_mutex_unlock(&pSql->pTscObj->mutex);
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, pSql->cmd.clauseIndex);
|
||||
|
||||
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
|
||||
pExpr->resType = TSDB_DATA_TYPE_BINARY;
|
||||
|
@ -781,7 +781,7 @@ static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
|
|||
|
||||
static int32_t tscProcessServerVer(SSqlObj *pSql) {
|
||||
const char* v = pSql->pTscObj->sversion;
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, pSql->cmd.clauseIndex);
|
||||
|
||||
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
|
||||
pExpr->resType = TSDB_DATA_TYPE_BINARY;
|
||||
|
@ -804,7 +804,7 @@ static int32_t tscProcessServerVer(SSqlObj *pSql) {
|
|||
}
|
||||
|
||||
static int32_t tscProcessClientVer(SSqlObj *pSql) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
|
||||
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
|
||||
pExpr->resType = TSDB_DATA_TYPE_BINARY;
|
||||
|
@ -856,7 +856,7 @@ static int32_t tscProcessServStatus(SSqlObj *pSql) {
|
|||
return pSql->res.code;
|
||||
}
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
|
||||
|
||||
int32_t val = 1;
|
||||
|
@ -870,7 +870,7 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
|
|||
|
||||
pCmd->numOfCols = 1;
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex);
|
||||
pQueryInfo->order.order = TSDB_ORDER_ASC;
|
||||
|
||||
tscFieldInfoClear(&pQueryInfo->fieldsInfo);
|
||||
|
@ -882,7 +882,7 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
|
|||
tscInitResObjForLocalQuery(pSql, 1, (int32_t)valueLength);
|
||||
|
||||
SInternalField* pInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, 0);
|
||||
pInfo->pSqlExpr = taosArrayGetP(pQueryInfo->exprList, 0);
|
||||
pInfo->pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
|
||||
|
||||
memcpy(pRes->data, val, pInfo->field.bytes);
|
||||
}
|
||||
|
@ -926,7 +926,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
|
|||
pRes->code = tscProcessServStatus(pSql);
|
||||
} else {
|
||||
pRes->code = TSDB_CODE_TSC_INVALID_SQL;
|
||||
tscError("%p not support command:%d", pSql, pCmd->command);
|
||||
tscError("0x%"PRIx64" not support command:%d", pSql->self, pCmd->command);
|
||||
}
|
||||
|
||||
// keep the code in local variable in order to avoid invalid read in case of async query
|
||||
|
|
File diff suppressed because it is too large
|
@ -748,7 +748,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
const int32_t STABLE_INDEX = 1;
|
||||
|
||||
SSqlCmd * pCmd = &pSql->cmd;
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0);
|
||||
|
||||
char *sql = *sqlstr;
|
||||
|
||||
|
@ -829,6 +829,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
index = 0;
|
||||
sToken = tStrGetToken(sql, &index, false);
|
||||
if (sToken.type != TK_TAGS && sToken.type != TK_LP) {
|
||||
tscDestroyBoundColumnInfo(&spd);
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
|
||||
}
|
||||
|
||||
|
@ -841,6 +842,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
char* end = NULL;
|
||||
code = parseBoundColumns(pCmd, &spd, pTagSchema, sql, &end);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscDestroyBoundColumnInfo(&spd);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -858,11 +860,13 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
sql += index;
|
||||
|
||||
if (sToken.type != TK_LP) {
|
||||
tscDestroyBoundColumnInfo(&spd);
|
||||
return tscInvalidSQLErrMsg(pCmd->payload, "( is expected", sToken.z);
|
||||
}
|
||||
|
||||
SKVRowBuilder kvRowBuilder = {0};
|
||||
if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
|
||||
tscDestroyBoundColumnInfo(&spd);
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -875,6 +879,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
|
||||
if (TK_ILLEGAL == sToken.type) {
|
||||
tdDestroyKVRowBuilder(&kvRowBuilder);
|
||||
tscDestroyBoundColumnInfo(&spd);
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
|
@ -892,6 +897,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
|
|||
code = tsParseOneColumn(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tdDestroyKVRowBuilder(&kvRowBuilder);
|
||||
tscDestroyBoundColumnInfo(&spd);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1065,7 +1071,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
int32_t totalNum = 0;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0);
|
||||
assert(pQueryInfo != NULL);
|
||||
|
||||
STableMetaInfo *pTableMetaInfo = (pQueryInfo->numOfTables == 0)? tscAddEmptyMetaInfo(pQueryInfo):tscGetMetaInfo(pQueryInfo, 0);
|
||||
|
@ -1089,7 +1095,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
str = pCmd->curSql;
|
||||
}
|
||||
|
||||
tscDebug("%p create data block list hashList:%p", pSql, pCmd->pTableBlockHashList);
|
||||
tscDebug("0x%"PRIx64" create data block list hashList:%p", pSql->self, pCmd->pTableBlockHashList);
|
||||
|
||||
while (1) {
|
||||
int32_t index = 0;
|
||||
|
@ -1141,7 +1147,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
return code;
|
||||
}
|
||||
|
||||
tscError("%p async insert parse error, code:%s", pSql, tstrerror(code));
|
||||
tscError("0x%"PRIx64" async insert parse error, code:%s", pSql->self, tstrerror(code));
|
||||
pCmd->curSql = NULL;
|
||||
goto _clean;
|
||||
}
|
||||
|
@ -1285,7 +1291,7 @@ int tsInsertInitialCheck(SSqlObj *pSql) {
|
|||
pCmd->count = 0;
|
||||
pCmd->command = TSDB_SQL_INSERT;
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd, pCmd->clauseIndex);
|
||||
|
||||
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_INSERT | pCmd->insertType);
|
||||
|
||||
|
@ -1303,7 +1309,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
|
|||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
if ((!pCmd->parseFinished) && (!initial)) {
|
||||
tscDebug("%p resume to parse sql: %s", pSql, pCmd->curSql);
|
||||
tscDebug("0x%"PRIx64" resume to parse sql: %s", pSql->self, pCmd->curSql);
|
||||
}
|
||||
|
||||
ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
|
||||
|
@ -1375,7 +1381,7 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock
|
|||
return code;
|
||||
}
|
||||
|
||||
return tscProcessSql(pSql);
|
||||
return tscBuildAndSendRequest(pSql, NULL);
|
||||
}
|
||||
|
||||
typedef struct SImportFileSupport {
|
||||
|
@ -1409,7 +1415,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
|
|||
assert(pSql->res.numOfRows == 0);
|
||||
int32_t ret = fseek(fp, 0, SEEK_SET);
|
||||
if (ret < 0) {
|
||||
tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
|
||||
tscError("0x%"PRIx64" failed to seek SEEK_SET since:%s", pSql->self, tstrerror(errno));
|
||||
code = TAOS_SYSTEM_ERROR(errno);
|
||||
goto _error;
|
||||
}
|
||||
|
@ -1521,6 +1527,7 @@ void tscImportDataFromFile(SSqlObj *pSql) {
|
|||
}
|
||||
|
||||
assert(pCmd->dataSourceType == DATA_FROM_DATA_FILE && strlen(pCmd->payload) != 0);
|
||||
pCmd->active = pCmd->pQueryInfo[0];
|
||||
|
||||
SImportFileSupport *pSupporter = calloc(1, sizeof(SImportFileSupport));
|
||||
SSqlObj *pNew = createSubqueryObj(pSql, 0, parseFileSendDataBlock, pSupporter, TSDB_SQL_INSERT, NULL);
|
||||
|
@ -1529,7 +1536,7 @@ void tscImportDataFromFile(SSqlObj *pSql) {
|
|||
FILE *fp = fopen(pCmd->payload, "rb");
|
||||
if (fp == NULL) {
|
||||
pSql->res.code = TAOS_SYSTEM_ERROR(errno);
|
||||
tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
|
||||
tscError("0x%"PRIx64" failed to open file %s to load data from file, code:%s", pSql->self, pCmd->payload, tstrerror(pSql->res.code));
|
||||
|
||||
tfree(pSupporter);
|
||||
taos_free_result(pNew);
|
||||
|
|
|
@ -815,7 +815,7 @@ static int insertStmtExecute(STscStmt* stmt) {
|
|||
pRes->numOfRows = 0;
|
||||
pRes->numOfTotal = 0;
|
||||
|
||||
tscProcessSql(pSql);
|
||||
tscBuildAndSendRequest(pSql, NULL);
|
||||
|
||||
// wait for the callback function to post the semaphore
|
||||
tsem_wait(&pSql->rspSem);
|
||||
|
|
|
@ -61,7 +61,7 @@ void tscAddIntoSqlList(SSqlObj *pSql) {
|
|||
pSql->stime = taosGetTimestampMs();
|
||||
pSql->listed = 1;
|
||||
|
||||
tscDebug("%p added into sqlList", pSql);
|
||||
tscDebug("0x%"PRIx64" added into sqlList", pSql->self);
|
||||
}
|
||||
|
||||
void tscSaveSlowQueryFpCb(void *param, TAOS_RES *result, int code) {
|
||||
|
@ -99,12 +99,12 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
|
|||
return;
|
||||
}
|
||||
|
||||
tscDebug("%p query time:%" PRId64 " sql:%s", pSql, pSql->res.useconds, pSql->sqlstr);
|
||||
tscDebug("0x%"PRIx64" query time:%" PRId64 " sql:%s", pSql->self, pSql->res.useconds, pSql->sqlstr);
|
||||
int32_t sqlSize = (int32_t)(TSDB_SLOW_QUERY_SQL_LEN + size);
|
||||
|
||||
char *sql = malloc(sqlSize);
|
||||
if (sql == NULL) {
|
||||
tscError("%p failed to allocate memory to sent slow query to dnode", pSql);
|
||||
tscError("0x%"PRIx64" failed to allocate memory to sent slow query to dnode", pSql->self);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -141,7 +141,7 @@ void tscRemoveFromSqlList(SSqlObj *pSql) {
|
|||
pSql->listed = 0;
|
||||
|
||||
tscSaveSlowQuery(pSql);
|
||||
tscDebug("%p removed from sqlList", pSql);
|
||||
tscDebug("0x%"PRIx64" removed from sqlList", pSql->self);
|
||||
}
|
||||
|
||||
void tscKillQuery(STscObj *pObj, uint32_t killId) {
|
||||
|
@ -158,7 +158,7 @@ void tscKillQuery(STscObj *pObj, uint32_t killId) {
|
|||
if (pSql == NULL) {
|
||||
tscError("failed to kill query, id:%d, it may have completed/terminated", killId);
|
||||
} else {
|
||||
tscDebug("%p query is killed, queryId:%d", pSql, killId);
|
||||
tscDebug("0x%"PRIx64" query is killed, queryId:%d", pSql->self, killId);
|
||||
taos_stop_query(pSql);
|
||||
}
|
||||
}
|
||||
|
@ -213,7 +213,7 @@ void tscKillStream(STscObj *pObj, uint32_t killId) {
|
|||
pthread_mutex_unlock(&pObj->mutex);
|
||||
|
||||
if (pStream) {
|
||||
tscDebug("%p stream:%p is killed, streamId:%d", pStream->pSql, pStream, killId);
|
||||
tscDebug("0x%"PRIx64" stream:%p is killed, streamId:%d", pStream->pSql->self, pStream, killId);
|
||||
if (pStream->callback) {
|
||||
pStream->callback(pStream->param);
|
||||
}
|
||||
|
@ -249,8 +249,8 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
|
|||
pQdesc->stime = htobe64(pSql->stime);
|
||||
pQdesc->queryId = htonl(pSql->queryId);
|
||||
//pQdesc->useconds = htobe64(pSql->res.useconds);
|
||||
pQdesc->useconds = htobe64(now - pSql->stime); // use local time instead of sever rsp elapsed time
|
||||
pQdesc->qHandle = htobe64(pSql->res.qId);
|
||||
pQdesc->useconds = htobe64(now - pSql->stime);
|
||||
pQdesc->qId = htobe64(pSql->res.qId);
|
||||
|
||||
pHeartbeat->numOfQueries++;
|
||||
pQdesc++;
|
||||
|
@ -273,7 +273,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
|
|||
pSdesc->num = htobe64(pStream->num);
|
||||
|
||||
pSdesc->useconds = htobe64(pStream->useconds);
|
||||
pSdesc->stime = htobe64(pStream->stime - pStream->interval.interval);
|
||||
pSdesc->stime = (pStream->stime == INT64_MIN) ? htobe64(pStream->stime) : htobe64(pStream->stime - pStream->interval.interval);
|
||||
pSdesc->ctime = htobe64(pStream->ctime);
|
||||
|
||||
pSdesc->slidingTime = htobe64(pStream->interval.sliding);
|
||||
|
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -191,7 +191,7 @@ TAOS *taos_connect_internal(const char *ip, const char *user, const char *pass,
|
|||
pSql->fp = syncConnCallback;
|
||||
pSql->param = pSql;
|
||||
|
||||
tscProcessSql(pSql);
|
||||
tscBuildAndSendRequest(pSql, NULL);
|
||||
tsem_wait(&pSql->rspSem);
|
||||
|
||||
if (pSql->res.code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -265,7 +265,7 @@ TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port,
|
|||
if (taos) *taos = pObj;
|
||||
|
||||
pSql->fetchFp = fp;
|
||||
pSql->res.code = tscProcessSql(pSql);
|
||||
pSql->res.code = tscBuildAndSendRequest(pSql, NULL);
|
||||
tscDebug("%p DB async connection is opening", taos);
|
||||
return pObj;
|
||||
}
|
||||
|
@ -292,7 +292,7 @@ void taos_close(TAOS *taos) {
|
|||
pHb->rpcRid = -1;
|
||||
}
|
||||
|
||||
tscDebug("%p HB is freed", pHb);
|
||||
tscDebug("0x%"PRIx64" HB is freed", pHb->self);
|
||||
taosReleaseRef(tscObjRef, pHb->self);
|
||||
#ifdef __APPLE__
|
||||
// to satisfy later tsem_destroy in taos_free_result
|
||||
|
@ -373,7 +373,7 @@ int taos_num_fields(TAOS_RES *res) {
|
|||
if (pSql == NULL || pSql->signature != pSql) return 0;
|
||||
|
||||
int32_t num = 0;
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
if (pQueryInfo == NULL) {
|
||||
return num;
|
||||
}
|
||||
|
@ -408,7 +408,7 @@ TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
|
|||
SSqlRes *pRes = &pSql->res;
|
||||
if (pSql == NULL || pSql->signature != pSql) return 0;
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
if (pQueryInfo == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -559,7 +559,7 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
|
|||
return true;
|
||||
}
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0);
|
||||
|
||||
if ((pQueryInfo == NULL) || tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
|
||||
return true;
|
||||
|
@ -576,9 +576,9 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
|
|||
cmd == TSDB_SQL_FETCH)) {
|
||||
pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
|
||||
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
|
||||
tscDebug("%p send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql, sqlCmd[pCmd->command]);
|
||||
tscDebug("0x%"PRIx64" send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql->self, sqlCmd[pCmd->command]);
|
||||
|
||||
tscProcessSql(pSql);
|
||||
tscBuildAndSendRequest(pSql, NULL);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -588,13 +588,13 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) {
|
|||
void taos_free_result(TAOS_RES *res) {
|
||||
SSqlObj* pSql = (SSqlObj*) res;
|
||||
if (pSql == NULL || pSql->signature != pSql) {
|
||||
tscError("%p already released sqlObj", res);
|
||||
tscError("0x%"PRIx64" already released sqlObj", pSql ? pSql->self : -1);
|
||||
return;
|
||||
}
|
||||
|
||||
bool freeNow = tscKillQueryInDnode(pSql);
|
||||
if (freeNow) {
|
||||
tscDebug("%p free sqlObj in cache", pSql);
|
||||
tscDebug("0x%"PRIx64" free sqlObj in cache", pSql->self);
|
||||
taosReleaseRef(tscObjRef, pSql->self);
|
||||
}
|
||||
}
|
||||
|
@ -672,7 +672,7 @@ char *taos_get_client_info() { return version; }
|
|||
static void tscKillSTableQuery(SSqlObj *pSql) {
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex);
|
||||
|
||||
if (!tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
|
||||
return;
|
||||
|
@ -708,7 +708,7 @@ static void tscKillSTableQuery(SSqlObj *pSql) {
|
|||
|
||||
tscUnlockByThread(&pSql->squeryLock);
|
||||
|
||||
tscDebug("%p super table query cancelled", pSql);
|
||||
tscDebug("0x%"PRIx64" super table query cancelled", pSql->self);
|
||||
}
|
||||
|
||||
void taos_stop_query(TAOS_RES *res) {
|
||||
|
@ -717,13 +717,13 @@ void taos_stop_query(TAOS_RES *res) {
|
|||
return;
|
||||
}
|
||||
|
||||
tscDebug("%p start to cancel query", res);
|
||||
tscDebug("0x%"PRIx64" start to cancel query", pSql->self);
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
|
||||
// set the error code for master pSqlObj firstly
|
||||
pSql->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex);
|
||||
|
||||
if (tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
|
||||
assert(pSql->rpcRid <= 0);
|
||||
|
@ -744,7 +744,7 @@ void taos_stop_query(TAOS_RES *res) {
|
|||
}
|
||||
}
|
||||
|
||||
tscDebug("%p query is cancelled", res);
|
||||
tscDebug("0x%"PRIx64" query is cancelled", pSql->self);
|
||||
}
|
||||
|
||||
bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
|
||||
|
@ -753,7 +753,7 @@ bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
|
|||
return true;
|
||||
}
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
if (pQueryInfo == NULL) {
|
||||
return true;
|
||||
}
|
||||
|
@ -877,19 +877,18 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
|
|||
pRes->numOfClauseTotal = 0;
|
||||
|
||||
|
||||
tscDebug("%p Valid SQL: %s pObj:%p", pSql, sql, pObj);
|
||||
tscDebug("0x%"PRIx64" Valid SQL: %s pObj:%p", pSql->self, sql, pObj);
|
||||
|
||||
int32_t sqlLen = (int32_t)strlen(sql);
|
||||
if (sqlLen > tsMaxSQLStringLen) {
|
||||
tscError("%p sql too long", pSql);
|
||||
tscError("0x%"PRIx64" sql too long", pSql->self);
|
||||
tfree(pSql);
|
||||
return TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
|
||||
}
|
||||
|
||||
pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);
|
||||
if (pSql->sqlstr == NULL) {
|
||||
tscError("%p failed to malloc sql string buffer", pSql);
|
||||
tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj);
|
||||
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
|
||||
tfree(pSql);
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
@ -914,7 +913,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
|
|||
}
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, code, taos_errstr(pSql), pObj);
|
||||
tscError("0x%"PRIx64" invalid SQL result:%d, %s pObj:%p", pSql->self, code, taos_errstr(pSql), pObj);
|
||||
}
|
||||
|
||||
taos_free_result(pSql);
|
||||
|
@ -933,7 +932,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
|
|||
int code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
|
||||
char *str = (char *)tblNameList;
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoS(pCmd, pCmd->clauseIndex);
|
||||
if (pQueryInfo == NULL) {
|
||||
pSql->res.code = terrno;
|
||||
return terrno;
|
||||
|
@ -1027,18 +1026,18 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
|||
pRes->numOfClauseTotal = 0;
|
||||
|
||||
assert(pSql->fp == NULL);
|
||||
tscDebug("%p tableNameList: %s pObj:%p", pSql, tableNameList, pObj);
|
||||
tscDebug("0x%"PRIx64" tableNameList: %s pObj:%p", pSql->self, tableNameList, pObj);
|
||||
|
||||
int32_t tblListLen = (int32_t)strlen(tableNameList);
|
||||
if (tblListLen > MAX_TABLE_NAME_LENGTH) {
|
||||
tscError("%p tableNameList too long, length:%d, maximum allowed:%d", pSql, tblListLen, MAX_TABLE_NAME_LENGTH);
|
||||
tscError("0x%"PRIx64" tableNameList too long, length:%d, maximum allowed:%d", pSql->self, tblListLen, MAX_TABLE_NAME_LENGTH);
|
||||
tscFreeSqlObj(pSql);
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
char *str = calloc(1, tblListLen + 1);
|
||||
if (str == NULL) {
|
||||
tscError("%p failed to malloc sql string buffer", pSql);
|
||||
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
|
||||
tscFreeSqlObj(pSql);
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
|
@ -1048,7 +1047,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
|||
|
||||
/*
|
||||
* set the qhandle to 0 before return in order to erase the qhandle value assigned in the previous successful query.
|
||||
* If qhandle is NOT set 0, the function of taos_free_result() will send message to server by calling tscProcessSql()
|
||||
* If qhandle is NOT set 0, the function of taos_free_result() will send message to server by calling tscBuildAndSendRequest()
|
||||
* to free connection, which may cause segment fault, when the parse phrase is not even successfully executed.
|
||||
*/
|
||||
pRes->qId = 0;
|
||||
|
@ -1061,7 +1060,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
|||
|
||||
tscDoQuery(pSql);
|
||||
|
||||
tscDebug("%p load multi table meta result:%d %s pObj:%p", pSql, pRes->code, taos_errstr(pSql), pObj);
|
||||
tscDebug("0x%"PRIx64" load multi-table meta result:%d %s pObj:%p", pSql->self, pRes->code, taos_errstr(pSql), pObj);
|
||||
if ((code = pRes->code) != TSDB_CODE_SUCCESS) {
|
||||
tscFreeSqlObj(pSql);
|
||||
}
|
||||
|
|
|
@ -37,8 +37,8 @@ static int64_t getDelayValueAfterTimewindowClosed(SSqlStream* pStream, int64_t l
|
|||
|
||||
static bool isProjectStream(SQueryInfo* pQueryInfo) {
|
||||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
|
||||
if (pExpr->functionId != TSDB_FUNC_PRJ) {
|
||||
SExprInfo *pExpr = tscSqlExprGet(pQueryInfo, i);
|
||||
if (pExpr->base.functionId != TSDB_FUNC_PRJ) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
@ -70,7 +70,7 @@ static void setRetryInfo(SSqlStream* pStream, int32_t code) {
|
|||
|
||||
pSql->res.code = code;
|
||||
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
|
||||
tscDebug("%p stream:%p, get table Meta failed, retry in %" PRId64 "ms", pSql, pStream, retryDelayTime);
|
||||
tscDebug("0x%"PRIx64" stream:%p, get table Meta failed, retry in %" PRId64 "ms", pSql->self, pStream, retryDelayTime);
|
||||
tscSetRetryTimer(pStream, pSql, retryDelayTime);
|
||||
}
|
||||
|
||||
|
@ -89,7 +89,7 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
|
|||
return;
|
||||
}
|
||||
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
|
||||
code = tscGetTableMeta(pSql, pTableMetaInfo);
|
||||
|
@ -101,14 +101,22 @@ static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) {
|
|||
return;
|
||||
}
|
||||
|
||||
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) && (pTableMetaInfo->pVgroupTables == NULL) && (pTableMetaInfo->vgroupList == NULL || pTableMetaInfo->vgroupList->numOfVgroups <= 0)) {
|
||||
tscDebug("0x%"PRIx64" empty vgroup list", pSql->self);
|
||||
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
|
||||
code = TSDB_CODE_TSC_APP_ERROR;
|
||||
}
|
||||
|
||||
// failed to get table Meta or vgroup list, retry in 10sec.
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
tscTansformFuncForSTableQuery(pQueryInfo);
|
||||
tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, tNameGetTableName(&pTableMetaInfo->name));
|
||||
tscDebug("0x%"PRIx64" stream:%p started to query table:%s", pSql->self, pStream, tNameGetTableName(&pTableMetaInfo->name));
|
||||
|
||||
pQueryInfo->command = TSDB_SQL_SELECT;
|
||||
|
||||
pSql->fp = tscProcessStreamQueryCallback;
|
||||
pSql->fetchFp = tscProcessStreamQueryCallback;
|
||||
tscDoQuery(pSql);
|
||||
executeQuery(pSql, pQueryInfo);
|
||||
tscIncStreamExecutionCount(pStream);
|
||||
} else {
|
||||
setRetryInfo(pStream, code);
|
||||
|
@ -130,8 +138,8 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
|
|||
|
||||
pStream->numOfRes = 0; // reset the numOfRes.
|
||||
SSqlObj *pSql = pStream->pSql;
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
tscDebug("%p add into timer", pSql);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
tscDebug("0x%"PRIx64" timer launch query", pSql->self);
|
||||
|
||||
if (pStream->isProject) {
|
||||
/*
|
||||
|
@ -186,8 +194,8 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
|
|||
SSqlStream *pStream = (SSqlStream *)param;
|
||||
if (tres == NULL || numOfRows < 0) {
|
||||
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
|
||||
tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
|
||||
retryDelay);
|
||||
tscError("0x%"PRIx64" stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql->self,
|
||||
pStream, numOfRows, retryDelay);
|
||||
|
||||
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
|
||||
|
||||
|
@ -195,6 +203,14 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
|
|||
tNameExtractFullName(&pTableMetaInfo->name, name);
|
||||
|
||||
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
|
||||
|
||||
tfree(pTableMetaInfo->pTableMeta);
|
||||
|
||||
tscFreeSqlResult(pStream->pSql);
|
||||
tscFreeSubobj(pStream->pSql);
|
||||
tfree(pStream->pSql->pSubs);
|
||||
pStream->pSql->subState.numOfSub = 0;
|
||||
|
||||
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
|
||||
|
||||
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
|
||||
|
@ -208,7 +224,7 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
|
|||
static void tscStreamFillTimeGap(SSqlStream* pStream, TSKEY ts) {
|
||||
#if 0
|
||||
SSqlObj * pSql = pStream->pSql;
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
|
||||
if (pQueryInfo->fillType != TSDB_FILL_SET_VALUE && pQueryInfo->fillType != TSDB_FILL_NULL) {
|
||||
return;
|
||||
|
@ -237,7 +253,7 @@ static void tscStreamFillTimeGap(SSqlStream* pStream, TSKEY ts) {
|
|||
}
|
||||
|
||||
if (rowNum > 0) {
|
||||
tscDebug("%p stream:%p %d rows padded", pSql, pStream, rowNum);
|
||||
tscDebug("0x%"PRIx64" stream:%p %d rows padded", pSql, pStream, rowNum);
|
||||
}
|
||||
|
||||
pRes->numOfRows = 0;
|
||||
|
@ -251,19 +267,20 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
|||
|
||||
if (pSql == NULL || numOfRows < 0) {
|
||||
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
|
||||
tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
|
||||
tscError("0x%"PRIx64" stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 " ms", pSql->self, pStream, numOfRows, retryDelayTime);
|
||||
|
||||
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
|
||||
return;
|
||||
}
|
||||
|
||||
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
STableMetaInfo *pTableMetaInfo = pQueryInfo->pTableMetaInfo[0];
|
||||
|
||||
if (numOfRows > 0) { // when reaching here the first execution of stream computing is successful.
|
||||
for(int32_t i = 0; i < numOfRows; ++i) {
|
||||
TAOS_ROW row = taos_fetch_row(res);
|
||||
if (row != NULL) {
|
||||
tscDebug("%p stream:%p fetch result", pSql, pStream);
|
||||
tscDebug("0x%"PRIx64" stream:%p fetch result", pSql->self, pStream);
|
||||
tscStreamFillTimeGap(pStream, *(TSKEY*)row[0]);
|
||||
pStream->stime = *(TSKEY *)row[0];
|
||||
// user callback function
|
||||
|
@ -284,7 +301,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
|||
/* no results in the query range, retry */
|
||||
// todo set retry dynamic time
|
||||
int32_t retry = tsProjectExecInterval;
|
||||
tscError("%p stream:%p, retrieve no data, code:0x%08x, retry in %" PRId32 "ms", pSql, pStream, numOfRows, retry);
|
||||
tscError("0x%"PRIx64" stream:%p, retrieve no data, code:0x%08x, retry in %" PRId32 "ms", pSql->self, pStream, numOfRows, retry);
|
||||
|
||||
tscSetRetryTimer(pStream, pStream->pSql, retry);
|
||||
return;
|
||||
|
@ -293,10 +310,14 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
|
|||
pStream->stime += 1;
|
||||
}
|
||||
|
||||
tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, tNameGetTableName(&pTableMetaInfo->name),
|
||||
tscDebug("0x%"PRIx64" stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql->self, pStream, tNameGetTableName(&pTableMetaInfo->name),
|
||||
pStream->numOfRes);
|
||||
|
||||
tfree(pTableMetaInfo->pTableMeta);
|
||||
if (pQueryInfo->pQInfo != NULL) {
|
||||
qDestroyQueryInfo(pQueryInfo->pQInfo);
|
||||
pQueryInfo->pQInfo = NULL;
|
||||
}
|
||||
|
||||
tscFreeSqlResult(pSql);
|
||||
tscFreeSubobj(pSql);
|
||||
|
@ -318,8 +339,8 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
|
|||
/*
|
||||
* current time window will be closed, since it is too early to exceed the maxRetentWindow value
|
||||
*/
|
||||
tscDebug("%p stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream",
|
||||
pStream->pSql, pStream, pStream->stime, pStream->etime);
|
||||
tscDebug("0x%"PRIx64" stream:%p, etime:%" PRId64 " is too old, exceeds the max retention time window:%" PRId64 ", stop the stream",
|
||||
pStream->pSql->self, pStream, pStream->stime, pStream->etime);
|
||||
// TODO : How to terminate stream here
|
||||
if (pStream->callback) {
|
||||
// Callback function from upper level
|
||||
|
@ -329,10 +350,10 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
|
|||
return;
|
||||
}
|
||||
|
||||
tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
|
||||
tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 "(ts window ekey), in %" PRId64 " ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
|
||||
now + timer, timer, delay, pStream->stime, etime);
|
||||
} else {
|
||||
tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
|
||||
tscDebug("0x%"PRIx64" stream:%p, next start at %" PRId64 "(ts window ekey), in %" PRId64 " ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql->self, pStream,
|
||||
pStream->stime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1);
|
||||
}
|
||||
|
||||
|
@ -378,8 +399,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
|
|||
*/
|
||||
timer = pStream->interval.sliding;
|
||||
if (pStream->stime > pStream->etime) {
|
||||
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
|
||||
pStream->stime, pStream->etime);
|
||||
tscDebug("0x%"PRIx64" stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream",
|
||||
pStream->pSql->self, pStream, pStream->stime, pStream->etime);
|
||||
// TODO : How to terminate stream here
|
||||
if (pStream->callback) {
|
||||
// Callback function from upper level
|
||||
|
@ -390,9 +411,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
|
|||
}
|
||||
} else {
|
||||
int64_t stime = taosTimeTruncate(pStream->stime - 1, &pStream->interval, pStream->precision);
|
||||
//int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
|
||||
if (stime >= pStream->etime) {
|
||||
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
|
||||
tscDebug("0x%"PRIx64" stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql->self, pStream,
|
||||
pStream->stime, pStream->etime);
|
||||
// TODO : How to terminate stream here
|
||||
if (pStream->callback) {
|
||||
|
@ -402,10 +422,12 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
|
|||
taos_close_stream(pStream);
|
||||
return;
|
||||
}
|
||||
|
||||
timer = pStream->stime - taosGetTimestamp(pStream->precision);
|
||||
if (timer < 0) {
|
||||
timer = 0;
|
||||
|
||||
if (pStream->stime > 0) {
|
||||
timer = pStream->stime - taosGetTimestamp(pStream->precision);
|
||||
if (timer < 0) {
|
||||
timer = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -422,7 +444,7 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
|
|||
int64_t minIntervalTime =
|
||||
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime;
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
|
||||
if (!pStream->isProject && pQueryInfo->interval.interval == 0) {
|
||||
sprintf(pSql->cmd.payload, "the interval value is 0");
|
||||
|
@ -430,7 +452,7 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
|
|||
}
|
||||
|
||||
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) {
|
||||
tscWarn("%p stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
|
||||
tscWarn("0x%"PRIx64" stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream,
|
||||
(int64_t)pQueryInfo->interval.interval, minIntervalTime);
|
||||
pQueryInfo->interval.interval = minIntervalTime;
|
||||
}
|
||||
|
@ -447,14 +469,14 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
|
|||
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
|
||||
|
||||
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
|
||||
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
|
||||
tscWarn("0x%"PRIx64" stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream,
|
||||
pQueryInfo->interval.sliding, minSlidingTime);
|
||||
|
||||
pQueryInfo->interval.sliding = minSlidingTime;
|
||||
}
|
||||
|
||||
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
|
||||
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql, pStream,
|
||||
tscWarn("0x%"PRIx64" stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql->self, pStream,
|
||||
pQueryInfo->interval.sliding, pQueryInfo->interval.interval);
|
||||
|
||||
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
|
||||
|
@ -472,29 +494,32 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
|
|||
}
|
||||
|
||||
static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) {
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, 0);
|
||||
|
||||
if (pStream->isProject) {
|
||||
// no data in table, flush all data till now to destination meter, 10sec delay
|
||||
pStream->interval.interval = tsProjectExecInterval;
|
||||
pStream->interval.sliding = tsProjectExecInterval;
|
||||
|
||||
if (stime != 0) { // first projection start from the latest event timestamp
|
||||
if (stime != INT64_MIN) { // first projection start from the latest event timestamp
|
||||
assert(stime >= pQueryInfo->window.skey);
|
||||
stime += 1; // exclude the last records from table
|
||||
} else {
|
||||
stime = pQueryInfo->window.skey;
|
||||
}
|
||||
} else { // timewindow based aggregation stream
|
||||
if (stime == 0) { // no data in meter till now
|
||||
if (stime == INT64_MIN) { // no data in meter till now
|
||||
if (pQueryInfo->window.skey != INT64_MIN) {
|
||||
stime = pQueryInfo->window.skey;
|
||||
} else {
|
||||
return stime;
|
||||
}
|
||||
|
||||
stime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
|
||||
} else {
|
||||
int64_t newStime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
|
||||
if (newStime != stime) {
|
||||
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
|
||||
tscWarn("0x%"PRIx64" stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql->self, pStream, stime, newStime);
|
||||
stime = newStime;
|
||||
}
|
||||
}
|
||||
|
@ -525,13 +550,13 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
|
|||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
pSql->res.code = code;
|
||||
tscError("%p open stream failed, sql:%s, reason:%s, code:%s", pSql, pSql->sqlstr, pCmd->payload, tstrerror(code));
|
||||
tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:%s", pSql->self, pSql->sqlstr, pCmd->payload, tstrerror(code));
|
||||
|
||||
pStream->fp(pStream->param, NULL, NULL);
|
||||
return;
|
||||
}
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, 0);
|
||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
|
||||
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
|
||||
|
||||
|
@ -544,7 +569,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
|
|||
if (tscSetSlidingWindowInfo(pSql, pStream) != TSDB_CODE_SUCCESS) {
|
||||
pSql->res.code = code;
|
||||
|
||||
tscError("%p stream %p open failed, since the interval value is incorrect", pSql, pStream);
|
||||
tscError("0x%"PRIx64" stream %p open failed, since the interval value is incorrect", pSql->self, pStream);
|
||||
pStream->fp(pStream->param, NULL, NULL);
|
||||
return;
|
||||
}
|
||||
|
@ -558,7 +583,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
|
|||
|
||||
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
|
||||
|
||||
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
|
||||
tscDebug("0x%"PRIx64" stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql->self,
|
||||
pStream, tNameGetTableName(&pTableMetaInfo->name), pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
|
||||
}
|
||||
|
||||
|
@ -584,7 +609,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
|
|||
|
||||
SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream));
|
||||
if (pStream == NULL) {
|
||||
tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, sqlstr, pCmd->payload, pRes->code);
|
||||
tscError("0x%"PRIx64" open stream failed, sql:%s, reason:%s, code:0x%08x", pSql->self, sqlstr, pCmd->payload, pRes->code);
|
||||
tscFreeSqlObj(pSql);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -600,26 +625,26 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
|
|||
|
||||
pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
|
||||
if (pSql->sqlstr == NULL) {
|
||||
tscError("%p failed to malloc sql string buffer", pSql);
|
||||
tscError("0x%"PRIx64" failed to malloc sql string buffer", pSql->self);
|
||||
tscFreeSqlObj(pSql);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
strtolower(pSql->sqlstr, sqlstr);
|
||||
|
||||
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
|
||||
registerSqlObj(pSql);
|
||||
|
||||
tscDebugL("0x%"PRIx64" SQL: %s", pSql->self, pSql->sqlstr);
|
||||
tsem_init(&pSql->rspSem, 0, 0);
|
||||
|
||||
pSql->fp = tscCreateStream;
|
||||
pSql->fetchFp = tscCreateStream;
|
||||
|
||||
registerSqlObj(pSql);
|
||||
|
||||
int32_t code = tsParseSql(pSql, true);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
tscCreateStream(pStream, pSql, code);
|
||||
} else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
|
||||
tscError("%p open stream failed, sql:%s, code:%s", pSql, sqlstr, tstrerror(pRes->code));
|
||||
tscError("0x%"PRIx64" open stream failed, sql:%s, code:%s", pSql->self, sqlstr, tstrerror(code));
|
||||
taosReleaseRef(tscObjRef, pSql->self);
|
||||
free(pStream);
|
||||
return NULL;
|
||||
|
@ -645,7 +670,7 @@ void taos_close_stream(TAOS_STREAM *handle) {
|
|||
|
||||
taosTmrStopA(&(pStream->pTimer));
|
||||
|
||||
tscDebug("%p stream:%p is closed", pSql, pStream);
|
||||
tscDebug("0x%"PRIx64" stream:%p is closed", pSql->self, pStream);
|
||||
// notify CQ to release the pStream object
|
||||
pStream->fp(pStream->param, NULL, NULL);
|
||||
pStream->pSql = NULL;
|
||||
|
|
|
@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) {
|
|||
|
||||
SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
|
||||
if (pNew == NULL) {
|
||||
tscError("failed to retrieve table id: cannot create new sql object.");
|
||||
tscError("0x%"PRIx64"failed to retrieve table id: cannot create new sql object.", pSql->self);
|
||||
return NULL;
|
||||
|
||||
} else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
|
||||
tscError("failed to retrieve table id: %s", tstrerror(taos_errno(pNew)));
|
||||
tscError("0x%"PRIx64"failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew)));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -284,7 +284,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
|
|||
}
|
||||
size_t numOfTables = taosArrayGetSize(tables);
|
||||
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
|
||||
SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, 0);
|
||||
SArray* progress = taosArrayInit(numOfTables, sizeof(SSubscriptionProgress));
|
||||
for( size_t i = 0; i < numOfTables; i++ ) {
|
||||
STidTags* tt = taosArrayGet( tables, i );
|
||||
|
@ -304,7 +304,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
|
|||
}
|
||||
taosArrayDestroy(tables);
|
||||
|
||||
TSDB_QUERY_SET_TYPE(tscGetQueryInfoDetail(pCmd, 0)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
|
||||
TSDB_QUERY_SET_TYPE(tscGetQueryInfo(pCmd, 0)->type, TSDB_QUERY_TYPE_MULTITABLE_QUERY);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -487,11 +487,13 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
|
|||
if (pSql == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (pSub->pSql->self != 0) {
|
||||
taosReleaseRef(tscObjRef, pSub->pSql->self);
|
||||
} else {
|
||||
tscFreeSqlObj(pSub->pSql);
|
||||
}
|
||||
|
||||
pSub->pSql = pSql;
|
||||
pSql->pSubscription = pSub;
|
||||
}
|
||||
|
@ -502,7 +504,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
|
|||
SSqlRes *pRes = &pSql->res;
|
||||
SSqlCmd *pCmd = &pSql->cmd;
|
||||
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
|
||||
SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0);
|
||||
if (taosArrayGetSize(pSub->progress) > 0) { // fix crash in single table subscription
|
||||
|
||||
size_t size = taosArrayGetSize(pSub->progress);
|
||||
|
@ -555,7 +557,10 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
|
|||
pSql->fp = asyncCallback;
|
||||
pSql->fetchFp = asyncCallback;
|
||||
pSql->param = pSub;
|
||||
tscDoQuery(pSql);
|
||||
|
||||
pSql->cmd.active = pQueryInfo;
|
||||
executeQuery(pSql, pQueryInfo);
|
||||
|
||||
tsem_wait(&pSub->sem);
|
||||
|
||||
if (pRes->code != TSDB_CODE_SUCCESS) {
|
||||
|
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -13,8 +13,8 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef TDENGINE_TAST_H
|
||||
#define TDENGINE_TAST_H
|
||||
#ifndef TDENGINE_TEXPR_H
|
||||
#define TDENGINE_TEXPR_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -62,32 +62,32 @@ typedef struct tExprNode {
|
|||
uint8_t nodeType;
|
||||
union {
|
||||
struct {
|
||||
uint8_t optr; // filter operator
|
||||
uint8_t hasPK; // 0: do not contain primary filter, 1: contain
|
||||
void * info; // support filter operation on this expression only available for leaf node
|
||||
|
||||
uint8_t optr; // filter operator
|
||||
uint8_t hasPK; // 0: do not contain primary filter, 1: contain
|
||||
void *info; // support filter operation on this expression only available for leaf node
|
||||
struct tExprNode *pLeft; // left child pointer
|
||||
struct tExprNode *pRight; // right child pointer
|
||||
} _node;
|
||||
struct SSchema *pSchema;
|
||||
tVariant * pVal;
|
||||
|
||||
struct SSchema *pSchema;
|
||||
tVariant *pVal;
|
||||
};
|
||||
} tExprNode;
|
||||
|
||||
typedef struct SExprTraverseSupp {
|
||||
__result_filter_fn_t nodeFilterFn;
|
||||
__do_filter_suppl_fn_t setupInfoFn;
|
||||
void * pExtInfo;
|
||||
void *pExtInfo;
|
||||
} SExprTraverseSupp;
|
||||
|
||||
void tExprTreeDestroy(tExprNode *pNode, void (*fp)(void *));
|
||||
|
||||
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
|
||||
tExprNode* exprTreeFromBinary(const void* data, size_t size);
|
||||
tExprNode* exprTreeFromTableName(const char* tbnameCond);
|
||||
tExprNode* exprdup(tExprNode* pTree);
|
||||
|
||||
void exprTreeToBinary(SBufferWriter* bw, tExprNode* pExprTree);
|
||||
|
||||
bool exprTreeApplayFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
|
||||
bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param);
|
||||
|
||||
typedef void (*_arithmetic_operator_fn_t)(void *left, int32_t numLeft, int32_t leftType, void *right, int32_t numRight,
|
||||
int32_t rightType, void *output, int32_t order);
|
||||
|
@ -99,4 +99,4 @@ void arithmeticTreeTraverse(tExprNode *pExprs, int32_t numOfRows, char *pOutput,
|
|||
}
|
||||
#endif
|
||||
|
||||
#endif // TDENGINE_TAST_H
|
||||
#endif // TDENGINE_TEXPR_H
|
||||
|
|
|
@ -41,6 +41,35 @@ typedef struct SResPair {
|
|||
double avg;
|
||||
} SResPair;
|
||||
|
||||
// the structure for sql function in select clause
|
||||
typedef struct SSqlExpr {
|
||||
char aliasName[TSDB_COL_NAME_LEN]; // as aliasName
|
||||
SColIndex colInfo;
|
||||
|
||||
uint64_t uid; // refactor use the pointer
|
||||
|
||||
int16_t functionId; // function id in aAgg array
|
||||
|
||||
int16_t resType; // return value type
|
||||
int16_t resBytes; // length of return value
|
||||
int32_t interBytes; // inter result buffer size
|
||||
|
||||
int16_t colType; // table column type
|
||||
int16_t colBytes; // table column bytes
|
||||
|
||||
int16_t numOfParams; // argument value of each function
|
||||
tVariant param[3]; // parameters are not more than 3
|
||||
int32_t offset; // sub result column value of arithmetic expression.
|
||||
int16_t resColId; // result column id
|
||||
|
||||
SColumnFilterList flist;
|
||||
} SSqlExpr;
|
||||
|
||||
typedef struct SExprInfo {
|
||||
SSqlExpr base;
|
||||
struct tExprNode *pExpr;
|
||||
} SExprInfo;
|
||||
|
||||
#define TSDB_DB_NAME_T 1
|
||||
#define TSDB_TABLE_NAME_T 2
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
#include "os.h"
|
||||
|
||||
#include "texpr.h"
|
||||
#include "exception.h"
|
||||
#include "taosdef.h"
|
||||
#include "taosmsg.h"
|
||||
|
@ -145,25 +146,25 @@ static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) {
|
|||
*pExpr = NULL;
|
||||
}
|
||||
|
||||
bool exprTreeApplayFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param) {
|
||||
bool exprTreeApplyFilter(tExprNode *pExpr, const void *pItem, SExprTraverseSupp *param) {
|
||||
tExprNode *pLeft = pExpr->_node.pLeft;
|
||||
tExprNode *pRight = pExpr->_node.pRight;
|
||||
|
||||
//non-leaf nodes, recursively traverse the expression tree in the post-root order
|
||||
if (pLeft->nodeType == TSQL_NODE_EXPR && pRight->nodeType == TSQL_NODE_EXPR) {
|
||||
if (pExpr->_node.optr == TSDB_RELATION_OR) { // or
|
||||
if (exprTreeApplayFilter(pLeft, pItem, param)) {
|
||||
if (exprTreeApplyFilter(pLeft, pItem, param)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// left child does not satisfy the query condition, try right child
|
||||
return exprTreeApplayFilter(pRight, pItem, param);
|
||||
return exprTreeApplyFilter(pRight, pItem, param);
|
||||
} else { // and
|
||||
if (!exprTreeApplayFilter(pLeft, pItem, param)) {
|
||||
if (!exprTreeApplyFilter(pLeft, pItem, param)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return exprTreeApplayFilter(pRight, pItem, param);
|
||||
return exprTreeApplyFilter(pRight, pItem, param);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -463,3 +464,28 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
|
|||
CLEANUP_EXECUTE_TO(anchor, false);
|
||||
return expr;
|
||||
}
|
||||
|
||||
tExprNode* exprdup(tExprNode* pTree) {
|
||||
if (pTree == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tExprNode* pNode = calloc(1, sizeof(tExprNode));
|
||||
if (pTree->nodeType == TSQL_NODE_EXPR) {
|
||||
tExprNode* pLeft = exprdup(pTree->_node.pLeft);
|
||||
tExprNode* pRight = exprdup(pTree->_node.pRight);
|
||||
|
||||
pNode->nodeType = TSQL_NODE_EXPR;
|
||||
pNode->_node.pLeft = pLeft;
|
||||
pNode->_node.pRight = pRight;
|
||||
} else if (pTree->nodeType == TSQL_NODE_VALUE) {
|
||||
pNode->pVal = calloc(1, sizeof(tVariant));
|
||||
tVariantAssign(pNode->pVal, pTree->pVal);
|
||||
} else if (pTree->nodeType == TSQL_NODE_COL) {
|
||||
pNode->pSchema = calloc(1, sizeof(SSchema));
|
||||
*pNode->pSchema = *pTree->pSchema;
|
||||
}
|
||||
|
||||
return pNode;
|
||||
}
|
||||
|
||||
|
|
|
@ -139,7 +139,7 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
|
|||
int8_t tsEnableBalance = 1;
|
||||
int8_t tsAlternativeRole = 0;
|
||||
int32_t tsBalanceInterval = 300; // seconds
|
||||
int32_t tsOfflineThreshold = 86400 * 100; // seconds 100 days
|
||||
int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days
|
||||
int32_t tsMnodeEqualVnodeNum = 4;
|
||||
int8_t tsEnableFlowCtrl = 1;
|
||||
int8_t tsEnableSlaveQuery = 1;
|
||||
|
@ -921,7 +921,7 @@ static void doInitGlobalConfig(void) {
|
|||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
|
||||
cfg.minValue = -1;
|
||||
cfg.maxValue = 10000000;
|
||||
cfg.maxValue = 10000000000.0f;
|
||||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
|
|
@ -68,6 +68,7 @@ bool tscValidateTableNameLength(size_t len) {
|
|||
return len < TSDB_TABLE_NAME_LEN;
|
||||
}
|
||||
|
||||
// TODO refactor
|
||||
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters) {
|
||||
if (numOfFilters == 0) {
|
||||
assert(src == NULL);
|
||||
|
|
|
@ -48,8 +48,19 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
|
|||
case TSDB_DATA_TYPE_INT:{
|
||||
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true);
|
||||
if (ret != 0) {
|
||||
pVar->nType = -1; // -1 means error type
|
||||
return;
|
||||
SStrToken t = {0};
|
||||
tSQLGetToken(token->z, &t.type);
|
||||
if (t.type == TK_MINUS) { // it is a signed number which is greater than INT64_MAX or less than INT64_MIN
|
||||
pVar->nType = -1; // -1 means error type
|
||||
return;
|
||||
}
|
||||
|
||||
// data overflow, try unsigned parse the input number
|
||||
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, false);
|
||||
if (ret != 0) {
|
||||
pVar->nType = -1; // -1 means error type
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
break;
|
||||
|
@ -525,6 +536,8 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
|
|||
}
|
||||
|
||||
bool code = false;
|
||||
|
||||
uint64_t ui = 0;
|
||||
switch(type) {
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
code = IS_VALID_TINYINT(*result); break;
|
||||
|
@ -535,13 +548,17 @@ static FORCE_INLINE int32_t convertToInteger(tVariant *pVariant, int64_t *result
|
|||
case TSDB_DATA_TYPE_BIGINT:
|
||||
code = IS_VALID_BIGINT(*result); break;
|
||||
case TSDB_DATA_TYPE_UTINYINT:
|
||||
code = IS_VALID_UTINYINT(*result); break;
|
||||
ui = *result;
|
||||
code = IS_VALID_UTINYINT(ui); break;
|
||||
case TSDB_DATA_TYPE_USMALLINT:
|
||||
code = IS_VALID_USMALLINT(*result); break;
|
||||
ui = *result;
|
||||
code = IS_VALID_USMALLINT(ui); break;
|
||||
case TSDB_DATA_TYPE_UINT:
|
||||
code = IS_VALID_UINT(*result); break;
|
||||
ui = *result;
|
||||
code = IS_VALID_UINT(ui); break;
|
||||
case TSDB_DATA_TYPE_UBIGINT:
|
||||
code = IS_VALID_UBIGINT(*result); break;
|
||||
ui = *result;
|
||||
code = IS_VALID_UBIGINT(ui); break;
|
||||
}
|
||||
|
||||
return code? 0:-1;
|
||||
|
|
|
@ -31,7 +31,11 @@ namespace TDengineDriver
|
|||
TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes
|
||||
TSDB_DATA_TYPE_BINARY = 8, // string
|
||||
TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes
|
||||
TSDB_DATA_TYPE_NCHAR = 10 // unicode string
|
||||
TSDB_DATA_TYPE_NCHAR = 10, // unicode string
|
||||
TSDB_DATA_TYPE_UTINYINT = 11,// 1 byte
|
||||
TSDB_DATA_TYPE_USMALLINT= 12,// 2 bytes
|
||||
TSDB_DATA_TYPE_UINT = 13, // 4 bytes
|
||||
TSDB_DATA_TYPE_UBIGINT= 14 // 8 bytes
|
||||
}
|
||||
|
||||
enum TDengineInitOption
|
||||
|
@ -53,15 +57,23 @@ namespace TDengineDriver
|
|||
switch ((TDengineDataType)type)
|
||||
{
|
||||
case TDengineDataType.TSDB_DATA_TYPE_BOOL:
|
||||
return "BOOLEAN";
|
||||
return "BOOL";
|
||||
case TDengineDataType.TSDB_DATA_TYPE_TINYINT:
|
||||
return "BYTE";
|
||||
return "TINYINT";
|
||||
case TDengineDataType.TSDB_DATA_TYPE_SMALLINT:
|
||||
return "SHORT";
|
||||
return "SMALLINT";
|
||||
case TDengineDataType.TSDB_DATA_TYPE_INT:
|
||||
return "INT";
|
||||
case TDengineDataType.TSDB_DATA_TYPE_BIGINT:
|
||||
return "LONG";
|
||||
return "BIGINT";
|
||||
case TDengineDataType.TSDB_DATA_TYPE_UTINYINT:
|
||||
return "TINYINT UNSIGNED";
|
||||
case TDengineDataType.TSDB_DATA_TYPE_USMALLINT:
|
||||
return "SMALLINT UNSIGNED";
|
||||
case TDengineDataType.TSDB_DATA_TYPE_UINT:
|
||||
return "INT UNSIGNED";
|
||||
case TDengineDataType.TSDB_DATA_TYPE_UBIGINT:
|
||||
return "BIGINT UNSIGNED";
|
||||
case TDengineDataType.TSDB_DATA_TYPE_FLOAT:
|
||||
return "FLOAT";
|
||||
case TDengineDataType.TSDB_DATA_TYPE_DOUBLE:
|
||||
|
|
|
@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
|
|||
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
|
||||
POST_BUILD
|
||||
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.26-dist.jar ${LIBRARY_OUTPUT_PATH}
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.28-dist.jar ${LIBRARY_OUTPUT_PATH}
|
||||
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
|
||||
COMMENT "build jdbc driver")
|
||||
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>2.0.26</version>
|
||||
<version>2.0.28</version>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<name>JDBCDriver</name>
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>2.0.26</version>
|
||||
<version>2.0.28</version>
|
||||
<packaging>jar</packaging>
|
||||
<name>JDBCDriver</name>
|
||||
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
|
||||
|
|
|
@ -4,13 +4,23 @@ import java.sql.*;
|
|||
import java.util.Enumeration;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.*;
|
||||
|
||||
public abstract class AbstractConnection extends WrapperImpl implements Connection {
|
||||
|
||||
protected volatile boolean isClosed;
|
||||
protected volatile String catalog;
|
||||
protected volatile Properties clientInfoProps = new Properties();
|
||||
protected final Properties clientInfoProps = new Properties();
|
||||
|
||||
protected AbstractConnection(Properties properties) {
|
||||
Set<String> propNames = properties.stringPropertyNames();
|
||||
for (String propName : propNames) {
|
||||
clientInfoProps.setProperty(propName, properties.getProperty(propName));
|
||||
}
|
||||
String timestampFormat = properties.getProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "STRING");
|
||||
clientInfoProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, timestampFormat);
|
||||
}
|
||||
|
||||
@Override
|
||||
public abstract Statement createStatement() throws SQLException;
|
||||
|
@ -35,7 +45,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
|
|||
}
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public void setAutoCommit(boolean autoCommit) throws SQLException {
|
||||
if (isClosed())
|
||||
|
@ -441,9 +450,8 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti
|
|||
if (isClosed)
|
||||
throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED);
|
||||
|
||||
if (clientInfoProps == null)
|
||||
clientInfoProps = new Properties();
|
||||
clientInfoProps.setProperty(name, value);
|
||||
if (clientInfoProps != null)
|
||||
clientInfoProps.setProperty(name, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -10,6 +10,7 @@ import java.util.Map;
|
|||
|
||||
public abstract class AbstractResultSet extends WrapperImpl implements ResultSet {
|
||||
private int fetchSize;
|
||||
protected boolean wasNull;
|
||||
|
||||
protected void checkAvailability(int columnIndex, int bounds) throws SQLException {
|
||||
if (isClosed())
|
||||
|
@ -28,7 +29,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet
|
|||
|
||||
@Override
|
||||
public boolean wasNull() throws SQLException {
|
||||
return false;
|
||||
return wasNull;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -28,6 +28,7 @@ public class TSDBConnection extends AbstractConnection {
|
|||
}
|
||||
|
||||
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
|
||||
super(info);
|
||||
this.databaseMetaData = meta;
|
||||
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
|
||||
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
|
||||
|
|
|
@ -95,6 +95,11 @@ public class TSDBDriver extends AbstractDriver {
|
|||
*/
|
||||
public static final String PROPERTY_KEY_BATCH_LOAD = "batchfetch";
|
||||
|
||||
/**
|
||||
* timestamp format for JDBC-RESTful, should be one of the options: string, timestamp or utc
|
||||
*/
|
||||
public static final String PROPERTY_KEY_TIMESTAMP_FORMAT = "timestampFormat";
|
||||
|
||||
private TSDBDatabaseMetaData dbMetaData = null;
|
||||
|
||||
static {
|
||||
|
|
|
@ -203,7 +203,11 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
|
|||
|
||||
this.lastWasNull = this.rowData.wasNull(columnIndex - 1);
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
|
||||
Object value = this.rowData.get(columnIndex - 1);
|
||||
if (value instanceof Timestamp)
|
||||
res = ((Timestamp) value).getTime();
|
||||
else
|
||||
res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType());
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
@ -273,7 +277,6 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, this.columnMetaDataList.size());
|
||||
|
||||
Timestamp res = null;
|
||||
|
||||
if (this.getBatchFetch())
|
||||
return this.blockData.getTimestamp(columnIndex - 1);
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@ package com.taosdata.jdbc;
|
|||
import java.math.BigDecimal;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Timestamp;
|
||||
import java.time.Instant;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
|
||||
|
@ -299,7 +300,19 @@ public class TSDBResultSetRowData {
|
|||
}
|
||||
|
||||
public void setTimestamp(int col, long ts) {
|
||||
data.set(col, new Timestamp(ts));
|
||||
//TODO: this implementation contains a logical error
|
||||
// when precision is us, the (long ts) is a 16-digit number
|
||||
// when precision is ms, the (long ts) is a 13-digit number
|
||||
// we need a JNI function like this:
|
||||
// public void setTimestamp(int col, long epochSecond, long nanoAdjustment)
|
||||
if (ts < 1_0000_0000_0000_0L) {
|
||||
data.set(col, new Timestamp(ts));
|
||||
} else {
|
||||
long epochSec = ts / 1000_000l;
|
||||
long nanoAdjustment = ts % 1000_000l * 1000l;
|
||||
Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
|
||||
data.set(col, timestamp);
|
||||
}
|
||||
}
|
||||
|
||||
public Timestamp getTimestamp(int col) {
|
||||
|
|
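The setTimestamp() change above distinguishes millisecond from microsecond epoch values purely by digit count (below 10^13 is treated as ms, otherwise as us). A minimal standalone sketch of that heuristic follows; the class and method names are illustrative assumptions, not part of the driver:

import java.sql.Timestamp;
import java.time.Instant;

public class EpochToTimestampSketch {
    // Mirrors the digit-count heuristic from the hunk above (illustration only, not the driver's API).
    static Timestamp toTimestamp(long ts) {
        if (ts < 1_0000_0000_0000_0L) {              // 13-digit value: millisecond epoch
            return new Timestamp(ts);
        }
        long epochSec = ts / 1_000_000L;             // 16-digit value: microsecond epoch
        long nanoAdjustment = ts % 1_000_000L * 1_000L;
        return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
    }

    public static void main(String[] args) {
        System.out.println(toTimestamp(1_619_000_000_000L));     // ms epoch
        System.out.println(toTimestamp(1_619_000_000_000_123L)); // us epoch
    }
}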
|
@ -22,6 +22,7 @@ public class RestfulConnection extends AbstractConnection {
|
|||
private final DatabaseMetaData metadata;
|
||||
|
||||
public RestfulConnection(String host, String port, Properties props, String database, String url) {
|
||||
super(props);
|
||||
this.host = host;
|
||||
this.port = Integer.parseInt(port);
|
||||
this.database = database;
|
||||
|
|
|
@ -5,13 +5,12 @@ import com.alibaba.fastjson.JSONObject;
|
|||
import com.google.common.primitives.Ints;
|
||||
import com.google.common.primitives.Longs;
|
||||
import com.google.common.primitives.Shorts;
|
||||
import com.taosdata.jdbc.AbstractResultSet;
|
||||
import com.taosdata.jdbc.TSDBConstants;
|
||||
import com.taosdata.jdbc.TSDBError;
|
||||
import com.taosdata.jdbc.TSDBErrorNumbers;
|
||||
import com.taosdata.jdbc.*;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.sql.*;
|
||||
import java.time.Instant;
|
||||
import java.time.ZoneOffset;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Calendar;
|
||||
|
||||
|
@ -19,6 +18,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
private volatile boolean isClosed;
|
||||
private int pos = -1;
|
||||
|
||||
|
||||
private final String database;
|
||||
private final Statement statement;
|
||||
// data
|
||||
|
@ -65,7 +65,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
}
|
||||
}
|
||||
|
||||
private Object parseColumnData(JSONArray row, int colIndex, int taosType) {
|
||||
private Object parseColumnData(JSONArray row, int colIndex, int taosType) throws SQLException {
|
||||
switch (taosType) {
|
||||
case TSDBConstants.TSDB_DATA_TYPE_BOOL:
|
||||
return row.getBoolean(colIndex);
|
||||
|
@ -81,8 +81,44 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
return row.getFloat(colIndex);
|
||||
case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
|
||||
return row.getDouble(colIndex);
|
||||
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
|
||||
return new Timestamp(row.getDate(colIndex).getTime());
|
||||
case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
|
||||
if (row.get(colIndex) == null)
|
||||
return null;
|
||||
String timestampFormat = this.statement.getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT);
|
||||
if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) {
|
||||
Long value = row.getLong(colIndex);
|
||||
//TODO: this implementation has a bug if the timestamp is bigger than 9999_9999_9999_9
|
||||
if (value < 1_0000_0000_0000_0L)
|
||||
return new Timestamp(value);
|
||||
long epochSec = value / 1000_000l;
|
||||
long nanoAdjustment = value % 1000_000l * 1000l;
|
||||
return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
|
||||
}
|
||||
if ("UTC".equalsIgnoreCase(timestampFormat)) {
|
||||
String value = row.getString(colIndex);
|
||||
long epochSec = Timestamp.valueOf(value.substring(0, 19).replace("T", " ")).getTime() / 1000;
|
||||
int fractionalSec = Integer.parseInt(value.substring(20, value.length() - 5));
|
||||
long nanoAdjustment = 0;
|
||||
if (value.length() > 28) {
|
||||
// us timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0x00
|
||||
nanoAdjustment = fractionalSec * 1000l;
|
||||
} else {
|
||||
// ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0x00
|
||||
nanoAdjustment = fractionalSec * 1000_000l;
|
||||
}
|
||||
ZoneOffset zoneOffset = ZoneOffset.of(value.substring(value.length() - 5));
|
||||
Instant instant = Instant.ofEpochSecond(epochSec, nanoAdjustment).atOffset(zoneOffset).toInstant();
|
||||
return Timestamp.from(instant);
|
||||
}
|
||||
String value = row.getString(colIndex);
|
||||
if (value.length() <= 23) // ms timestamp: yyyy-MM-dd HH:mm:ss.SSS
|
||||
return row.getTimestamp(colIndex);
|
||||
// us timestamp: yyyy-MM-dd HH:mm:ss.SSSSSS
|
||||
long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000;
|
||||
long nanoAdjustment = Integer.parseInt(value.substring(20)) * 1000l;
|
||||
Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
|
||||
return timestamp;
|
||||
}
|
||||
case TSDBConstants.TSDB_DATA_TYPE_BINARY:
|
||||
return row.getString(colIndex) == null ? null : row.getString(colIndex).getBytes();
|
||||
case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
|
||||
|
@ -126,12 +162,12 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean wasNull() throws SQLException {
|
||||
if (isClosed())
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
|
||||
return resultSet.isEmpty();
|
||||
}
|
||||
// @Override
|
||||
// public boolean wasNull() throws SQLException {
|
||||
// if (isClosed())
|
||||
// throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED);
|
||||
// return resultSet.isEmpty();
|
||||
// }
|
||||
|
||||
@Override
|
||||
public String getString(int columnIndex) throws SQLException {
|
||||
|
@ -150,8 +186,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return false;
|
||||
}
|
||||
wasNull = false;
|
||||
if (value instanceof Boolean)
|
||||
return (boolean) value;
|
||||
return Boolean.valueOf(value.toString());
|
||||
|
@ -162,8 +201,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
}
|
||||
wasNull = false;
|
||||
long valueAsLong = Long.parseLong(value.toString());
|
||||
if (valueAsLong == Byte.MIN_VALUE)
|
||||
return 0;
|
||||
|
@ -183,8 +225,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
}
|
||||
wasNull = false;
|
||||
long valueAsLong = Long.parseLong(value.toString());
|
||||
if (valueAsLong == Short.MIN_VALUE)
|
||||
return 0;
|
||||
|
@ -198,8 +243,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
}
|
||||
wasNull = false;
|
||||
long valueAsLong = Long.parseLong(value.toString());
|
||||
if (valueAsLong == Integer.MIN_VALUE)
|
||||
return 0;
|
||||
|
@ -213,9 +261,14 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
|
||||
}
|
||||
wasNull = false;
|
||||
if (value instanceof Timestamp) {
|
||||
return ((Timestamp) value).getTime();
|
||||
}
|
||||
long valueAsLong = 0;
|
||||
try {
|
||||
valueAsLong = Long.parseLong(value.toString());
|
||||
|
@ -232,8 +285,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
}
|
||||
wasNull = false;
|
||||
if (value instanceof Float || value instanceof Double)
|
||||
return (float) value;
|
||||
return Float.parseFloat(value.toString());
|
||||
|
@ -244,8 +300,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
checkAvailability(columnIndex, resultSet.get(pos).size());
|
||||
|
||||
Object value = resultSet.get(pos).get(columnIndex - 1);
|
||||
if (value == null)
|
||||
if (value == null) {
|
||||
wasNull = true;
|
||||
return 0;
|
||||
}
|
||||
wasNull = false;
|
||||
if (value instanceof Double || value instanceof Float)
|
||||
return (double) value;
|
||||
return Double.parseDouble(value.toString());
|
||||
|
@ -307,6 +366,13 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
|
|||
return null;
|
||||
if (value instanceof Timestamp)
|
||||
return (Timestamp) value;
|
||||
// if (value instanceof Long) {
|
||||
// if (1_0000_0000_0000_0L > (long) value)
|
||||
// return Timestamp.from(Instant.ofEpochMilli((long) value));
|
||||
// long epochSec = (long) value / 1000_000L;
|
||||
// long nanoAdjustment = (long) ((long) value % 1000_000L * 1000);
|
||||
// return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment));
|
||||
// }
|
||||
return Timestamp.valueOf(value.toString());
|
||||
}
|
||||
|
||||
|
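For reference, the UTC branch of parseColumnData() above decodes strings such as "2021-04-21T12:00:00.123+0800" returned by the /rest/sqlutc endpoint. A minimal sketch of the same conversion for the millisecond case, done with java.time directly; the class name and the exact wire format are assumptions for illustration, not taken from the driver:

import java.sql.Timestamp;
import java.time.OffsetDateTime;
import java.time.format.DateTimeFormatter;

public class UtcStringToTimestampSketch {
    // Assumed wire format: date-time with 3 fractional digits and a +HHMM offset.
    private static final DateTimeFormatter MS_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ");

    static Timestamp parse(String value) {
        // Parse the offset-aware string, then convert the resulting instant to java.sql.Timestamp.
        OffsetDateTime odt = OffsetDateTime.parse(value, MS_FORMAT);
        return Timestamp.from(odt.toInstant());
    }

    public static void main(String[] args) {
        System.out.println(parse("2021-04-21T12:00:00.123+0800"));
    }
}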
|
|
@ -4,6 +4,7 @@ import com.alibaba.fastjson.JSON;
|
|||
import com.alibaba.fastjson.JSONArray;
|
||||
import com.alibaba.fastjson.JSONObject;
|
||||
import com.taosdata.jdbc.AbstractStatement;
|
||||
import com.taosdata.jdbc.TSDBDriver;
|
||||
import com.taosdata.jdbc.TSDBError;
|
||||
import com.taosdata.jdbc.TSDBErrorNumbers;
|
||||
import com.taosdata.jdbc.utils.HttpClientPoolUtil;
|
||||
|
@ -34,14 +35,11 @@ public class RestfulStatement extends AbstractStatement {
|
|||
if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);
|
||||
|
||||
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
|
||||
if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
|
||||
return executeOneQuery(url, sql);
|
||||
return executeOneQuery(sql);
|
||||
}
|
||||
|
||||
// if (this.database != null && !this.database.trim().replaceAll("\\s","").isEmpty())
|
||||
// HttpClientPoolUtil.execute(url, "use " + this.database);
|
||||
return executeOneQuery(url, sql);
|
||||
return executeOneQuery(sql);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -56,8 +54,6 @@ public class RestfulStatement extends AbstractStatement {
|
|||
return executeOneUpdate(url, sql);
|
||||
}
|
||||
|
||||
// if (this.database != null && !this.database.trim().replaceAll("\\s", "").isEmpty())
|
||||
// HttpClientPoolUtil.execute(url, "use " + this.database);
|
||||
return executeOneUpdate(url, sql);
|
||||
}
|
||||
|
||||
|
@ -78,14 +74,21 @@ public class RestfulStatement extends AbstractStatement {
|
|||
|
||||
// if a "use" statement was executed, set the current Statement's catalog to the new database
|
||||
boolean result = true;
|
||||
final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
|
||||
String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
|
||||
if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("TIMESTAMP")) {
|
||||
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
|
||||
}
|
||||
if (conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).equals("UTC")) {
|
||||
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
|
||||
}
|
||||
|
||||
if (SqlSyntaxValidator.isUseSql(sql)) {
|
||||
HttpClientPoolUtil.execute(url, sql);
|
||||
this.database = sql.trim().replace("use", "").trim();
|
||||
this.conn.setCatalog(this.database);
|
||||
result = false;
|
||||
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) {
|
||||
executeOneQuery(url, sql);
|
||||
executeOneQuery(sql);
|
||||
} else if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) {
|
||||
executeOneUpdate(url, sql);
|
||||
result = false;
|
||||
|
@ -101,11 +104,18 @@ public class RestfulStatement extends AbstractStatement {
|
|||
return result;
|
||||
}
|
||||
|
||||
private ResultSet executeOneQuery(String url, String sql) throws SQLException {
|
||||
private ResultSet executeOneQuery(String sql) throws SQLException {
|
||||
if (!SqlSyntaxValidator.isValidForExecuteQuery(sql))
|
||||
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql);
|
||||
|
||||
// row data
|
||||
String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";
|
||||
String timestampFormat = conn.getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT);
|
||||
if ("TIMESTAMP".equalsIgnoreCase(timestampFormat))
|
||||
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlt";
|
||||
if ("UTC".equalsIgnoreCase(timestampFormat))
|
||||
url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
|
||||
|
||||
String result = HttpClientPoolUtil.execute(url, sql);
|
||||
JSONObject resultJson = JSON.parseObject(result);
|
||||
if (resultJson.getString("status").equals("error")) {
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
package com.taosdata.jdbc.utils;
|
||||
|
||||
import java.time.format.DateTimeFormatter;
|
||||
import java.time.format.DateTimeFormatterBuilder;
|
||||
|
||||
public class UtcTimestampUtil {
|
||||
public static final DateTimeFormatter formatter = new DateTimeFormatterBuilder()
|
||||
.appendPattern("yyyy-MM-ddTHH:mm:ss.SSS+")
|
||||
// .appendFraction(ChronoField.NANO_OF_SECOND, 0, 9, true)
|
||||
.toFormatter();
|
||||
|
||||
}
|
|
@ -0,0 +1,64 @@
|
|||
package com.taosdata.jdbc.cases;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.sql.*;
|
||||
|
||||
public class NullValueInResultSetForJdbcJniTest {
|
||||
|
||||
private static final String host = "127.0.0.1";
|
||||
Connection conn;
|
||||
|
||||
@Test
|
||||
public void test() {
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
ResultSet rs = stmt.executeQuery("select * from weather");
|
||||
ResultSetMetaData meta = rs.getMetaData();
|
||||
while (rs.next()) {
|
||||
for (int i = 1; i <= meta.getColumnCount(); i++) {
|
||||
Object value = rs.getObject(i);
|
||||
System.out.print(meta.getColumnLabel(i) + ": " + value + "\t");
|
||||
}
|
||||
System.out.println();
|
||||
}
|
||||
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
@Before
|
||||
public void before() throws SQLException {
|
||||
final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
|
||||
conn = DriverManager.getConnection(url);
|
||||
try (Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("drop database if exists test_null");
|
||||
stmt.execute("create database if not exists test_null");
|
||||
stmt.execute("use test_null");
|
||||
stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))");
|
||||
stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, 1)");
|
||||
stmt.executeUpdate("insert into weather(ts, f2) values(now+2s, 2)");
|
||||
stmt.executeUpdate("insert into weather(ts, f3) values(now+3s, 3.0)");
|
||||
stmt.executeUpdate("insert into weather(ts, f4) values(now+4s, 4.0)");
|
||||
stmt.executeUpdate("insert into weather(ts, f5) values(now+5s, 5)");
|
||||
stmt.executeUpdate("insert into weather(ts, f6) values(now+6s, 6)");
|
||||
stmt.executeUpdate("insert into weather(ts, f7) values(now+7s, true)");
|
||||
stmt.executeUpdate("insert into weather(ts, f8) values(now+8s, 'hello')");
|
||||
stmt.executeUpdate("insert into weather(ts, f9) values(now+9s, '涛思数据')");
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
@After
|
||||
public void after() {
|
||||
try {
|
||||
if (conn != null)
|
||||
conn.close();
|
||||
} catch (SQLException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,91 @@
|
|||
package com.taosdata.jdbc.cases;
|
||||
|
||||
import com.taosdata.jdbc.TSDBDriver;
|
||||
import com.taosdata.jdbc.utils.TimestampUtil;
|
||||
import org.junit.*;
|
||||
|
||||
import java.sql.*;
|
||||
import java.util.Properties;
|
||||
|
||||
public class TD3841Test {
|
||||
private static final String host = "127.0.0.1";
|
||||
private static Properties properties;
|
||||
private static Connection conn_restful;
|
||||
private static Connection conn_jni;
|
||||
|
||||
@Test
|
||||
public void testRestful() throws SQLException {
|
||||
String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
|
||||
conn_restful = DriverManager.getConnection(url, properties);

        try (Statement stmt = conn_restful.createStatement()) {
            stmt.execute("drop database if exists test_null");
            stmt.execute("create database if not exists test_null");
            stmt.execute("use test_null");
            stmt.execute("create table weather(ts timestamp, f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 smallint, f7 tinyint, f8 bool, f9 binary(64), f10 nchar(64))");
            stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, " + TimestampUtil.datetimeToLong("2021-04-21 12:00:00.000") + ")");
            ResultSet rs = stmt.executeQuery("select * from weather");
            rs.next();

            Assert.assertEquals("2021-04-21 12:00:00.000", TimestampUtil.longToDatetime(rs.getTimestamp(2).getTime()));
            Assert.assertEquals(true, rs.getInt(3) == 0 && rs.wasNull());
            Assert.assertEquals(true, rs.getLong(4) == 0 && rs.wasNull());
            Assert.assertEquals(true, rs.getFloat(5) == 0.0f && rs.wasNull());
            Assert.assertEquals(true, rs.getDouble(6) == 0.0f && rs.wasNull());
            Assert.assertEquals(true, rs.getByte(7) == 0 && rs.wasNull());
            Assert.assertEquals(true, rs.getShort(8) == 0 && rs.wasNull());
            Assert.assertEquals(null, rs.getBytes(9));
            Assert.assertEquals(null, rs.getString(10));

            rs.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testJNI() throws SQLException {
        final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
        conn_jni = DriverManager.getConnection(url, properties);

        try (Statement stmt = conn_jni.createStatement()) {
            stmt.execute("drop database if exists test_null");
            stmt.execute("create database if not exists test_null");
            stmt.execute("use test_null");
            stmt.execute("create table weather(ts timestamp, f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 smallint, f7 tinyint, f8 bool, f9 binary(64), f10 nchar(64))");
            stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, " + TimestampUtil.datetimeToLong("2021-04-21 12:00:00.000") + ")");
            ResultSet rs = stmt.executeQuery("select * from weather");
            rs.next();

            Assert.assertEquals("2021-04-21 12:00:00.000", TimestampUtil.longToDatetime(rs.getTimestamp(2).getTime()));
            Assert.assertEquals(true, rs.getInt(3) == 0 && rs.wasNull());
            Assert.assertEquals(true, rs.getLong(4) == 0 && rs.wasNull());
            Assert.assertEquals(true, rs.getFloat(5) == 0.0f && rs.wasNull());
            Assert.assertEquals(true, rs.getDouble(6) == 0.0f && rs.wasNull());
            Assert.assertEquals(true, rs.getByte(7) == 0 && rs.wasNull());
            Assert.assertEquals(true, rs.getShort(8) == 0 && rs.wasNull());
            Assert.assertEquals(null, rs.getBytes(9));
            Assert.assertEquals(null, rs.getString(10));

            rs.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @BeforeClass
    public static void beforeClass() {
        properties = new Properties();
        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
    }

    @AfterClass
    public static void afterClass() throws SQLException {
        if (conn_restful != null)
            conn_restful.close();
        if (conn_jni != null)
            conn_jni.close();
    }
}

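The asserts above rely on a JDBC convention worth spelling out: for a SQL NULL, getInt/getLong/getFloat and similar getters return the type's zero value, so NULL can only be told apart from a real zero by calling wasNull() immediately after the getter. A minimal standalone sketch of that idiom follows; the helper class and method names are illustrative only and are not part of this patch.

// Hypothetical helper illustrating the wasNull() idiom exercised by the tests above.
import java.sql.ResultSet;
import java.sql.SQLException;

public class NullableColumns {
    /** Returns the INT column as Integer, or null when the stored value is SQL NULL. */
    public static Integer getNullableInt(ResultSet rs, int columnIndex) throws SQLException {
        int v = rs.getInt(columnIndex);   // JDBC returns 0 for SQL NULL here
        return rs.wasNull() ? null : v;   // wasNull() reports on the last column read
    }
}
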
@@ -0,0 +1,89 @@
package com.taosdata.jdbc.cases;

import com.taosdata.jdbc.TSDBDriver;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

import java.sql.*;
import java.util.Properties;

public class TwoTypeTimestampPercisionInJniTest {

    private static final String host = "127.0.0.1";
    private static final String ms_timestamp_db = "ms_precision_test";
    private static final String us_timestamp_db = "us_precision_test";
    private static final long timestamp1 = System.currentTimeMillis();
    private static final long timestamp2 = timestamp1 * 1000 + 123;

    private static Connection conn;

    @Test
    public void testCase1() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
            rs.next();
            long ts = rs.getTimestamp(1).getTime();
            Assert.assertEquals(timestamp1, ts);
            ts = rs.getLong(1);
            Assert.assertEquals(timestamp1, ts);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testCase2() {
        try (Statement stmt = conn.createStatement()) {
            ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
            rs.next();

            Timestamp timestamp = rs.getTimestamp(1);
            System.out.println(timestamp);
            long ts = timestamp.getTime();
            Assert.assertEquals(timestamp1, ts);
            int nanos = timestamp.getNanos();
            Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos);

            ts = rs.getLong(1);
            Assert.assertEquals(timestamp1, ts);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @BeforeClass
    public static void beforeClass() throws SQLException {
        Properties properties = new Properties();
        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");

        String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
        conn = DriverManager.getConnection(url, properties);

        Statement stmt = conn.createStatement();
        stmt.execute("drop database if exists " + ms_timestamp_db);
        stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'");
        stmt.execute("create table " + ms_timestamp_db + ".weather(ts timestamp, f1 int)");
        stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)");

        stmt.execute("drop database if exists " + us_timestamp_db);
        stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'");
        stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)");
        stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)");
        stmt.close();
    }

    @AfterClass
    public static void afterClass() {
        try {
            if (conn != null)
                conn.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}

@@ -0,0 +1,168 @@
package com.taosdata.jdbc.cases;

import com.taosdata.jdbc.TSDBDriver;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

import java.sql.*;
import java.util.Properties;

public class TwoTypeTimestampPercisionInRestfulTest {

    private static final String host = "127.0.0.1";
    private static final String ms_timestamp_db = "ms_precision_test";
    private static final String us_timestamp_db = "us_precision_test";
    private static final long timestamp1 = System.currentTimeMillis();
    private static final long timestamp2 = timestamp1 * 1000 + 123;

    private static Connection conn1;
    private static Connection conn2;
    private static Connection conn3;

    @Test
    public void testCase1() {
        try (Statement stmt = conn1.createStatement()) {
            ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
            rs.next();
            long ts = rs.getTimestamp(1).getTime();
            Assert.assertEquals(timestamp1, ts);
            ts = rs.getLong(1);
            Assert.assertEquals(timestamp1, ts);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testCase2() {
        try (Statement stmt = conn1.createStatement()) {
            ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
            rs.next();

            Timestamp timestamp = rs.getTimestamp(1);
            long ts = timestamp.getTime();
            Assert.assertEquals(timestamp1, ts);
            int nanos = timestamp.getNanos();
            Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos);

            ts = rs.getLong(1);
            Assert.assertEquals(timestamp1, ts);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testCase3() {
        try (Statement stmt = conn2.createStatement()) {
            ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
            rs.next();
            Timestamp rsTimestamp = rs.getTimestamp(1);
            long ts = rsTimestamp.getTime();
            Assert.assertEquals(timestamp1, ts);
            ts = rs.getLong(1);
            Assert.assertEquals(timestamp1, ts);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testCase4() {
        try (Statement stmt = conn2.createStatement()) {
            ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
            rs.next();

            Timestamp timestamp = rs.getTimestamp(1);
            long ts = timestamp.getTime();
            Assert.assertEquals(timestamp1, ts);
            int nanos = timestamp.getNanos();
            Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos);

            ts = rs.getLong(1);
            Assert.assertEquals(timestamp1, ts);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testCase5() {
        try (Statement stmt = conn3.createStatement()) {
            ResultSet rs = stmt.executeQuery("select last_row(ts) from " + ms_timestamp_db + ".weather");
            rs.next();
            long ts = rs.getTimestamp(1).getTime();
            Assert.assertEquals(timestamp1, ts);
            ts = rs.getLong(1);
            Assert.assertEquals(timestamp1, ts);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testCase6() {
        try (Statement stmt = conn3.createStatement()) {
            ResultSet rs = stmt.executeQuery("select last_row(ts) from " + us_timestamp_db + ".weather");
            rs.next();

            Timestamp timestamp = rs.getTimestamp(1);
            long ts = timestamp.getTime();
            Assert.assertEquals(timestamp1, ts);
            int nanos = timestamp.getNanos();
            Assert.assertEquals(timestamp2 % 1000_000l * 1000, nanos);

            ts = rs.getLong(1);
            Assert.assertEquals(timestamp1, ts);
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    @BeforeClass
    public static void beforeClass() throws SQLException {
        Properties properties = new Properties();
        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
        // properties.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "TIMESTAMP");

        String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata";
        conn1 = DriverManager.getConnection(url, properties);

        url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&timestampFormat=timestamp";
        conn2 = DriverManager.getConnection(url, properties);

        url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata&timestampFormat=utc";
        conn3 = DriverManager.getConnection(url, properties);

        Statement stmt = conn1.createStatement();
        stmt.execute("drop database if exists " + ms_timestamp_db);
        stmt.execute("create database if not exists " + ms_timestamp_db + " precision 'ms'");
        stmt.execute("create table " + ms_timestamp_db + ".weather(ts timestamp, f1 int)");
        stmt.executeUpdate("insert into " + ms_timestamp_db + ".weather(ts,f1) values(" + timestamp1 + ", 127)");

        stmt.execute("drop database if exists " + us_timestamp_db);
        stmt.execute("create database if not exists " + us_timestamp_db + " precision 'us'");
        stmt.execute("create table " + us_timestamp_db + ".weather(ts timestamp, f1 int)");
        stmt.executeUpdate("insert into " + us_timestamp_db + ".weather(ts,f1) values(" + timestamp2 + ", 127)");
        stmt.close();
    }

    @AfterClass
    public static void afterClass() {
        try {
            if (conn1 != null)
                conn1.close();
            if (conn2 != null)
                conn2.close();
            if (conn3 != null)
                conn3.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}

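For reference, the expected values asserted in these precision tests follow directly from how a microsecond epoch value maps onto java.sql.Timestamp: the integral milliseconds come back through getTime(), and the microsecond remainder is carried in getNanos(). A standalone sketch of that arithmetic is below; the class name and the comparison style are illustrative, not part of the patch.

// Hypothetical sketch: relation between a microsecond-precision epoch value and java.sql.Timestamp,
// mirroring the expected values used by the tests above.
import java.sql.Timestamp;

public class UsPrecisionMath {
    public static void main(String[] args) {
        long ms = System.currentTimeMillis();  // like timestamp1 in the tests (millisecond precision)
        long us = ms * 1000 + 123;             // like timestamp2 in the tests (microsecond precision)

        Timestamp t = new Timestamp(us / 1000);         // integral milliseconds
        t.setNanos((int) ((us % 1_000_000L) * 1000));   // microsecond remainder -> nanoseconds

        System.out.println(t.getTime() == ms);                         // what rs.getTimestamp(1).getTime() is expected to return
        System.out.println(t.getNanos() == (us % 1_000_000L) * 1000);  // what rs.getTimestamp(1).getNanos() is expected to return
    }
}
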
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:

setuptools.setup(
    name="taos",
    version="2.0.7",
    version="2.0.8",
    author="Taosdata Inc.",
    author_email="support@taosdata.com",
    description="TDengine python client package",

@@ -42,7 +42,7 @@ class TDengineCursor(object):
    def __iter__(self):
        return self

    def __next__(self):
    def next(self):
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetch iterator")

@@ -310,8 +310,23 @@ void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, ch
  }
  SCqContext *pContext = handle;
  int64_t rid = 0;

  pthread_mutex_lock(&pContext->mutex);

  SCqObj *pObj = pContext->pHead;
  while (pObj) {
    if (pObj->uid == uid) {
      rid = pObj->rid;
      pthread_mutex_unlock(&pContext->mutex);
      return (void *)rid;
    }

    pObj = pObj->next;
  }

  SCqObj *pObj = calloc(sizeof(SCqObj), 1);
  pthread_mutex_unlock(&pContext->mutex);

  pObj = calloc(sizeof(SCqObj), 1);
  if (pObj == NULL) return NULL;

  pObj->uid = uid;

@@ -386,12 +401,15 @@ static void doCreateStream(void *param, TAOS_RES *result, int32_t code) {
  if (pObj == NULL) {
    return;
  }

  SCqContext* pContext = pObj->pContext;
  SSqlObj* pSql = (SSqlObj*)result;
  if (atomic_val_compare_exchange_ptr(&(pContext->dbConn), NULL, pSql->pTscObj) != NULL) {
    taos_close(pSql->pTscObj);
  SSqlObj* pSql = (SSqlObj*)result;
  if (code == TSDB_CODE_SUCCESS) {
    if (atomic_val_compare_exchange_ptr(&(pContext->dbConn), NULL, pSql->pTscObj) != NULL) {
      taos_close(pSql->pTscObj);
    }
  }

  pthread_mutex_lock(&pContext->mutex);
  cqCreateStream(pContext, pObj);
  pthread_mutex_unlock(&pContext->mutex);

@@ -427,10 +445,11 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
    pObj->tmrId = taosTmrStart(cqProcessCreateTimer, 1000, (void *)pObj->rid, pContext->tmrCtrl);
    return;
  }

  pObj->tmrId = 0;

  if (pObj->pStream == NULL) {
    pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, 0, (void *)pObj->rid, NULL);
    pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL);

    // TODO the pObj->pStream may be released if error happens
    if (pObj->pStream) {

@@ -243,8 +243,9 @@ do { \
#define TSDB_MAX_REPLICA 5

#define TSDB_TBNAME_COLUMN_INDEX       (-1)
#define TSDB_BLOCK_DIST_COLUMN_INDEX   (-2)
#define TSDB_UD_COLUMN_INDEX           (-100)
#define TSDB_BLOCK_DIST_COLUMN_INDEX   (-2)
#define TSDB_UD_COLUMN_INDEX           (-1000)
#define TSDB_RES_COL_ID                (-5000)

#define TSDB_MULTI_TABLEMETA_MAX_NUM   100000 // maximum batch size allowed to load table meta

@@ -388,9 +389,10 @@ typedef enum {
typedef enum {
  TSDB_SUPER_TABLE  = 0,  // super table
  TSDB_CHILD_TABLE  = 1,  // table created from super table
  TSDB_NORMAL_TABLE = 2,  // ordinary table
  TSDB_STREAM_TABLE = 3,  // table created from stream computing
  TSDB_TABLE_MAX    = 4
  TSDB_NORMAL_TABLE = 2,  // ordinary table
  TSDB_STREAM_TABLE = 3,  // table created from stream computing
  TSDB_TEMP_TABLE   = 4,  // temp table created by nest query
  TSDB_TABLE_MAX    = 5
} ETableType;

typedef enum {

@@ -399,7 +399,6 @@ typedef struct SColIndex {
  char     name[TSDB_COL_NAME_LEN];  // TODO remove it
} SColIndex;


typedef struct SColumnFilterInfo {
  int16_t lowerRelOptr;
  int16_t upperRelOptr;

@@ -421,42 +420,13 @@ typedef struct SColumnFilterInfo {
  };
} SColumnFilterInfo;

/* sql function msg, to describe the message to vnode about sql function
 * operations in select clause */
typedef struct SSqlFuncMsg {
  int16_t functionId;
  int16_t numOfParams;

  int16_t resColId;      // result column id, id of the current output column
  int16_t colType;
  int16_t colBytes;

  SColIndex colInfo;
  struct ArgElem {
    int16_t argType;
    int16_t argBytes;
    union {
      double  d;
      int64_t i64;
      char *  pz;
    } argValue;
  } arg[3];

  int32_t filterNum;
  SColumnFilterInfo filterInfo[];
} SSqlFuncMsg;


typedef struct SExprInfo {
  SColumnFilterInfo * pFilter;
  struct tExprNode* pExpr;
  int16_t bytes;
  int16_t type;
  int32_t interBytes;
  int64_t uid;
  SSqlFuncMsg base;
} SExprInfo;

typedef struct SColumnFilterList {
  int16_t numOfFilters;
  union{
    int64_t placeholder;
    SColumnFilterInfo *filterInfo;
  };
} SColumnFilterList;
/*
 * for client side struct, we only need the column id, type, bytes are not necessary
 * But for data in vnode side, we need all the following information.

@@ -465,11 +435,7 @@ typedef struct SColumnInfo {
  int16_t colId;
  int16_t type;
  int16_t bytes;
  int16_t numOfFilters;
  union{
    int64_t placeholder;
    SColumnFilterInfo *filters;
  };
  SColumnFilterList flist;
} SColumnInfo;

typedef struct STableIdInfo {

@@ -483,10 +449,29 @@ typedef struct STimeWindow {
  TSKEY ekey;
} STimeWindow;

typedef struct {
  int32_t tsOffset;       // offset value in current msg body, NOTE: ts list is compressed
  int32_t tsLen;          // total length of ts comp block
  int32_t tsNumOfBlocks;  // ts comp block numbers
  int32_t tsOrder;        // ts comp block order
} STsBufInfo;

typedef struct {
  SMsgHead head;
  char     version[TSDB_VERSION_LEN];

  bool stableQuery;       // super table query or not
  bool topBotQuery;       // TODO used bitwise flag
  bool groupbyColumn;     // denote if this is a groupby normal column query
  bool hasTagResults;     // if there are tag values in final result or not
  bool timeWindowInterpo; // if the time window start/end required interpolation
  bool queryBlockDist;    // if query data block distribution
  bool stabledev;         // super table stddev query
  bool tsCompQuery;       // is tscomp query
  bool simpleAgg;
  bool pointInterpQuery;  // point interpolation query
  bool needReverseScan;   // need reverse scan

  STimeWindow window;
  int32_t numOfTables;
  int16_t order;

@@ -509,14 +494,13 @@ typedef struct {
  int16_t  fillType;        // interpolate type
  uint64_t fillVal;         // default value array list
  int32_t  secondStageOutput;
  int32_t  tsOffset;        // offset value in current msg body, NOTE: ts list is compressed
  int32_t  tsLen;           // total length of ts comp block
  int32_t  tsNumOfBlocks;   // ts comp block numbers
  int32_t  tsOrder;         // ts comp block order
  STsBufInfo tsBuf;         // tsBuf info
  int32_t  numOfTags;       // number of tags columns involved
  int32_t  sqlstrLen;       // sql query string
  int32_t  prevResultLen;   // previous result length
  SColumnInfo colList[];
  int32_t  numOfOperator;
  int32_t  tableScanOperator;// table scan operator. -1 means no scan operator
  SColumnInfo tableCols[];
} SQueryTableMsg;

typedef struct {

@@ -827,7 +811,7 @@ typedef struct {
  uint32_t queryId;
  int64_t  useconds;
  int64_t  stime;
  uint64_t qHandle;
  uint64_t qId;
} SQueryDesc;

typedef struct {

@@ -221,7 +221,7 @@ typedef struct {

typedef struct {
  uint32_t  numOfTables;
  SArray *  pGroupList;
  SArray   *pGroupList;
  SHashObj *map;  // speedup acquire the tableQueryInfo by table uid
} STableGroupInfo;

@@ -205,11 +205,6 @@
#define TK_VALUES 186

#define TK_SPACE   300
#define TK_COMMENT 301
#define TK_ILLEGAL 302

@@ -37,7 +37,7 @@ static struct argp_option options[] = {
  {"password",    'p', "PASSWORD",   OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."},
  {"port",        'P', "PORT",       0, "The TCP/IP port number to use for the connection."},
  {"user",        'u', "USER",       0, "The user name to use when connecting to the server."},
  {"user",        'A', "Auth",       0, "The user auth to use when connecting to the server."},
  {"auth",        'A', "Auth",       0, "The auth string to use when connecting to the server."},
  {"config-dir",  'c', "CONFIG_DIR", 0, "Configuration directory."},
  {"dump-config", 'C', 0,            0, "Dump configuration."},
  {"commands",    's', "COMMANDS",   0, "Commands to run without enter the shell."},

@@ -199,15 +199,19 @@ void updateBuffer(Command *cmd) {
}

int isReadyGo(Command *cmd) {
  char total[MAX_COMMAND_SIZE];
  char *total = malloc(MAX_COMMAND_SIZE);
  memset(total, 0, MAX_COMMAND_SIZE);
  sprintf(total, "%s%s", cmd->buffer, cmd->command);

  char *reg_str =
      "(^.*;\\s*$)|(^\\s*$)|(^\\s*exit\\s*$)|(^\\s*q\\s*$)|(^\\s*quit\\s*$)|(^"
      "\\s*clear\\s*$)";
  if (regex_match(total, reg_str, REG_EXTENDED | REG_ICASE)) return 1;
  if (regex_match(total, reg_str, REG_EXTENDED | REG_ICASE)) {
    free(total);
    return 1;
  }

  free(total);
  return 0;
}

(File diff suppressed because it is too large)
@@ -214,8 +214,8 @@ static struct argp_option options[] = {
  // dump format options
  {"schemaonly",    's', 0, 0, "Only dump schema.", 3},
  {"with-property", 'M', 0, 0, "Dump schema with properties.", 3},
  {"start-time",    'S', "START_TIME", 0, "Start time to dump.", 3},
  {"end-time",      'E', "END_TIME",   0, "End time to dump. Epoch or ISO8601/RFC3339 format is acceptable. For example: 2017-10-01T18:00:00+0800", 3},
  {"start-time",    'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
  {"end-time",      'E', "END_TIME",   0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3},
  {"data-batch",    'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
  {"max-sql-len",   'L', "SQL_LEN",    0, "Max length of one sql. Default is 65480.", 3},
  {"table-batch",   't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},

@@ -482,7 +482,8 @@ static int queryDbImpl(TAOS *taos, char *command) {

static void parse_args(int argc, char *argv[], SArguments *arguments) {
  for (int i = 1; i < argc; i++) {
    if (strcmp(argv[i], "-E") == 0) {
    if ((strcmp(argv[i], "-S") == 0)
        || (strcmp(argv[i], "-E") == 0)) {
      if (argv[i+1]) {
        char *tmp = strdup(argv[++i]);

@@ -509,7 +510,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) {
        exit(-1);
      }
    } else {
      errorPrint("%s() LN%d, -E need a valid value following!\n", __func__, __LINE__);
      errorPrint("%s need a valid value following!\n", argv[i]);
      exit(-1);
    }
  } else if (strcmp(argv[i], "-g") == 0) {

@@ -522,7 +523,8 @@ int main(int argc, char *argv[]) {

  /* Parse our arguments; every option seen by parse_opt will be
     reflected in arguments. */
  parse_args(argc, argv, &g_args);
  if (argc > 1)
    parse_args(argc, argv, &g_args);

  argp_parse(&argp, argc, argv, 0, 0, &g_args);

@@ -31,6 +31,7 @@ enum _TSDB_DB_STATUS {
int32_t mnodeInitDbs();
void    mnodeCleanupDbs();
int64_t mnodeGetDbNum();
int32_t mnodeGetDbMaxReplica();
SDbObj *mnodeGetDb(char *db);
SDbObj *mnodeGetDbByTableName(char *db);
void *  mnodeGetNextDb(void *pIter, SDbObj **pDb);

@@ -74,6 +74,24 @@ int64_t mnodeGetDbNum() {
  return sdbGetNumOfRows(tsDbSdb);
}

int32_t mnodeGetDbMaxReplica() {
  int32_t maxReplica = 0;
  SDbObj *pDb = NULL;
  void   *pIter = NULL;

  while (1) {
    pIter = mnodeGetNextDb(pIter, &pDb);
    if (pDb == NULL) break;

    if (pDb->cfg.replications > maxReplica)
      maxReplica = pDb->cfg.replications;

    mnodeDecDbRef(pDb);
  }

  return maxReplica;
}

static int32_t mnodeDbActionInsert(SSdbRow *pRow) {
  SDbObj *pDb = pRow->pObj;
  SAcctObj *pAcct = mnodeGetAcct(pDb->acct);

@@ -29,6 +29,7 @@
#include "mnodeDef.h"
#include "mnodeInt.h"
#include "mnodeDnode.h"
#include "mnodeDb.h"
#include "mnodeMnode.h"
#include "mnodeSdb.h"
#include "mnodeShow.h"

@@ -745,6 +746,14 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) {
    return TSDB_CODE_MND_NO_REMOVE_MASTER;
  }

  int32_t maxReplica = mnodeGetDbMaxReplica();
  int32_t dnodesNum = mnodeGetDnodesNum();
  if (dnodesNum <= maxReplica) {
    mError("dnode:%d, can't drop dnode:%s, #dnodes: %d, replia: %d", pDnode->dnodeId, ep, dnodesNum, maxReplica);
    mnodeDecDnodeRef(pDnode);
    return TSDB_CODE_MND_NO_ENOUGH_DNODES;
  }

  mInfo("dnode:%d, start to drop it", pDnode->dnodeId);

  int32_t code = bnDropDnode(pDnode);

@@ -344,7 +344,7 @@ static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC

  pShow->bytes[cols] = 24;
  pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
  strcpy(pSchema[cols].name, "qhandle");
  strcpy(pSchema[cols].name, "qid");
  pSchema[cols].bytes = htons(pShow->bytes[cols]);
  cols++;

@@ -420,7 +420,7 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v
  cols++;

  char handleBuf[24] = {0};
  snprintf(handleBuf, tListLen(handleBuf), "%p", (void*)htobe64(pDesc->qHandle));
  snprintf(handleBuf, tListLen(handleBuf), "%"PRIu64, htobe64(pDesc->qId));
  pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;

  STR_WITH_MAXSIZE_TO_VARSTR(pWrite, handleBuf, pShow->bytes[cols]);

@@ -32,6 +32,10 @@ extern "C" {
#include "osArm32.h"
#endif

#ifdef _TD_MIPS_64
#include "osMips64.h"
#endif

#ifdef _TD_LINUX_64
#include "osLinux64.h"
#endif

@@ -0,0 +1,87 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TDENGINE_OS_MIPS64_H
#define TDENGINE_OS_MIPS64_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <stdlib.h>
#include <argp.h>
#include <arpa/inet.h>
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <float.h>
#include <ifaddrs.h>
#include <libgen.h>
#include <limits.h>
#include <locale.h>
#include <math.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <pthread.h>
#include <pwd.h>
#include <regex.h>
#include <semaphore.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/statvfs.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <syslog.h>
#include <termios.h>
#include <unistd.h>
#include <wchar.h>
#include <wordexp.h>
#include <wctype.h>
#include <inttypes.h>
#include <fcntl.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <error.h>
#include <linux/sysctl.h>
#include <math.h>
#include <poll.h>

#ifdef __cplusplus
}
#endif

#endif

@@ -217,7 +217,7 @@ void taosSetCoreDump() {}

int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) {
  struct statvfs info;
  if (statvfs(tsDataDir, &info)) {
  if (statvfs(dataDir, &info)) {
    uError("failed to get disk size, dataDir:%s errno:%s", tsDataDir, strerror(errno));
    terrno = TAOS_SYSTEM_ERROR(errno);
    return -1;

@@ -319,7 +319,7 @@ bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) {

int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) {
  struct statvfs info;
  if (statvfs(tsDataDir, &info)) {
  if (statvfs(dataDir, &info)) {
    uError("failed to get disk size, dataDir:%s errno:%s", tsDataDir, strerror(errno));
    terrno = TAOS_SYSTEM_ERROR(errno);
    return -1;

@@ -18,7 +18,7 @@ TEST(testCase, parse_time) {
  deltaToUtcInitOnce();

  // window: 1500000001000, 1500002000000
  // pQuery->interval: interval: 86400000, sliding:3600000
  // pQueryAttr->interval: interval: 86400000, sliding:3600000
  int64_t key = 1500000001000;
  SInterval interval = {0};
  interval.interval = 86400000;

@@ -25,7 +25,7 @@ const char *httpContextStateStr(HttpContextState state);
HttpContext *httpCreateContext(SOCKET fd);
bool httpInitContext(HttpContext *pContext);
HttpContext *httpGetContext(void *pContext);
void httpReleaseContext(HttpContext *pContext, bool clearRes);
void httpReleaseContext(HttpContext *pContext/*, bool clearRes*/);
void httpCloseContextByServer(HttpContext *pContext);
void httpCloseContextByApp(HttpContext *pContext);
void httpNotifyContextClose(HttpContext *pContext);

@@ -146,20 +146,20 @@ HttpContext *httpGetContext(void *ptr) {
  return NULL;
}

void httpReleaseContext(HttpContext *pContext, bool clearRes) {
void httpReleaseContext(HttpContext *pContext/*, bool clearRes*/) {
  int32_t refCount = atomic_sub_fetch_32(&pContext->refCount, 1);
  if (refCount < 0) {
    httpError("context:%p, is already released, refCount:%d", pContext, refCount);
    return;
  }

  /*
  if (clearRes) {
    if (pContext->parser) {
      httpClearParser(pContext->parser);
    }
    memset(&pContext->singleCmd, 0, sizeof(HttpSqlCmd));
  }
  */
  HttpContext **ppContext = pContext->ppContext;
  httpTrace("context:%p, is released, data:%p refCount:%d", pContext, ppContext, refCount);

@@ -217,7 +217,7 @@ void httpCloseContextByApp(HttpContext *pContext) {
              httpContextStateStr(pContext->state), pContext->state);
  }

  httpReleaseContext(pContext, true);
  httpReleaseContext(pContext/*, true*/);
}

void httpCloseContextByServer(HttpContext *pContext) {

@@ -235,5 +235,5 @@ void httpCloseContextByServer(HttpContext *pContext) {

  pContext->parsed = false;
  httpRemoveContextFromEpoll(pContext);
  httpReleaseContext(pContext, true);
  httpReleaseContext(pContext/*, true*/);
}

@@ -50,6 +50,7 @@ bool httpProcessData(HttpContext* pContext) {
    */
    // httpCloseContextByApp(pContext);
  } else {
    httpClearParser(pContext->parser);
    httpProcessRequest(pContext);
  }
}

@@ -177,7 +177,7 @@ static void httpProcessHttpData(void *param) {
      if (!httpAlterContextState(pContext, HTTP_CONTEXT_STATE_READY, HTTP_CONTEXT_STATE_READY)) {
        httpDebug("context:%p, fd:%d, state:%s, not in ready state, ignore read events", pContext, pContext->fd,
                  httpContextStateStr(pContext->state));
        httpReleaseContext(pContext, true);
        httpReleaseContext(pContext/*, true*/);
        continue;
      }

@@ -191,7 +191,7 @@ static void httpProcessHttpData(void *param) {
        (*(pThread->processData))(pContext);
        atomic_fetch_add_32(&pServer->requestNum, 1);
      } else {
        httpReleaseContext(pContext, false);
        httpReleaseContext(pContext/*, false*/);
      }
    }
  }

@@ -275,7 +275,7 @@ static void *httpAcceptHttpConnection(void *arg) {
      httpError("context:%p, fd:%d, ip:%s, thread:%s, failed to add http fd for epoll, error:%s", pContext, connFd,
                pContext->ipstr, pThread->label, strerror(errno));
      taosCloseSocket(pContext->fd);
      httpReleaseContext(pContext, true);
      httpReleaseContext(pContext/*, true*/);
      continue;
    }

@@ -376,6 +376,8 @@ void httpExecCmd(HttpContext *pContext) {
      httpCloseContextByApp(pContext);
      break;
  }

  memset(&pContext->singleCmd, 0, sizeof(HttpSqlCmd));
}

void httpProcessRequestCb(void *param, TAOS_RES *result, int32_t code) {

@@ -122,7 +122,7 @@ enum {
#define QUERY_IS_FREE_RESOURCE(type) (((type)&TSDB_QUERY_TYPE_FREE_RESOURCE) != 0)

typedef struct SArithmeticSupport {
  SExprInfo   *pArithExpr;
  SExprInfo   *pExprInfo;
  int32_t      numOfCols;
  SColumnInfo *colList;
  void        *exprList;   // client side used

@@ -210,9 +210,6 @@ typedef struct SAggFunctionInfo {
  void (*xFunction)(SQLFunctionCtx *pCtx);                     // blocks version function
  void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position);  // single-row function version, todo merge with blockwise function

  // some sql function require scan data twice or more, e.g.,stddev, percentile
  void (*xNextStep)(SQLFunctionCtx *pCtx);

  // finalizer must be called after all xFunction has been executed to generated final result.
  void (*xFinalize)(SQLFunctionCtx *pCtx);
  void (*mergeFunc)(SQLFunctionCtx *pCtx);

@@ -86,7 +86,7 @@ typedef struct SResultRow {
  bool                closed;      // this result status: closed or opened
  uint32_t            numOfRows;   // number of rows of current time window
  SResultRowCellInfo *pCellInfo;   // For each result column, there is a resultInfo
  STimeWindow         win;
  STimeWindow         win;
  char               *key;         // start key of current result row
} SResultRow;

@@ -178,8 +178,11 @@ typedef struct SSDataBlock {
  SDataBlockInfo info;
} SSDataBlock;

typedef struct SQuery {
// The basic query information extracted from the SQueryInfo tree to support the
// execution of query in a data node.
typedef struct SQueryAttr {
  SLimitVal        limit;
  SLimitVal        slimit;

  bool             stableQuery;      // super table query or not
  bool             topBotQuery;      // TODO used bitwise flag

@@ -188,6 +191,11 @@ typedef struct SQuery {
  bool             timeWindowInterpo;// if the time window start/end required interpolation
  bool             queryBlockDist;   // if query data block distribution
  bool             stabledev;        // super table stddev query
  bool             tsCompQuery;      // is tscomp query
  bool             simpleAgg;
  bool             pointInterpQuery; // point interpolation query
  bool             needReverseScan;  // need reverse scan
  bool             distinctTag;      // distinct tag query
  int32_t          interBufSize;     // intermediate buffer sizse

  int32_t          havingNum;        // having expr number

@@ -202,39 +210,41 @@ typedef struct SQuery {
  int16_t          precision;
  int16_t          numOfOutput;
  int16_t          fillType;
  int16_t          checkResultBuf;   // check if the buffer is full during scan each block

  int32_t          srcRowSize;       // todo extract struct
  int32_t          resultRowSize;
  int32_t          intermediateResultRowSize; // intermediate result row size, in case of top-k query.
  int32_t          maxSrcColumnSize;
  int32_t          maxTableColumnWidth;
  int32_t          tagLen;           // tag value length of current query
  SSqlGroupbyExpr* pGroupbyExpr;

  SExprInfo*       pExpr1;
  SExprInfo*       pExpr2;
  int32_t          numOfExpr2;
  SColumnInfo*     colList;
  SExprInfo*       pExpr3;
  int32_t          numOfExpr3;

  SColumnInfo*     tableCols;
  SColumnInfo*     tagColList;
  int32_t          numOfFilterCols;
  int64_t*         fillVal;
  SOrderedPrjQueryInfo prjInfo;      // limit value for each vgroup, only available in global order projection query.
  SSingleColumnFilterInfo* pFilterInfo;

  STableQueryInfo* current;
  void*            tsdb;
  SMemRef          memRef;
  STableGroupInfo  tableGroupInfo;   // table <tid, last_key> list  SArray<STableKeyInfo>
  int32_t          vgId;
} SQuery;
} SQueryAttr;

typedef SSDataBlock* (*__operator_fn_t)(void* param);
typedef SSDataBlock* (*__operator_fn_t)(void* param, bool* newgroup);
typedef void (*__optr_cleanup_fn_t)(void* param, int32_t num);

struct SOperatorInfo;

typedef struct SQueryRuntimeEnv {
  jmp_buf         env;
  SQuery*         pQuery;
  SQueryAttr*     pQueryAttr;
  uint32_t        status;           // query status
  void*           qinfo;
  uint8_t         scanFlag;         // denotes reversed scan of data or not

@@ -257,10 +267,10 @@ typedef struct SQueryRuntimeEnv {
  SSDataBlock          *outputBuf;
  STableGroupInfo       tableqinfoGroupInfo;  // this is a group array list, including SArray<STableQueryInfo*> structure
  struct SOperatorInfo *proot;
  struct SOperatorInfo *pTableScanner;  // table scan operator
  SGroupResInfo         groupResInfo;
  int64_t               currentOffset;  // dynamic offset value

  STableQueryInfo      *current;
  SRspResultInfo        resultInfo;
  SHashObj             *pTableRetrieveTsMap;
} SQueryRuntimeEnv;

@@ -281,13 +291,17 @@ enum OPERATOR_TYPE_E {
  OP_Arithmetic        = 7,
  OP_Groupby           = 8,
  OP_Limit             = 9,
  OP_Offset            = 10,
  OP_SLimit            = 10,
  OP_TimeWindow        = 11,
  OP_SessionWindow     = 12,
  OP_Fill              = 13,
  OP_MultiTableAggregate    = 14,
  OP_MultiTableTimeInterval = 15,
  OP_Having            = 16,
  OP_DummyInput        = 16,  //TODO remove it after fully refactor.
  OP_MultiwayMergeSort = 17,  // multi-way data merge into one input stream.
  OP_GlobalAggregate   = 18,  // global merge for the multi-way data sources.
  OP_Filter            = 19,
  OP_Distinct          = 20,
};

typedef struct SOperatorInfo {

@@ -310,6 +324,12 @@ enum {
  QUERY_RESULT_READY = 2,
};

typedef struct {
  int32_t numOfTags;
  int32_t numOfCols;
  SColumnInfo *colList;
} SQueriedTableInfo;

typedef struct SQInfo {
  void*            signature;
  uint64_t         qId;

@@ -317,7 +337,7 @@ typedef struct SQInfo {
  int64_t          owner;  // if it is in execution

  SQueryRuntimeEnv runtimeEnv;
  SQuery           query;
  SQueryAttr       query;
  void*            pBuf;   // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables;

  pthread_mutex_t  lock;   // used to synchronize the rsp/query threads

@@ -335,14 +355,16 @@ typedef struct SQueryParam {
  char            *tbnameCond;
  char            *prevResult;
  SArray          *pTableIdList;
  SSqlFuncMsg    **pExprMsg;
  SSqlFuncMsg    **pSecExprMsg;
  SSqlExpr       **pExpr;
  SSqlExpr       **pSecExpr;
  SExprInfo       *pExprs;
  SExprInfo       *pSecExprs;

  SColIndex       *pGroupColIndex;
  SColumnInfo     *pTagColumnInfo;
  SSqlGroupbyExpr *pGroupbyExpr;
  int32_t          tableScanOperator;
  SArray          *pOperator;
} SQueryParam;

typedef struct STableScanInfo {

@@ -394,26 +416,39 @@ typedef struct SArithOperatorInfo {
  SOptrBasicInfo binfo;
  int32_t        bufCapacity;
  uint32_t       seed;

  SSDataBlock   *existDataBlock;
} SArithOperatorInfo;

typedef struct SLimitOperatorInfo {
  int64_t limit;
  int64_t total;
  int64_t limit;
  int64_t total;
} SLimitOperatorInfo;

typedef struct SOffsetOperatorInfo {
  int64_t offset;
} SOffsetOperatorInfo;
typedef struct SSLimitOperatorInfo {
  int64_t groupTotal;
  int64_t currentGroupOffset;

typedef struct SHavingOperatorInfo {
  SArray* fp;
} SHavingOperatorInfo;
  int64_t rowsTotal;
  int64_t currentOffset;
  SLimitVal limit;
  SLimitVal slimit;

  char **prevRow;
  SArray *orderColumnList;
} SSLimitOperatorInfo;

typedef struct SFilterOperatorInfo {
  SSingleColumnFilterInfo *pFilterInfo;
  int32_t numOfFilterCols;
} SFilterOperatorInfo;

typedef struct SFillOperatorInfo {
  SFillInfo   *pFillInfo;
  SSDataBlock *pRes;
  int64_t      totalInputRows;

  SSDataBlock *existNewGroupBlock;
} SFillOperatorInfo;

typedef struct SGroupbyOperatorInfo {

@@ -430,25 +465,95 @@ typedef struct SSWindowOperatorInfo {
  int32_t start;  // start row index
} SSWindowOperatorInfo;

typedef struct SDistinctOperatorInfo {
  SHashObj    *pSet;
  SSDataBlock *pRes;
  bool         recordNullVal;  //has already record the null value, no need to try again
  int64_t      threshold;
  int64_t      outputCapacity;
} SDistinctOperatorInfo;

struct SLocalMerger;

typedef struct SMultiwayMergeInfo {
  struct SLocalMerger *pMerge;
  SOptrBasicInfo       binfo;
  int32_t              bufCapacity;
  int64_t              seed;
  char               **prevRow;
  SArray              *orderColumnList;
  int32_t              resultRowFactor;

  bool                 hasGroupColData;
  char               **currentGroupColData;
  SArray              *groupColumnList;
  bool                 hasDataBlockForNewGroup;
  SSDataBlock         *pExistBlock;

  bool                 hasPrev;
  bool                 groupMix;
} SMultiwayMergeInfo;

SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime, int32_t reverseTime);
SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv, int32_t repeatTime);
SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);

SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream);
SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
SOperatorInfo* createMultiwaySortOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput,
                                              int32_t numOfRows, void* merger, bool groupMix);
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param);
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger);
SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);

SSDataBlock* doGlobalAggregate(void* param, bool* newgroup);
SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup);
SSDataBlock* doSLimit(void* param, bool* newgroup);

SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows);
void* destroyOutputBuf(SSDataBlock* pBlock);

void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order);
int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput);
void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset);
void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOfInputRows);

void freeParam(SQueryParam *param);
int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param);
int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg,
                                   SColumnInfo* pTagCols);
int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExprInfo** pExprInfo,
                        SSqlExpr** pExprMsg, SColumnInfo* pTagCols, int32_t queryType, void* pMsg);

int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo,
                                           SSqlFuncMsg **pExprMsg, SExprInfo *prevExpr);
                                           SSqlExpr **pExpr, SExprInfo *prevExpr);

SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code);
SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
                        SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery, char* sql, uint64_t *qId);
int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, SQueryParam* param, bool isSTable);
                        SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId, char* sql, uint64_t *qId);

int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* pQInfo, SQueryParam* param, char* start,
                  int32_t prevResultLen, void* merger);

void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters);

STableQueryInfo *createTableQueryInfo(SQueryAttr* pQueryAttr, void* pTable, bool groupbyColumn, STimeWindow win, void* buf);
int32_t buildArithmeticExprFromMsg(SExprInfo *pArithExprInfo, void *pQueryMsg);

bool isQueryKilled(SQInfo *pQInfo);
int32_t checkForQueryBuf(size_t numOfTables);
bool doBuildResCheck(SQInfo* pQInfo);
void setQueryStatus(SQueryRuntimeEnv *pRuntimeEnv, int8_t status);

bool onlyQueryTags(SQuery* pQuery);
bool onlyQueryTags(SQueryAttr* pQueryAttr);
bool isValidQInfo(void *param);

int32_t doDumpQueryResult(SQInfo *pQInfo, char *data);

@@ -457,6 +562,7 @@ size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows);
void setQueryKilled(SQInfo *pQInfo);
void queryCostStatis(SQInfo *pQInfo);
void freeQInfo(SQInfo *pQInfo);
void freeQueryAttr(SQueryAttr *pQuery);

int32_t getMaximumIdleDurationSec();

@@ -237,6 +237,11 @@ int32_t compare_a(tOrderDescriptor *, int32_t numOfRow1, int32_t s1, char *data1
int32_t compare_d(tOrderDescriptor *, int32_t numOfRow1, int32_t s1, char *data1, int32_t numOfRow2, int32_t s2,
                  char *data2);

struct SSDataBlock;
int32_t compare_aRv(struct SSDataBlock* pBlock, SArray* colIndex, int32_t numOfCols, int32_t rowIndex, char** buffer, int32_t order);

int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes);

#ifdef __cplusplus
}
#endif

@@ -0,0 +1,24 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef TDENGINE_QPLAN_H
#define TDENGINE_QPLAN_H

//TODO refactor
SArray* createTableScanPlan(SQueryAttr* pQueryAttr);
SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr);
SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr);

#endif  // TDENGINE_QPLAN_H

@@ -40,8 +40,14 @@ enum SQL_NODE_TYPE {
};

enum SQL_NODE_FROM_TYPE {
  SQL_NODE_FROM_SUBQUERY  = 1,
  SQL_NODE_FROM_NAMELIST  = 2,
  SQL_NODE_FROM_SUBQUERY  = 1,
  SQL_NODE_FROM_TABLELIST = 2,
};

enum SQL_EXPR_FLAG {
  EXPR_FLAG_TS_ERROR      = 1,
  EXPR_FLAG_US_TIMESTAMP  = 2,
  EXPR_FLAG_TIMESTAMP_VAR = 3,
};

extern char tTokenTypeSwitcher[13];

@@ -83,11 +89,11 @@ typedef struct SSessionWindowVal {
  SStrToken gap;
} SSessionWindowVal;

struct SFromInfo;
struct SRelationInfo;

typedef struct SQuerySqlNode {
  struct SArray        *pSelectList;   // select clause
  struct SFromInfo     *from;          // from clause  SArray<SQuerySqlNode>
typedef struct SSqlNode {
  struct SArray        *pSelNodeList;  // select clause
  struct SRelationInfo *from;          // from clause  SArray<SSqlNode>
  struct tSqlExpr      *pWhere;        // where clause [optional]
  SArray               *pGroupby;      // groupby clause, only for tags[optional], SArray<tVariantListItem>
  SArray               *pSortOrder;    // orderby [optional], SArray<tVariantListItem>

@@ -99,25 +105,17 @@ typedef struct SQuerySqlNode {
  SLimitVal             slimit;        // group limit offset [optional]
  SStrToken             sqlstr;        // sql string in select clause
  struct tSqlExpr      *pHaving;       // having clause [optional]
} SQuerySqlNode;
} SSqlNode;

typedef struct STableNamePair {
  SStrToken name;
  SStrToken aliasName;
} STableNamePair;

typedef struct SSubclauseInfo {  // "UNION" multiple select sub-clause
  SQuerySqlNode **pClause;
  int32_t         numOfClause;
} SSubclauseInfo;

typedef struct SFromInfo {
  int32_t       type;        // nested query|table name list
  union {
    SSubclauseInfo *pNode;
    SArray         *tableList;  // SArray<STableNamePair>
  };
} SFromInfo;
typedef struct SRelationInfo {
  int32_t       type;        // nested query|table name list
  SArray       *list;        // SArray<STableNamePair>|SArray<SSqlNode*>
} SRelationInfo;

typedef struct SCreatedTableInfo {
  SStrToken  name;        // table name token

@@ -140,7 +138,7 @@ typedef struct SCreateTableSql {
  } colInfo;

  SArray        *childTableInfo;   // SArray<SCreatedTableInfo>
  SQuerySqlNode *pSelect;
  SSqlNode      *pSelect;
} SCreateTableSql;

typedef struct SAlterTableInfo {

@@ -217,7 +215,7 @@ typedef struct SMiscInfo {
typedef struct SSqlInfo {
  int32_t        type;
  bool           valid;
  SSubclauseInfo subclauseInfo;
  SArray        *list;   // todo refactor
  char           msg[256];
  union {
    SCreateTableSql *pCreateTableInfo;

@@ -237,7 +235,8 @@ typedef struct tSqlExpr {
  SStrToken  colInfo;   // table column info
  tVariant   value;     // the use input value
  SStrToken  token;     // original sql expr string

  uint32_t   flags;

  struct tSqlExpr *pLeft;   // left child
  struct tSqlExpr *pRight;  // right child
  struct SArray   *pParam;  // function parameters list

@@ -254,14 +253,9 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder);
SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index);
SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder);

tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType);

int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right);

tSqlExpr *tSqlExprClone(tSqlExpr *pSrc);
SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias);
SFromInfo *setSubquery(SFromInfo* pFromInfo, SQuerySqlNode *pSqlNode);
void      *destroyFromInfo(SFromInfo* pFromInfo);
SRelationInfo *setTableNameList(SRelationInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias);
SRelationInfo *setSubquery(SRelationInfo* pFromInfo, SArray* pSqlNode);
void      *destroyRelationInfo(SRelationInfo* pFromInfo);

// sql expr leaf node
tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType);

@@ -276,23 +270,23 @@ void tSqlExprDestroy(tSqlExpr *pExpr);
SArray *tSqlExprListAppend(SArray *pList, tSqlExpr *pNode, SStrToken *pDistinct, SStrToken *pToken);
void tSqlExprListDestroy(SArray *pList);

SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere,
SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
                                SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps,
                                SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving);
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right);

SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SQuerySqlNode *pSelect, int32_t type);
SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SSqlNode *pSelect, int32_t type);

SAlterTableInfo *tSetAlterTableInfo(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type, int16_t tableTable);
SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagNames, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists);

void destroyAllSelectClause(SSubclauseInfo *pSql);
void destroyQuerySqlNode(SQuerySqlNode *pSql);
void destroyAllSqlNode(SArray *pSqlNode);
void destroySqlNode(SSqlNode *pSql);
void freeCreateTableInfo(void* p);

SSqlInfo *setSqlInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pTableName, int32_t type);
SSubclauseInfo *setSubclause(SSubclauseInfo *pClause, void *pSqlExprInfo);

SSubclauseInfo *appendSelectClause(SSubclauseInfo *pInfo, void *pSubclause);
SSqlInfo *setSqlInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pTableName, int32_t type);
SArray *setSubclause(SArray *pList, void *pSqlNode);
SArray *appendSelectClause(SArray *pList, void *pSubclause);

void setCreatedTableName(SSqlInfo *pInfo, SStrToken *pTableNameToken, SStrToken *pIfNotExists);

@@ -28,9 +28,9 @@
#define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId)

#define curTimeWindowIndex(_winres)  ((_winres)->curIndex)
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.arg->argValue.i64:1)
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.param[0].i64:1)

int32_t getOutputInterResultBufSize(SQuery* pQuery);
int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr);

size_t getResultRowSize(SQueryRuntimeEnv* pRuntimeEnv);
int32_t initResultRowInfo(SResultRowInfo* pResultRowInfo, int32_t size, int16_t type);

@@ -52,20 +52,11 @@ static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int
  return pResultRowInfo->pResult[slot];
}

static FORCE_INLINE char* getPosInResultPage(SQueryRuntimeEnv* pRuntimeEnv, tFilePage* page, int32_t rowOffset,
                                             int16_t offset, int32_t size) {
  assert(rowOffset >= 0 && pRuntimeEnv != NULL);
static FORCE_INLINE char *getPosInResultPage(SQueryAttr *pQueryAttr, tFilePage* page, int32_t rowOffset, int16_t offset) {
  assert(rowOffset >= 0 && pQueryAttr != NULL);

  SQuery* pQuery = pRuntimeEnv->pQuery;
  int64_t pageSize = pRuntimeEnv->pResultBuf->pageSize;

  int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery);

  // buffer overflow check
  int64_t bufEnd = (rowOffset + offset * numOfRows + size);
  assert(page->num <= pageSize && bufEnd <= page->num);

  return ((char*)page->data) + rowOffset + offset * numOfRows;
  int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery);
  return ((char *)page->data) + rowOffset + offset * numOfRows;
}

bool isNullOperator(SColumnFilterElem *pFilter, const char* minval, const char* maxval, int16_t type);

@ -450,16 +450,16 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
}

//////////////////////// The SELECT statement /////////////////////////////////
%type select {SQuerySqlNode*}
%destructor select {destroyQuerySqlNode($$);}
%type select {SSqlNode*}
%destructor select {destroySqlNode($$);}
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). {
  A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &S, F, &L, &G, N);
}

select(A) ::= LP select(B) RP. {A = B;}

%type union {SSubclauseInfo*}
%destructor union {destroyAllSelectClause($$);}
%type union {SArray*}
%destructor union {destroyAllSqlNode($$);}
union(Y) ::= select(X). { Y = setSubclause(NULL, X); }
union(Y) ::= union(Z) UNION ALL select(X). { Y = appendSelectClause(Z, X); }

@ -505,35 +505,30 @@ distinct(X) ::= DISTINCT(Y). { X = Y; }
distinct(X) ::= . { X.n = 0;}

// A complete FROM clause.
%type from {SFromInfo*}
%type from {SRelationInfo*}
%destructor from {destroyRelationInfo($$);}
from(A) ::= FROM tablelist(X). {A = X;}
from(A) ::= FROM LP union(Y) RP. {A = Y;}
from(A) ::= FROM LP union(Y) RP. {A = setSubquery(NULL, Y);}

%type tablelist {SArray*}
%type tablelist {SRelationInfo*}
%destructor tablelist {destroyRelationInfo($$);}
tablelist(A) ::= ids(X) cpxName(Y). {
  toTSDBType(X.type);
  X.n += Y.n;
  A = setTableNameList(NULL, &X, NULL);
}

tablelist(A) ::= ids(X) cpxName(Y) ids(Z). {
  toTSDBType(X.type);
  toTSDBType(Z.type);
  X.n += Y.n;
  A = setTableNameList(NULL, &X, &Z);
}

tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z). {
  toTSDBType(X.type);
  X.n += Z.n;
  A = setTableNameList(Y, &X, NULL);
}

tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). {
  toTSDBType(X.type);
  toTSDBType(F.type);
  X.n += Z.n;
  A = setTableNameList(Y, &X, &F);
}

@ -674,6 +669,8 @@ expr(A) ::= PLUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSqlExprCr
expr(A) ::= STRING(X).            { A = tSqlExprCreateIdValue(&X, TK_STRING);}
expr(A) ::= NOW(X).               { A = tSqlExprCreateIdValue(&X, TK_NOW); }
expr(A) ::= VARIABLE(X).          { A = tSqlExprCreateIdValue(&X, TK_VARIABLE);}
expr(A) ::= PLUS(X) VARIABLE(Y).  { X.n += Y.n; X.type = TK_VARIABLE; A = tSqlExprCreateIdValue(&X, TK_VARIABLE);}
expr(A) ::= MINUS(X) VARIABLE(Y). { X.n += Y.n; X.type = TK_VARIABLE; A = tSqlExprCreateIdValue(&X, TK_VARIABLE);}
expr(A) ::= BOOL(X).              { A = tSqlExprCreateIdValue(&X, TK_BOOL);}
expr(A) ::= NULL(X).              { A = tSqlExprCreateIdValue(&X, TK_NULL);}
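Note: the reworked grammar rules above thread UNION ALL branches through a flat SArray of SSqlNode pointers instead of the old SSubclauseInfo container. A minimal sketch of the call pattern the generated actions follow (the node variables are hypothetical; it assumes the qSqlparser.h declarations shown earlier):

// first and second are SSqlNode* produced by tSetQuerySqlNode() for each SELECT branch.
SArray *unionList = setSubclause(NULL, first);       // union(Y) ::= select(X)
unionList = appendSelectClause(unionList, second);   // union(Y) ::= union(Z) UNION ALL select(X)
destroyAllSqlNode(unionList);                        // frees every SSqlNode and the SArray itself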
@ -375,12 +375,6 @@ int32_t isValidFunction(const char* name, int32_t len) {
  return -1;
}

// set the query flag to denote that query is completed
static void no_next_step(SQLFunctionCtx *pCtx) {
  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
  pResInfo->complete = true;
}

static bool function_setup(SQLFunctionCtx *pCtx) {
  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
  if (pResInfo->initialized) {

@ -1540,7 +1534,7 @@ static void stddev_function_f(SQLFunctionCtx *pCtx, int32_t index) {
  }
}

static void stddev_next_step(SQLFunctionCtx *pCtx) {
static UNUSED_FUNC void stddev_next_step(SQLFunctionCtx *pCtx) {
  /*
   * the stddevInfo and the average info struct share the same buffer area
   * And the position of each element in their struct is exactly the same matched
@ -2482,6 +2476,29 @@ static STopBotInfo *getTopBotOutputInfo(SQLFunctionCtx *pCtx) {
  }
}

/*
 * keep the intermediate results during scan data blocks in the format of:
 * +-----------------------------------+-------------one value pair-----------+------------next value pair-----------+
 * |-------------pointer area----------|----ts---+-----+-----n tags-----------|----ts---+-----+-----n tags-----------|
 * +..[Value Pointer1][Value Pointer2].|timestamp|value|tags1|tags2|....|tagsn|timestamp|value|tags1|tags2|....|tagsn+
 */
static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
  char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo);
  pTopBotInfo->res = (tValuePair**) tmp;
  tmp += POINTER_BYTES * pCtx->param[0].i64;

  size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen;
  // assert(pCtx->param[0].i64 > 0);

  for (int32_t i = 0; i < pCtx->param[0].i64; ++i) {
    pTopBotInfo->res[i] = (tValuePair*) tmp;
    pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair);
    tmp += size;
  }
}

bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval) {
  SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
  if (pResInfo == NULL) {

@ -2495,6 +2512,10 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const cha
    return true;
  }

  if ((void *)pTopBotInfo->res[0] != (void *)((char *)pTopBotInfo + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
    buildTopBotStruct(pTopBotInfo, pCtx);
  }

  tValuePair **pRes = (tValuePair**) pTopBotInfo->res;

  if (pCtx->functionId == TSDB_FUNC_TOP) {
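Note: the layout comment above implies a fixed per-entry footprint, a pointer slot plus one (timestamp, value, tags) record per requested result. A minimal sketch of the size computation this layout requires, with a hypothetical helper name (assumes the STopBotInfo and tValuePair definitions used in this file):

// k result slots after the STopBotInfo header: k pointers, then k (tValuePair + tag payload) records.
static size_t topBotBufSize(int32_t k, int32_t tagsLen) {
  return sizeof(STopBotInfo) + (size_t)k * POINTER_BYTES + (size_t)k * (sizeof(tValuePair) + tagsLen);
}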
@ -2534,27 +2555,6 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const cha
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* keep the intermediate results during scan data blocks in the format of:
|
||||
* +-----------------------------------+-------------one value pair-----------+------------next value pair-----------+
|
||||
* |-------------pointer area----------|----ts---+-----+-----n tags-----------|----ts---+-----+-----n tags-----------|
|
||||
* +..[Value Pointer1][Value Pointer2].|timestamp|value|tags1|tags2|....|tagsn|timestamp|value|tags1|tags2|....|tagsn+
|
||||
*/
|
||||
static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) {
|
||||
char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo);
|
||||
pTopBotInfo->res = (tValuePair**) tmp;
|
||||
tmp += POINTER_BYTES * pCtx->param[0].i64;
|
||||
|
||||
size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen;
|
||||
// assert(pCtx->param[0].i64 > 0);
|
||||
|
||||
for (int32_t i = 0; i < pCtx->param[0].i64; ++i) {
|
||||
pTopBotInfo->res[i] = (tValuePair*) tmp;
|
||||
pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair);
|
||||
tmp += size;
|
||||
}
|
||||
}
|
||||
|
||||
static bool top_bottom_function_setup(SQLFunctionCtx *pCtx) {
|
||||
if (!function_setup(pCtx)) {
|
||||
return false;
|
||||
|
@ -2609,6 +2609,10 @@ static void top_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
|||
|
||||
STopBotInfo *pRes = getTopBotOutputInfo(pCtx);
|
||||
assert(pRes->num >= 0);
|
||||
|
||||
if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) {
|
||||
buildTopBotStruct(pRes, pCtx);
|
||||
}
|
||||
|
||||
SET_VAL(pCtx, 1, 1);
|
||||
TSKEY ts = GET_TS_DATA(pCtx, index);
|
||||
|
@ -2911,7 +2915,7 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
|
|||
doFinalizer(pCtx);
|
||||
}
|
||||
|
||||
static void percentile_next_step(SQLFunctionCtx *pCtx) {
|
||||
static UNUSED_FUNC void percentile_next_step(SQLFunctionCtx *pCtx) {
|
||||
SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx);
|
||||
SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
|
||||
|
||||
|
@ -3016,7 +3020,7 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) {
|
|||
|
||||
pInput->pHisto = (SHistogramInfo*) ((char *)pInput + sizeof(SAPercentileInfo));
|
||||
pInput->pHisto->elems = (SHistBin*) ((char *)pInput->pHisto + sizeof(SHistogramInfo));
|
||||
|
||||
|
||||
if (pInput->pHisto->numOfElems <= 0) {
|
||||
return;
|
||||
}
|
||||
|
@ -3035,7 +3039,7 @@ static void apercentile_func_merge(SQLFunctionCtx *pCtx) {
|
|||
pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));
|
||||
tHistogramDestroy(&pRes);
|
||||
}
|
||||
|
||||
|
||||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
pResInfo->hasResult = DATA_SET_FLAG;
|
||||
SET_VAL(pCtx, 1, 1);
|
||||
|
@ -3046,7 +3050,7 @@ static void apercentile_finalizer(SQLFunctionCtx *pCtx) {
|
|||
|
||||
SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx);
|
||||
SAPercentileInfo *pOutput = GET_ROWCELL_INTERBUF(pResInfo);
|
||||
|
||||
|
||||
if (pCtx->currentStage == MERGE_STAGE) {
|
||||
if (pResInfo->hasResult == DATA_SET_FLAG) { // check for null
|
||||
assert(pOutput->pHisto->numOfElems > 0);
|
||||
|
@ -3322,8 +3326,6 @@ static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
|||
INC_INIT_VAL(pCtx, 1);
|
||||
char *pData = GET_INPUT_DATA(pCtx, index);
|
||||
memcpy(pCtx->pOutput, pData, pCtx->inputBytes);
|
||||
|
||||
pCtx->pOutput += pCtx->inputBytes;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -3361,9 +3363,15 @@ static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
|||
* @param pCtx
|
||||
* @return
|
||||
*/
|
||||
static void copy_function(SQLFunctionCtx *pCtx);
|
||||
|
||||
static void tag_function(SQLFunctionCtx *pCtx) {
|
||||
SET_VAL(pCtx, 1, 1);
|
||||
tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true);
|
||||
if (pCtx->currentStage == MERGE_STAGE) {
|
||||
copy_function(pCtx);
|
||||
} else {
|
||||
tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true);
|
||||
}
|
||||
}
|
||||
|
||||
static void tag_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
||||
|
@ -3700,7 +3708,7 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) {
|
|||
GET_RES_INFO(pCtx)->numOfRes += pCtx->size;
|
||||
SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz;
|
||||
|
||||
arithmeticTreeTraverse(sas->pArithExpr->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData);
|
||||
arithmeticTreeTraverse(sas->pExprInfo->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData);
|
||||
}
|
||||
|
||||
static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
||||
|
@ -3708,7 +3716,7 @@ static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) {
|
|||
SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz;
|
||||
|
||||
sas->offset = index;
|
||||
arithmeticTreeTraverse(sas->pArithExpr->pExpr, 1, pCtx->pOutput, sas, pCtx->order, getArithColumnData);
|
||||
arithmeticTreeTraverse(sas->pExprInfo->pExpr, 1, pCtx->pOutput, sas, pCtx->order, getArithColumnData);
|
||||
|
||||
pCtx->pOutput += pCtx->outputBytes;
|
||||
}
|
||||
|
@ -4897,7 +4905,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
count_function,
|
||||
count_function_f,
|
||||
no_next_step,
|
||||
doFinalizer,
|
||||
count_func_merge,
|
||||
countRequired,
|
||||
|
@ -4911,7 +4918,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
sum_function,
|
||||
sum_function_f,
|
||||
no_next_step,
|
||||
function_finalizer,
|
||||
sum_func_merge,
|
||||
statisRequired,
|
||||
|
@ -4925,7 +4931,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
avg_function,
|
||||
avg_function_f,
|
||||
no_next_step,
|
||||
avg_finalizer,
|
||||
avg_func_merge,
|
||||
statisRequired,
|
||||
|
@ -4939,7 +4944,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
min_func_setup,
|
||||
min_function,
|
||||
min_function_f,
|
||||
no_next_step,
|
||||
function_finalizer,
|
||||
min_func_merge,
|
||||
statisRequired,
|
||||
|
@ -4953,7 +4957,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
max_func_setup,
|
||||
max_function,
|
||||
max_function_f,
|
||||
no_next_step,
|
||||
function_finalizer,
|
||||
max_func_merge,
|
||||
statisRequired,
|
||||
|
@ -4967,7 +4970,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
stddev_function,
|
||||
stddev_function_f,
|
||||
stddev_next_step,
|
||||
stddev_finalizer,
|
||||
noop1,
|
||||
dataBlockRequired,
|
||||
|
@ -4981,7 +4983,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
percentile_function_setup,
|
||||
percentile_function,
|
||||
percentile_function_f,
|
||||
percentile_next_step,
|
||||
percentile_finalizer,
|
||||
noop1,
|
||||
dataBlockRequired,
|
||||
|
@ -4995,7 +4996,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
apercentile_function_setup,
|
||||
apercentile_function,
|
||||
apercentile_function_f,
|
||||
no_next_step,
|
||||
apercentile_finalizer,
|
||||
apercentile_func_merge,
|
||||
dataBlockRequired,
|
||||
|
@ -5009,7 +5009,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
first_function,
|
||||
first_function_f,
|
||||
no_next_step,
|
||||
function_finalizer,
|
||||
noop1,
|
||||
firstFuncRequired,
|
||||
|
@ -5023,7 +5022,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
last_function,
|
||||
last_function_f,
|
||||
no_next_step,
|
||||
function_finalizer,
|
||||
noop1,
|
||||
lastFuncRequired,
|
||||
|
@ -5038,7 +5036,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
first_last_function_setup,
|
||||
last_row_function,
|
||||
noop2,
|
||||
no_next_step,
|
||||
last_row_finalizer,
|
||||
last_dist_func_merge,
|
||||
dataBlockRequired,
|
||||
|
@ -5053,7 +5050,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
top_bottom_function_setup,
|
||||
top_function,
|
||||
top_function_f,
|
||||
no_next_step,
|
||||
top_bottom_func_finalizer,
|
||||
top_func_merge,
|
||||
dataBlockRequired,
|
||||
|
@ -5068,7 +5064,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
top_bottom_function_setup,
|
||||
bottom_function,
|
||||
bottom_function_f,
|
||||
no_next_step,
|
||||
top_bottom_func_finalizer,
|
||||
bottom_func_merge,
|
||||
dataBlockRequired,
|
||||
|
@ -5082,7 +5077,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
spread_function_setup,
|
||||
spread_function,
|
||||
spread_function_f,
|
||||
no_next_step,
|
||||
spread_function_finalizer,
|
||||
spread_func_merge,
|
||||
countRequired,
|
||||
|
@ -5096,7 +5090,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
twa_function_setup,
|
||||
twa_function,
|
||||
twa_function_f,
|
||||
no_next_step,
|
||||
twa_function_finalizer,
|
||||
twa_function_copy,
|
||||
dataBlockRequired,
|
||||
|
@ -5110,7 +5103,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
leastsquares_function_setup,
|
||||
leastsquares_function,
|
||||
leastsquares_function_f,
|
||||
no_next_step,
|
||||
leastsquares_finalizer,
|
||||
noop1,
|
||||
dataBlockRequired,
|
||||
|
@ -5124,35 +5116,32 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
date_col_output_function,
|
||||
date_col_output_function_f,
|
||||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
noDataRequired,
|
||||
},
|
||||
{
|
||||
// 17
|
||||
"ts",
|
||||
"ts_dummy",
|
||||
TSDB_FUNC_TS_DUMMY,
|
||||
TSDB_FUNC_TS_DUMMY,
|
||||
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
|
||||
function_setup,
|
||||
noop1,
|
||||
noop2,
|
||||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
dataBlockRequired,
|
||||
},
|
||||
{
|
||||
// 18
|
||||
"tag",
|
||||
"tag_dummy",
|
||||
TSDB_FUNC_TAG_DUMMY,
|
||||
TSDB_FUNC_TAG_DUMMY,
|
||||
TSDB_BASE_FUNC_SO,
|
||||
function_setup,
|
||||
tag_function,
|
||||
noop2,
|
||||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
noDataRequired,
|
||||
|
@ -5166,7 +5155,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
ts_comp_function_setup,
|
||||
ts_comp_function,
|
||||
ts_comp_function_f,
|
||||
no_next_step,
|
||||
ts_comp_finalize,
|
||||
copy_function,
|
||||
dataBlockRequired,
|
||||
|
@ -5180,7 +5168,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
tag_function,
|
||||
tag_function_f,
|
||||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
noDataRequired,
|
||||
|
@ -5194,7 +5181,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
col_project_function,
|
||||
col_project_function_f,
|
||||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
dataBlockRequired,
|
||||
|
@ -5208,7 +5194,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
tag_project_function,
|
||||
tag_project_function_f,
|
||||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
noDataRequired,
|
||||
|
@ -5222,7 +5207,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
arithmetic_function,
|
||||
arithmetic_function_f,
|
||||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
dataBlockRequired,
|
||||
|
@ -5236,7 +5220,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
diff_function_setup,
|
||||
diff_function,
|
||||
diff_function_f,
|
||||
no_next_step,
|
||||
doFinalizer,
|
||||
noop1,
|
||||
dataBlockRequired,
|
||||
|
@ -5251,7 +5234,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
first_last_function_setup,
|
||||
first_dist_function,
|
||||
first_dist_function_f,
|
||||
no_next_step,
|
||||
function_finalizer,
|
||||
first_dist_func_merge,
|
||||
firstDistFuncRequired,
|
||||
|
@ -5265,7 +5247,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
first_last_function_setup,
|
||||
last_dist_function,
|
||||
last_dist_function_f,
|
||||
no_next_step,
|
||||
function_finalizer,
|
||||
last_dist_func_merge,
|
||||
lastDistFuncRequired,
|
||||
|
@ -5279,7 +5260,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
stddev_dst_function,
|
||||
stddev_dst_function_f,
|
||||
no_next_step,
|
||||
stddev_dst_finalizer,
|
||||
stddev_dst_merge,
|
||||
dataBlockRequired,
|
||||
|
@ -5293,7 +5273,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
interp_function,
|
||||
do_sum_f, // todo filter handle
|
||||
no_next_step,
|
||||
doFinalizer,
|
||||
copy_function,
|
||||
dataBlockRequired,
|
||||
|
@ -5307,7 +5286,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
rate_function_setup,
|
||||
rate_function,
|
||||
rate_function_f,
|
||||
no_next_step,
|
||||
rate_finalizer,
|
||||
rate_func_copy,
|
||||
dataBlockRequired,
|
||||
|
@ -5321,7 +5299,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
rate_function_setup,
|
||||
irate_function,
|
||||
irate_function_f,
|
||||
no_next_step,
|
||||
rate_finalizer,
|
||||
rate_func_copy,
|
||||
dataBlockRequired,
|
||||
|
@ -5335,7 +5312,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
rate_function_setup,
|
||||
rate_function,
|
||||
rate_function_f,
|
||||
no_next_step,
|
||||
sumrate_finalizer,
|
||||
sumrate_func_merge,
|
||||
dataBlockRequired,
|
||||
|
@ -5349,7 +5325,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
rate_function_setup,
|
||||
irate_function,
|
||||
irate_function_f,
|
||||
no_next_step,
|
||||
sumrate_finalizer,
|
||||
sumrate_func_merge,
|
||||
dataBlockRequired,
|
||||
|
@ -5363,7 +5338,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
rate_function_setup,
|
||||
rate_function,
|
||||
rate_function_f,
|
||||
no_next_step,
|
||||
sumrate_finalizer,
|
||||
sumrate_func_merge,
|
||||
dataBlockRequired,
|
||||
|
@ -5377,7 +5351,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
rate_function_setup,
|
||||
irate_function,
|
||||
irate_function_f,
|
||||
no_next_step,
|
||||
sumrate_finalizer,
|
||||
sumrate_func_merge,
|
||||
dataBlockRequired,
|
||||
|
@ -5391,7 +5364,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
noop1,
|
||||
noop2,
|
||||
no_next_step,
|
||||
noop1,
|
||||
noop1,
|
||||
dataBlockRequired,
|
||||
|
@ -5405,7 +5377,6 @@ SAggFunctionInfo aAggs[] = {{
|
|||
function_setup,
|
||||
blockInfo_func,
|
||||
noop2,
|
||||
no_next_step,
|
||||
blockinfo_func_finalizer,
|
||||
block_func_merge,
|
||||
dataBlockRequired,
|
||||
|
|
File diff suppressed because it is too large
@ -20,6 +20,7 @@
#include "taosdef.h"
#include "taosmsg.h"
#include "tulog.h"
#include "qExecutor.h"

#define COLMODEL_GET_VAL(data, schema, allrow, rowId, colId) \
  (data + (schema)->pFields[colId].offset * (allrow) + (rowId) * (schema)->pFields[colId].field.bytes)

@ -266,6 +267,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) {
    size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file);
    if (retVal <= 0) {  // failed to write to buffer, may be not enough space
      ret = TAOS_SYSTEM_ERROR(errno);
      pMemBuffer->pHead = first;
      return ret;
    }

@ -351,47 +353,28 @@ static FORCE_INLINE int32_t primaryKeyComparator(int64_t f1, int64_t f2, int32_t
  }
}

static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes) {
static int32_t tsCompareFunc(TSKEY k1, TSKEY k2, int32_t order) {
  if (k1 == k2) {
    return 0;
  }

  if (order == TSDB_ORDER_DESC) {
    return (k1 < k2)? 1:-1;
  } else {
    return (k1 < k2)? -1:1;
  }
}

int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes) {
  switch (type) {
    case TSDB_DATA_TYPE_INT: {
      int32_t first = *(int32_t *) f1;
      int32_t second = *(int32_t *) f2;
      if (first == second) {
        return 0;
      }
      return (first < second) ? -1 : 1;
    };
    case TSDB_DATA_TYPE_DOUBLE: {
      DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2));
    };
    case TSDB_DATA_TYPE_FLOAT: {
      DEFAULT_FLOAT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2));
    };
    case TSDB_DATA_TYPE_BIGINT: {
      int64_t first = *(int64_t *)f1;
      int64_t second = *(int64_t *)f2;
      if (first == second) {
        return 0;
      }
      return (first < second) ? -1 : 1;
    };
    case TSDB_DATA_TYPE_SMALLINT: {
      int16_t first = *(int16_t *)f1;
      int16_t second = *(int16_t *)f2;
      if (first == second) {
        return 0;
      }
      return (first < second) ? -1 : 1;
    };
    case TSDB_DATA_TYPE_INT: DEFAULT_COMP(GET_INT32_VAL(f1), GET_INT32_VAL(f2));
    case TSDB_DATA_TYPE_DOUBLE: DEFAULT_DOUBLE_COMP(GET_DOUBLE_VAL(f1), GET_DOUBLE_VAL(f2));
    case TSDB_DATA_TYPE_FLOAT: DEFAULT_FLOAT_COMP(GET_FLOAT_VAL(f1), GET_FLOAT_VAL(f2));
    case TSDB_DATA_TYPE_BIGINT: DEFAULT_COMP(GET_INT64_VAL(f1), GET_INT64_VAL(f2));
    case TSDB_DATA_TYPE_SMALLINT:DEFAULT_COMP(GET_INT16_VAL(f1), GET_INT16_VAL(f2));
    case TSDB_DATA_TYPE_BOOL:
    case TSDB_DATA_TYPE_TINYINT: {
      int8_t first = *(int8_t *)f1;
      int8_t second = *(int8_t *)f2;
      if (first == second) {
        return 0;
      }
      return (first < second) ? -1 : 1;
    };
    case TSDB_DATA_TYPE_TINYINT: DEFAULT_COMP(GET_INT8_VAL(f1), GET_INT8_VAL(f2));

    case TSDB_DATA_TYPE_BINARY: {
      int32_t len1 = varDataLen(f1);
      int32_t len2 = varDataLen(f2);

@ -414,6 +397,10 @@ static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, i
      }
      return (ret < 0) ? -1 : 1;
    };
    case TSDB_DATA_TYPE_UTINYINT: DEFAULT_COMP(GET_UINT8_VAL(f1), GET_UINT8_VAL(f2));
    case TSDB_DATA_TYPE_USMALLINT: DEFAULT_COMP(GET_UINT16_VAL(f1), GET_UINT16_VAL(f2));
    case TSDB_DATA_TYPE_UINT: DEFAULT_COMP(GET_UINT32_VAL(f1), GET_UINT32_VAL(f2));
    case TSDB_DATA_TYPE_UBIGINT: DEFAULT_COMP(GET_UINT64_VAL(f1), GET_UINT64_VAL(f2));
  }

  return 0;

@ -451,6 +438,35 @@ int32_t compare_a(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1,
  return 0;
}

int32_t compare_aRv(SSDataBlock* pBlock, SArray* colIndex, int32_t numOfCols, int32_t rowIndex, char** buffer, int32_t order) {
  for (int32_t i = 0; i < numOfCols; ++i) {
    SColIndex* pColIndex = taosArrayGet(colIndex, i);
    int32_t index = pColIndex->colIndex;

    SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, index);
    assert(pColIndex->colId == pColInfo->info.colId);

    char* data = pColInfo->pData + rowIndex * pColInfo->info.bytes;
    if (pColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
      int32_t ret = tsCompareFunc(GET_INT64_VAL(data), GET_INT64_VAL(buffer[i]), order);
      if (ret == 0) {
        continue; // The timestamps are identical
      } else {
        return ret;
      }
    } else {
      int32_t ret = columnValueAscendingComparator(data, buffer[i], pColInfo->info.type, pColInfo->info.bytes);
      if (ret == 0) {
        continue;
      } else {
        return ret;
      }
    }
  }

  return 0;
}

int32_t compare_d(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1, char *data1, int32_t numOfRows2,
                  int32_t s2, char *data2) {
  assert(numOfRows1 == numOfRows2);
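Note: the rewritten comparator above folds the per-type branches into the DEFAULT_COMP macro family and adds an order-aware tsCompareFunc for the timestamp column used by compare_aRv. A minimal illustration of the intended semantics (hypothetical values; tsCompareFunc is file-local, so this only sketches its behaviour rather than an external API):

int32_t a = 5, b = 9;
// Ascending comparison of two INT cells: negative, zero or positive, like memcmp.
int32_t r = columnValueAscendingComparator((char *)&a, (char *)&b, TSDB_DATA_TYPE_INT, sizeof(int32_t));   // -1
// Timestamps honour the query order: with TSDB_ORDER_DESC the sign is inverted.
int32_t t = tsCompareFunc(1000, 2000, TSDB_ORDER_DESC);   // 1, because 1000 < 2000 but the order is descending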
@ -31,7 +31,7 @@
|
|||
static void setTagsValue(SFillInfo* pFillInfo, void** data, int32_t genRows) {
|
||||
for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[j];
|
||||
if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) {
|
||||
if (TSDB_COL_IS_NORMAL_COL(pCol->flag) || TSDB_COL_IS_UD_COL(pCol->flag)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -126,10 +126,10 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData
|
|||
} else {
|
||||
setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index);
|
||||
}
|
||||
} else { /* fill the default value */
|
||||
} else { // fill the default value */
|
||||
for (int32_t i = 1; i < pFillInfo->numOfCols; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)) {
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)/* || IS_VAR_DATA_TYPE(pCol->col.type)*/) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -210,7 +210,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR
|
|||
// assign rows to dst buffer
|
||||
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)) {
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)/* || IS_VAR_DATA_TYPE(pCol->col.type)*/) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -275,13 +275,16 @@ static int64_t appendFilledResult(SFillInfo* pFillInfo, void** output, int64_t r
|
|||
// there are no duplicated tags in the SFillTagColInfo list
|
||||
static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t capacity) {
|
||||
int32_t rowsize = 0;
|
||||
int32_t numOfTags = 0;
|
||||
|
||||
int32_t k = 0;
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SFillColInfo* pColInfo = &pFillInfo->pFillCol[i];
|
||||
pFillInfo->pData[i] = NULL;
|
||||
|
||||
if (TSDB_COL_IS_TAG(pColInfo->flag)) {
|
||||
if (TSDB_COL_IS_TAG(pColInfo->flag) || pColInfo->col.type == TSDB_DATA_TYPE_BINARY) {
|
||||
numOfTags += 1;
|
||||
|
||||
bool exists = false;
|
||||
int32_t index = -1;
|
||||
for (int32_t j = 0; j < k; ++j) {
|
||||
|
@ -310,6 +313,8 @@ static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t
|
|||
rowsize += pColInfo->col.bytes;
|
||||
}
|
||||
|
||||
pFillInfo->numOfTags = numOfTags;
|
||||
|
||||
assert(k <= pFillInfo->numOfTags);
|
||||
return rowsize;
|
||||
}
|
||||
|
@ -347,12 +352,13 @@ SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int3
|
|||
pFillInfo->interval.slidingUnit = slidingUnit;
|
||||
|
||||
pFillInfo->pData = malloc(POINTER_BYTES * numOfCols);
|
||||
if (numOfTags > 0) {
|
||||
pFillInfo->pTags = calloc(pFillInfo->numOfTags, sizeof(SFillTagColInfo));
|
||||
for (int32_t i = 0; i < numOfTags; ++i) {
|
||||
|
||||
// if (numOfTags > 0) {
|
||||
pFillInfo->pTags = calloc(numOfCols, sizeof(SFillTagColInfo));
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
pFillInfo->pTags[i].col.colId = -2; // TODO
|
||||
}
|
||||
}
|
||||
// }
|
||||
|
||||
pFillInfo->rowSize = setTagColumnInfo(pFillInfo, pFillInfo->numOfCols, pFillInfo->alloc);
|
||||
assert(pFillInfo->rowSize > 0);
|
||||
|
@ -367,6 +373,7 @@ SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int3
|
|||
void taosResetFillInfo(SFillInfo* pFillInfo, TSKEY startTimestamp) {
|
||||
pFillInfo->start = startTimestamp;
|
||||
pFillInfo->currentKey = startTimestamp;
|
||||
pFillInfo->end = startTimestamp;
|
||||
pFillInfo->index = -1;
|
||||
pFillInfo->numOfRows = 0;
|
||||
pFillInfo->numOfCurrent = 0;
|
||||
|
@ -425,6 +432,8 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey)
|
|||
|
||||
void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput) {
|
||||
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
|
||||
SColumnInfoData* pColData = taosArrayGet(pInput->pDataBlock, i);
|
||||
// pFillInfo->pData[i] = pColData->pData;
|
||||
if (pInput->info.rows > pFillInfo->alloc) {
|
||||
|
@ -436,6 +445,12 @@ void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput)
|
|||
}
|
||||
|
||||
memcpy(pFillInfo->pData[i], pColData->pData, pColData->info.bytes * pInput->info.rows);
|
||||
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)/* || IS_VAR_DATA_TYPE(pCol->col.type)*/) { // copy the tag value to tag value buffer
|
||||
SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex];
|
||||
assert (pTag->col.colId == pCol->col.colId);
|
||||
memcpy(pTag->tagVal, pColData->pData, pCol->col.bytes); // TODO not memcpy??
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -456,7 +471,7 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage*
|
|||
|
||||
memcpy(pFillInfo->pData[i], data, (size_t)(pCol->col.bytes * pInput->num));
|
||||
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer
|
||||
if (TSDB_COL_IS_TAG(pCol->flag)/* || IS_VAR_DATA_TYPE(pCol->col.type)*/) { // copy the tag value to tag value buffer
|
||||
SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex];
|
||||
assert (pTag->col.colId == pCol->col.colId);
|
||||
memcpy(pTag->tagVal, data, pCol->col.bytes); // TODO not memcpy??
|
||||
|
@ -465,7 +480,17 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage*
|
|||
}
|
||||
|
||||
bool taosFillHasMoreResults(SFillInfo* pFillInfo) {
|
||||
return taosNumOfRemainRows(pFillInfo) > 0;
|
||||
int32_t remain = taosNumOfRemainRows(pFillInfo);
|
||||
if (remain > 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (pFillInfo->numOfTotal > 0 && (((pFillInfo->end > pFillInfo->start) && FILL_IS_ASC_FILL(pFillInfo)) ||
|
||||
(pFillInfo->end < pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo)))) {
|
||||
return getNumOfResultsAfterFillGap(pFillInfo, pFillInfo->end, 4096) > 0;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows) {
|
||||
|
|
|
@ -0,0 +1,195 @@
|
|||
#include "os.h"
|
||||
#include "tsclient.h"
|
||||
#include "qUtil.h"
|
||||
#include "texpr.h"
|
||||
|
||||
#define QNODE_PROJECT 1
|
||||
#define QNODE_FILTER 2
|
||||
#define QNODE_RELATION 3
|
||||
#define QNODE_AGGREGATE 4
|
||||
#define QNODE_GROUPBY 5
|
||||
#define QNODE_LIMIT 6
|
||||
#define QNODE_JOIN 7
|
||||
#define QNODE_DIST 8
|
||||
#define QNODE_SORT 9
|
||||
#define QNODE_UNIONALL 10
|
||||
#define QNODE_TIMEWINDOW 11
|
||||
|
||||
typedef struct SQueryNode {
|
||||
int32_t type; // the type of logic node
|
||||
char *name; // the name of logic node
|
||||
|
||||
SSchema *pSchema; // the schema of the input SSDatablock
|
||||
int32_t numOfCols; // number of input columns
|
||||
SExprInfo *pExpr; // the query functions or sql aggregations
|
||||
int32_t numOfOutput; // number of result columns, which is also the number of pExprs
|
||||
|
||||
// previous operator to generated result for current node to process
|
||||
// in case of join, multiple prev nodes exist.
|
||||
struct SQueryNode* prevNode;
|
||||
struct SQueryNode* nextNode;
|
||||
} SQueryNode;
|
||||
|
||||
// TODO create the query plan
|
||||
SQueryNode* qCreateQueryPlan(SQueryInfo* pQueryInfo) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
char* queryPlanToString() {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SQueryNode* queryPlanFromString() {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SArray* createTableScanPlan(SQueryAttr* pQueryAttr) {
|
||||
SArray* plan = taosArrayInit(4, sizeof(int32_t));
|
||||
|
||||
int32_t op = 0;
|
||||
if (onlyQueryTags(pQueryAttr)) {
|
||||
// op = OP_TagScan;
|
||||
} else {
|
||||
if (pQueryAttr->queryBlockDist) {
|
||||
op = OP_TableBlockInfoScan;
|
||||
} else if (pQueryAttr->tsCompQuery || pQueryAttr->pointInterpQuery) {
|
||||
op = OP_TableSeqScan;
|
||||
} else if (pQueryAttr->needReverseScan) {
|
||||
op = OP_DataBlocksOptScan;
|
||||
} else {
|
||||
op = OP_TableScan;
|
||||
}
|
||||
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
|
||||
return plan;
|
||||
}
|
||||
|
||||
SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
|
||||
SArray* plan = taosArrayInit(4, sizeof(int32_t));
|
||||
int32_t op = 0;
|
||||
|
||||
if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query
|
||||
op = OP_TagScan;
|
||||
taosArrayPush(plan, &op);
|
||||
if (pQueryAttr->distinctTag) {
|
||||
op = OP_Distinct;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
} else if (pQueryAttr->interval.interval > 0) {
|
||||
if (pQueryAttr->stableQuery) {
|
||||
op = OP_MultiTableTimeInterval;
|
||||
taosArrayPush(plan, &op);
|
||||
} else {
|
||||
op = OP_TimeWindow;
|
||||
taosArrayPush(plan, &op);
|
||||
|
||||
if (pQueryAttr->pExpr2 != NULL) {
|
||||
op = OP_Arithmetic;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
|
||||
if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) {
|
||||
op = OP_Fill;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
}
|
||||
|
||||
} else if (pQueryAttr->groupbyColumn) {
|
||||
op = OP_Groupby;
|
||||
taosArrayPush(plan, &op);
|
||||
|
||||
if (!pQueryAttr->stableQuery && pQueryAttr->havingNum > 0) {
|
||||
op = OP_Filter;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
|
||||
if (pQueryAttr->pExpr2 != NULL) {
|
||||
op = OP_Arithmetic;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
} else if (pQueryAttr->sw.gap > 0) {
|
||||
op = OP_SessionWindow;
|
||||
taosArrayPush(plan, &op);
|
||||
|
||||
if (pQueryAttr->pExpr2 != NULL) {
|
||||
op = OP_Arithmetic;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
} else if (pQueryAttr->simpleAgg) {
|
||||
if (pQueryAttr->stableQuery && !pQueryAttr->tsCompQuery) {
|
||||
op = OP_MultiTableAggregate;
|
||||
} else {
|
||||
op = OP_Aggregate;
|
||||
}
|
||||
|
||||
taosArrayPush(plan, &op);
|
||||
|
||||
if (!pQueryAttr->stableQuery && pQueryAttr->havingNum > 0) {
|
||||
op = OP_Filter;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
|
||||
if (pQueryAttr->pExpr2 != NULL && !pQueryAttr->stableQuery) {
|
||||
op = OP_Arithmetic;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
} else { // diff/add/multiply/subtract/division
|
||||
op = OP_Arithmetic;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
|
||||
if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) {
|
||||
op = OP_Limit;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
|
||||
return plan;
|
||||
}
|
||||
|
||||
SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
|
||||
SArray* plan = taosArrayInit(4, sizeof(int32_t));
|
||||
|
||||
if (!pQueryAttr->stableQuery) {
|
||||
return plan;
|
||||
}
|
||||
|
||||
int32_t op = OP_MultiwayMergeSort;
|
||||
taosArrayPush(plan, &op);
|
||||
|
||||
if (pQueryAttr->distinctTag) {
|
||||
op = OP_Distinct;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
|
||||
if (pQueryAttr->simpleAgg || (pQueryAttr->interval.interval > 0 || pQueryAttr->sw.gap > 0)) {
|
||||
op = OP_GlobalAggregate;
|
||||
taosArrayPush(plan, &op);
|
||||
|
||||
if (pQueryAttr->havingNum > 0) {
|
||||
op = OP_Filter;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
|
||||
if (pQueryAttr->pExpr2 != NULL) {
|
||||
op = OP_Arithmetic;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
}
|
||||
|
||||
// fill operator
|
||||
if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) {
|
||||
op = OP_Fill;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
|
||||
// limit/offset operator
|
||||
if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0 ||
|
||||
pQueryAttr->slimit.limit > 0 || pQueryAttr->slimit.offset > 0) {
|
||||
op = OP_SLimit;
|
||||
taosArrayPush(plan, &op);
|
||||
}
|
||||
|
||||
return plan;
|
||||
}
|
|
@ -43,7 +43,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pa
  pResBuf->emptyDummyIdList = taosArrayInit(1, sizeof(int32_t));

  qDebug("QInfo:%"PRIu64" create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", qId, pResBuf->pageSize,
  qDebug("QInfo:0x%"PRIx64" create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", qId, pResBuf->pageSize,
      pResBuf->inMemPages, pResBuf->path);

  return TSDB_CODE_SUCCESS;

@ -410,13 +410,13 @@ void destroyResultBuf(SDiskbasedResultBuf* pResultBuf) {
  }

  if (pResultBuf->file != NULL) {
    qDebug("QInfo:%"PRIu64" res output buffer closed, total:%.2f Kb, inmem size:%.2f Kb, file size:%.2f Kb",
    qDebug("QInfo:0x%"PRIx64" res output buffer closed, total:%.2f Kb, inmem size:%.2f Kb, file size:%.2f Kb",
        pResultBuf->qId, pResultBuf->totalBufSize/1024.0, listNEles(pResultBuf->lruList) * pResultBuf->pageSize / 1024.0,
        pResultBuf->fileSize/1024.0);

    fclose(pResultBuf->file);
  } else {
    qDebug("QInfo:%"PRIu64" res output buffer closed, total:%.2f Kb, no file created", pResultBuf->qId,
    qDebug("QInfo:0x%"PRIx64" res output buffer closed, total:%.2f Kb, no file created", pResultBuf->qId,
        pResultBuf->totalBufSize/1024.0);
  }
@ -144,12 +144,15 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
|
|||
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
|
||||
pSqlExpr->tokenId = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond
|
||||
pSqlExpr->type = SQL_NODE_VALUE;
|
||||
pSqlExpr->flags |= 1 << EXPR_FLAG_US_TIMESTAMP;
|
||||
} else if (optrType == TK_VARIABLE) {
|
||||
int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
|
||||
}
|
||||
|
||||
pSqlExpr->flags |= 1 << EXPR_FLAG_US_TIMESTAMP;
|
||||
pSqlExpr->flags |= 1 << EXPR_FLAG_TIMESTAMP_VAR;
|
||||
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
|
||||
pSqlExpr->tokenId = TK_TIMESTAMP;
|
||||
pSqlExpr->type = SQL_NODE_VALUE;
|
||||
|
@ -217,6 +220,15 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
|
|||
pExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
|
||||
pExpr->tokenId = pLeft->tokenId;
|
||||
pExpr->type = SQL_NODE_VALUE;
|
||||
pExpr->flags = pLeft->flags | pRight->flags;
|
||||
|
||||
if ((pLeft->flags & (1 << EXPR_FLAG_TIMESTAMP_VAR)) && (pRight->flags & (1 << EXPR_FLAG_TIMESTAMP_VAR))) {
|
||||
pExpr->flags |= 1 << EXPR_FLAG_TS_ERROR;
|
||||
} else {
|
||||
pExpr->flags &= ~(1 << EXPR_FLAG_TIMESTAMP_VAR);
|
||||
pExpr->flags &= ~(1 << EXPR_FLAG_TS_ERROR);
|
||||
}
|
||||
|
||||
|
||||
switch (optrType) {
|
||||
case TK_PLUS: {
|
||||
|
@ -245,7 +257,6 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) {
|
|||
|
||||
tSqlExprDestroy(pLeft);
|
||||
tSqlExprDestroy(pRight);
|
||||
|
||||
} else if ((pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_INTEGER) ||
|
||||
(pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_FLOAT) ||
|
||||
(pLeft->tokenId == TK_FLOAT && pRight->tokenId == TK_FLOAT)) {
|
||||
|
@ -319,7 +330,7 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
|
|||
if ((left == NULL && right) || (left && right == NULL)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
if (left->type != right->type) {
|
||||
return 1;
|
||||
}
|
||||
|
@ -332,11 +343,11 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
|
|||
return 1;
|
||||
}
|
||||
|
||||
if ((left->pLeft && right->pLeft == NULL)
|
||||
if ((left->pLeft && right->pLeft == NULL)
|
||||
|| (left->pLeft == NULL && right->pLeft)
|
||||
|| (left->pRight && right->pRight == NULL)
|
||||
|| (left->pRight && right->pRight == NULL)
|
||||
|| (left->pRight == NULL && right->pRight)
|
||||
|| (left->pParam && right->pParam == NULL)
|
||||
|| (left->pParam && right->pParam == NULL)
|
||||
|| (left->pParam == NULL && right->pParam)) {
|
||||
return 1;
|
||||
}
|
||||
|
@ -349,19 +360,18 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) {
|
|||
return 1;
|
||||
}
|
||||
|
||||
|
||||
if (right->pParam && left->pParam) {
|
||||
size_t size = taosArrayGetSize(right->pParam);
|
||||
if (left->pParam && taosArrayGetSize(left->pParam) != size) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i);
|
||||
tSqlExpr* pSubLeft = pLeftElem->pNode;
|
||||
tSqlExprItem* pRightElem = taosArrayGet(left->pParam, i);
|
||||
tSqlExprItem* pRightElem = taosArrayGet(right->pParam, i);
|
||||
tSqlExpr* pSubRight = pRightElem->pNode;
|
||||
|
||||
|
||||
if (tSqlExprCompare(pSubLeft, pSubRight)) {
|
||||
return 1;
|
||||
}
|
||||
|
@ -523,13 +533,13 @@ SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int
|
|||
return pList;
|
||||
}
|
||||
|
||||
SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* pAlias) {
|
||||
if (pFromInfo == NULL) {
|
||||
pFromInfo = calloc(1, sizeof(SFromInfo));
|
||||
pFromInfo->tableList = taosArrayInit(4, sizeof(STableNamePair));
|
||||
SRelationInfo *setTableNameList(SRelationInfo* pRelationInfo, SStrToken *pName, SStrToken* pAlias) {
|
||||
if (pRelationInfo == NULL) {
|
||||
pRelationInfo = calloc(1, sizeof(SRelationInfo));
|
||||
pRelationInfo->list = taosArrayInit(4, sizeof(STableNamePair));
|
||||
}
|
||||
|
||||
pFromInfo->type = SQL_NODE_FROM_NAMELIST;
|
||||
pRelationInfo->type = SQL_NODE_FROM_TABLELIST;
|
||||
STableNamePair p = {.name = *pName};
|
||||
if (pAlias != NULL) {
|
||||
p.aliasName = *pAlias;
|
||||
|
@ -537,34 +547,39 @@ SFromInfo *setTableNameList(SFromInfo* pFromInfo, SStrToken *pName, SStrToken* p
|
|||
TPARSER_SET_NONE_TOKEN(p.aliasName);
|
||||
}
|
||||
|
||||
taosArrayPush(pFromInfo->tableList, &p);
|
||||
|
||||
return pFromInfo;
|
||||
taosArrayPush(pRelationInfo->list, &p);
|
||||
return pRelationInfo;
|
||||
}
|
||||
|
||||
SFromInfo *setSubquery(SFromInfo* pFromInfo, SQuerySqlNode* pSqlNode) {
|
||||
if (pFromInfo == NULL) {
|
||||
pFromInfo = calloc(1, sizeof(SFromInfo));
|
||||
SRelationInfo* setSubquery(SRelationInfo* pRelationInfo, SArray* pList) {
|
||||
if (pRelationInfo == NULL) {
|
||||
pRelationInfo = calloc(1, sizeof(SRelationInfo));
|
||||
pRelationInfo->list = taosArrayInit(4, POINTER_BYTES);
|
||||
}
|
||||
|
||||
pFromInfo->type = SQL_NODE_FROM_SUBQUERY;
|
||||
pFromInfo->pNode->pClause[pFromInfo->pNode->numOfClause - 1] = pSqlNode;
|
||||
pRelationInfo->type = SQL_NODE_FROM_SUBQUERY;
|
||||
taosArrayPush(pRelationInfo->list, &pList);
|
||||
|
||||
return pFromInfo;
|
||||
return pRelationInfo;
|
||||
}
|
||||
|
||||
void* destroyFromInfo(SFromInfo* pFromInfo) {
|
||||
if (pFromInfo == NULL) {
|
||||
void* destroyRelationInfo(SRelationInfo* pRelationInfo) {
|
||||
if (pRelationInfo == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (pFromInfo->type == SQL_NODE_FROM_NAMELIST) {
|
||||
taosArrayDestroy(pFromInfo->tableList);
|
||||
if (pRelationInfo->type == SQL_NODE_FROM_TABLELIST) {
|
||||
taosArrayDestroy(pRelationInfo->list);
|
||||
} else {
|
||||
destroyAllSelectClause(pFromInfo->pNode);
|
||||
size_t size = taosArrayGetSize(pRelationInfo->list);
|
||||
for(int32_t i = 0; i < size; ++i) {
|
||||
SArray* pa = taosArrayGetP(pRelationInfo->list, 0);
|
||||
destroyAllSqlNode(pa);
|
||||
}
|
||||
taosArrayDestroy(pRelationInfo->list);
|
||||
}
|
||||
|
||||
tfree(pFromInfo);
|
||||
tfree(pRelationInfo);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -708,19 +723,19 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
|
|||
/*
|
||||
* extract the select info out of sql string
|
||||
*/
|
||||
SQuerySqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelectList, SFromInfo *pFrom, tSqlExpr *pWhere,
|
||||
SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
|
||||
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval,
|
||||
SSessionWindowVal *pSession, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit,
|
||||
SLimitVal *psLimit, tSqlExpr *pHaving) {
|
||||
assert(pSelectList != NULL);
|
||||
assert(pSelNodeList != NULL);
|
||||
|
||||
SQuerySqlNode *pSqlNode = calloc(1, sizeof(SQuerySqlNode));
|
||||
SSqlNode *pSqlNode = calloc(1, sizeof(SSqlNode));
|
||||
|
||||
// all later sql string are belonged to the stream sql
|
||||
pSqlNode->sqlstr = *pSelectToken;
|
||||
pSqlNode->sqlstr.n = (uint32_t)strlen(pSqlNode->sqlstr.z);
|
||||
|
||||
pSqlNode->pSelectList = pSelectList;
|
||||
pSqlNode->pSelNodeList = pSelNodeList;
|
||||
pSqlNode->from = pFrom;
|
||||
pSqlNode->pGroupby = pGroupby;
|
||||
pSqlNode->pSortOrder = pSortOrder;
|
||||
|
@ -778,49 +793,47 @@ void freeCreateTableInfo(void* p) {
|
|||
tfree(pInfo->tagdata.data);
|
||||
}
|
||||
|
||||
void destroyQuerySqlNode(SQuerySqlNode *pQuerySql) {
|
||||
if (pQuerySql == NULL) {
|
||||
void destroySqlNode(SSqlNode *pSqlNode) {
|
||||
if (pSqlNode == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
tSqlExprListDestroy(pQuerySql->pSelectList);
|
||||
tSqlExprListDestroy(pSqlNode->pSelNodeList);
|
||||
pSqlNode->pSelNodeList = NULL;
|
||||
|
||||
tSqlExprDestroy(pSqlNode->pWhere);
|
||||
pSqlNode->pWhere = NULL;
|
||||
|
||||
pQuerySql->pSelectList = NULL;
|
||||
taosArrayDestroyEx(pSqlNode->pSortOrder, freeVariant);
|
||||
pSqlNode->pSortOrder = NULL;
|
||||
|
||||
tSqlExprDestroy(pQuerySql->pWhere);
|
||||
pQuerySql->pWhere = NULL;
|
||||
taosArrayDestroyEx(pSqlNode->pGroupby, freeVariant);
|
||||
pSqlNode->pGroupby = NULL;
|
||||
|
||||
tSqlExprDestroy(pQuerySql->pHaving);
|
||||
pQuerySql->pHaving = NULL;
|
||||
|
||||
taosArrayDestroyEx(pQuerySql->pSortOrder, freeVariant);
|
||||
pQuerySql->pSortOrder = NULL;
|
||||
pSqlNode->from = destroyRelationInfo(pSqlNode->from);
|
||||
|
||||
taosArrayDestroyEx(pQuerySql->pGroupby, freeVariant);
|
||||
pQuerySql->pGroupby = NULL;
|
||||
taosArrayDestroyEx(pSqlNode->fillType, freeVariant);
|
||||
pSqlNode->fillType = NULL;
|
||||
|
||||
pQuerySql->from = destroyFromInfo(pQuerySql->from);
|
||||
|
||||
taosArrayDestroyEx(pQuerySql->fillType, freeVariant);
|
||||
pQuerySql->fillType = NULL;
|
||||
|
||||
free(pQuerySql);
|
||||
tSqlExprDestroy(pSqlNode->pHaving);
|
||||
free(pSqlNode);
|
||||
}
|
||||
|
||||
void destroyAllSelectClause(SSubclauseInfo *pClause) {
|
||||
if (pClause == NULL || pClause->numOfClause == 0) {
|
||||
void destroyAllSqlNode(SArray *pList) {
|
||||
if (pList == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
for(int32_t i = 0; i < pClause->numOfClause; ++i) {
|
||||
SQuerySqlNode *pQuerySql = pClause->pClause[i];
|
||||
destroyQuerySqlNode(pQuerySql);
|
||||
size_t size = taosArrayGetSize(pList);
|
||||
for(int32_t i = 0; i < size; ++i) {
|
||||
SSqlNode *pNode = taosArrayGetP(pList, i);
|
||||
destroySqlNode(pNode);
|
||||
}
|
||||
|
||||
tfree(pClause->pClause);
|
||||
|
||||
taosArrayDestroy(pList);
|
||||
}
|
||||
|
||||
SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SQuerySqlNode *pSelect, int32_t type) {
|
||||
SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SSqlNode *pSelect, int32_t type) {
|
||||
SCreateTableSql *pCreate = calloc(1, sizeof(SCreateTableSql));
|
||||
|
||||
switch (type) {
|
||||
|
@ -888,7 +901,7 @@ SAlterTableInfo *tSetAlterTableInfo(SStrToken *pTableName, SArray *pCols, SArray
|
|||
}
|
||||
|
||||
void* destroyCreateTableSql(SCreateTableSql* pCreate) {
|
||||
destroyQuerySqlNode(pCreate->pSelect);
|
||||
destroySqlNode(pCreate->pSelect);
|
||||
|
||||
taosArrayDestroy(pCreate->colInfo.pColumns);
|
||||
taosArrayDestroy(pCreate->colInfo.pTagColumns);
|
||||
|
@ -903,7 +916,7 @@ void SqlInfoDestroy(SSqlInfo *pInfo) {
|
|||
if (pInfo == NULL) return;
|
||||
|
||||
if (pInfo->type == TSDB_SQL_SELECT) {
|
||||
destroyAllSelectClause(&pInfo->subclauseInfo);
|
||||
destroyAllSqlNode(pInfo->list);
|
||||
} else if (pInfo->type == TSDB_SQL_CREATE_TABLE) {
|
||||
pInfo->pCreateTableInfo = destroyCreateTableSql(pInfo->pCreateTableInfo);
|
||||
} else if (pInfo->type == TSDB_SQL_ALTER_TABLE) {
|
||||
|
@ -924,31 +937,20 @@ void SqlInfoDestroy(SSqlInfo *pInfo) {
|
|||
}
|
||||
}
|
||||
|
||||
SSubclauseInfo* setSubclause(SSubclauseInfo* pSubclause, void *pSqlExprInfo) {
|
||||
if (pSubclause == NULL) {
|
||||
pSubclause = calloc(1, sizeof(SSubclauseInfo));
|
||||
SArray* setSubclause(SArray* pList, void *pSqlNode) {
|
||||
if (pList == NULL) {
|
||||
pList = taosArrayInit(1, POINTER_BYTES);
|
||||
}
|
||||
|
||||
int32_t newSize = pSubclause->numOfClause + 1;
|
||||
char* tmp = realloc(pSubclause->pClause, newSize * POINTER_BYTES);
|
||||
if (tmp == NULL) {
|
||||
return pSubclause;
|
||||
}
|
||||
|
||||
pSubclause->pClause = (SQuerySqlNode**) tmp;
|
||||
|
||||
pSubclause->pClause[newSize - 1] = pSqlExprInfo;
|
||||
pSubclause->numOfClause++;
|
||||
|
||||
return pSubclause;
|
||||
taosArrayPush(pList, &pSqlNode);
|
||||
return pList;
|
||||
}
|
||||
|
||||
SSqlInfo* setSqlInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pTableName, int32_t type) {
|
||||
pInfo->type = type;
|
||||
|
||||
if (type == TSDB_SQL_SELECT) {
|
||||
pInfo->subclauseInfo = *(SSubclauseInfo*) pSqlExprInfo;
|
||||
free(pSqlExprInfo);
|
||||
pInfo->list = (SArray*) pSqlExprInfo;
|
||||
} else {
|
||||
pInfo->pCreateTableInfo = pSqlExprInfo;
|
||||
}
|
||||
|
@ -960,16 +962,9 @@ SSqlInfo* setSqlInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pTableName,
|
|||
return pInfo;
|
||||
}
|
||||
|
||||
SSubclauseInfo* appendSelectClause(SSubclauseInfo *pQueryInfo, void *pSubclause) {
|
||||
char* tmp = realloc(pQueryInfo->pClause, (pQueryInfo->numOfClause + 1) * POINTER_BYTES);
|
||||
if (tmp == NULL) { // out of memory
|
||||
return pQueryInfo;
|
||||
}
|
||||
|
||||
pQueryInfo->pClause = (SQuerySqlNode**) tmp;
|
||||
pQueryInfo->pClause[pQueryInfo->numOfClause++] = pSubclause;
|
||||
|
||||
return pQueryInfo;
|
||||
SArray* appendSelectClause(SArray *pList, void *pSubclause) {
|
||||
taosArrayPush(pList, &pSubclause);
|
||||
return pList;
|
||||
}
|
||||
|
||||
void setCreatedTableName(SSqlInfo *pInfo, SStrToken *pTableNameToken, SStrToken *pIfNotExists) {
|
||||
|
|
|
@ -30,11 +30,11 @@ typedef struct SCompSupporter {
|
|||
int32_t order;
|
||||
} SCompSupporter;
|
||||
|
||||
int32_t getOutputInterResultBufSize(SQuery* pQuery) {
|
||||
int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr) {
|
||||
int32_t size = 0;
|
||||
|
||||
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
|
||||
size += pQuery->pExpr1[i].interBytes;
|
||||
for (int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) {
|
||||
size += pQueryAttr->pExpr1[i].base.interBytes;
|
||||
}
|
||||
|
||||
assert(size >= 0);
|
||||
|
@ -136,11 +136,11 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16
|
|||
tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResultRow->pageId);
|
||||
|
||||
int16_t offset = 0;
|
||||
for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutput; ++i) {
|
||||
for (int32_t i = 0; i < pRuntimeEnv->pQueryAttr->numOfOutput; ++i) {
|
||||
SResultRowCellInfo *pResultInfo = &pResultRow->pCellInfo[i];
|
||||
|
||||
int16_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes;
|
||||
char * s = getPosInResultPage(pRuntimeEnv, page, pResultRow->offset, offset, size);
|
||||
int16_t size = pRuntimeEnv->pQueryAttr->pExpr1[i].base.resType;
|
||||
char * s = getPosInResultPage(pRuntimeEnv->pQueryAttr, page, pResultRow->offset, offset);
|
||||
memset(s, 0, size);
|
||||
|
||||
offset += size;
|
||||
|
@ -164,8 +164,8 @@ SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t
|
|||
}
|
||||
|
||||
size_t getResultRowSize(SQueryRuntimeEnv* pRuntimeEnv) {
|
||||
SQuery* pQuery = pRuntimeEnv->pQuery;
|
||||
return (pQuery->numOfOutput * sizeof(SResultRowCellInfo)) + pQuery->interBufSize + sizeof(SResultRow);
|
||||
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||
return (pQueryAttr->numOfOutput * sizeof(SResultRowCellInfo)) + pQueryAttr->interBufSize + sizeof(SResultRow);
|
||||
}
|
||||
|
||||
SResultRowPool* initResultRowPool(size_t size) {
|
||||
|
@ -382,10 +382,10 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo) {
|
|||
}
|
||||
|
||||
static int64_t getNumOfResultWindowRes(SQueryRuntimeEnv* pRuntimeEnv, SResultRow *pResultRow, int32_t* rowCellInfoOffset) {
|
||||
SQuery* pQuery = pRuntimeEnv->pQuery;
|
||||
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||
|
||||
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
|
||||
int32_t functionId = pQuery->pExpr1[j].base.functionId;
|
||||
for (int32_t j = 0; j < pQueryAttr->numOfOutput; ++j) {
|
||||
int32_t functionId = pQueryAttr->pExpr1[j].base.functionId;
|
||||
|
||||
/*
|
||||
* ts, tag, tagprj function can not decide the output number of current query
|
||||
|
@ -448,7 +448,7 @@ static int32_t tableResultComparFn(const void *pLeft, const void *pRight, void *
|
|||
|
||||
static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupResInfo* pGroupResInfo, SArray *pTableList,
|
||||
int32_t* rowCellInfoOffset) {
|
||||
bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQuery);
|
||||
bool ascQuery = QUERY_IS_ASC_QUERY(pRuntimeEnv->pQueryAttr);
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
|
@ -484,7 +484,7 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes
|
|||
goto _end;
|
||||
}
|
||||
|
||||
SCompSupporter cs = {pTableQueryInfoList, posList, pRuntimeEnv->pQuery->order.order};
|
||||
SCompSupporter cs = {pTableQueryInfoList, posList, pRuntimeEnv->pQueryAttr->order.order};
|
||||
|
||||
int32_t ret = tLoserTreeCreate(&pTree, numOfTables, &cs, tableResultComparFn);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
|
@ -537,7 +537,7 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes
|
|||
|
||||
int64_t endt = taosGetTimestampMs();
|
||||
|
||||
qDebug("QInfo:%"PRIu64" result merge completed for group:%d, elapsed time:%" PRId64 " ms", GET_QID(pRuntimeEnv),
|
||||
qDebug("QInfo:%"PRIx64" result merge completed for group:%d, elapsed time:%" PRId64 " ms", GET_QID(pRuntimeEnv),
|
||||
pGroupResInfo->currentGroup, endt - startt);
|
||||
|
||||
_end:
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
*/
|
||||
|
||||
#include "os.h"
|
||||
#include "qFill.h"
|
||||
#include "taosmsg.h"
|
||||
#include "tcache.h"
|
||||
#include "tglobal.h"
|
||||
|
@ -23,13 +22,11 @@
|
|||
#include "hash.h"
|
||||
#include "texpr.h"
|
||||
#include "qExecutor.h"
|
||||
#include "qResultbuf.h"
|
||||
#include "qUtil.h"
|
||||
#include "query.h"
|
||||
#include "queryLog.h"
|
||||
#include "tlosertree.h"
|
||||
#include "ttype.h"
|
||||
#include "tcompare.h"
|
||||
|
||||
typedef struct SQueryMgmt {
|
||||
pthread_mutex_t lock;
|
||||
|
@ -58,10 +55,13 @@ void freeParam(SQueryParam *param) {
|
|||
tfree(param->tagCond);
|
||||
tfree(param->tbnameCond);
|
||||
tfree(param->pTableIdList);
|
||||
tfree(param->pExprMsg);
|
||||
tfree(param->pSecExprMsg);
|
||||
taosArrayDestroy(param->pOperator);
|
||||
tfree(param->pExprs);
|
||||
tfree(param->pSecExprs);
|
||||
|
||||
tfree(param->pExpr);
|
||||
tfree(param->pSecExpr);
|
||||
|
||||
tfree(param->pGroupColIndex);
|
||||
tfree(param->pTagColumnInfo);
|
||||
tfree(param->pGroupbyExpr);
|
||||
|
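freeParam() above releases each member through tfree(). A rough sketch of that idiom; the engine's tfree is a macro and this demo version is only an assumption about its shape.

#include <stdlib.h>

/* free-and-NULL helper: a second call on the same pointer, e.g. from an
 * error path that also runs the normal cleanup, becomes a harmless no-op. */
#define demo_tfree(p)  do { free(p); (p) = NULL; } while (0)

/* usage:
 *   char *buf = malloc(128);
 *   demo_tfree(buf);   // frees and sets buf to NULL
 *   demo_tfree(buf);   // safe: free(NULL) does nothing
 */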
@ -91,12 +91,14 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
|
|||
goto _over;
|
||||
}
|
||||
|
||||
if ((code = createQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->numOfOutput, ¶m.pExprs, param.pExprMsg, param.pTagColumnInfo)) != TSDB_CODE_SUCCESS) {
|
||||
SQueriedTableInfo info = { .numOfTags = pQueryMsg->numOfTags, .numOfCols = pQueryMsg->numOfCols, .colList = pQueryMsg->tableCols};
|
||||
if ((code = createQueryFunc(&info, pQueryMsg->numOfOutput, ¶m.pExprs, param.pExpr, param.pTagColumnInfo,
|
||||
pQueryMsg->queryType, pQueryMsg)) != TSDB_CODE_SUCCESS) {
|
||||
goto _over;
|
||||
}
|
||||
|
||||
if (param.pSecExprMsg != NULL) {
|
||||
if ((code = createIndirectQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, ¶m.pSecExprs, param.pSecExprMsg, param.pExprs)) != TSDB_CODE_SUCCESS) {
|
||||
if (param.pSecExpr != NULL) {
|
||||
if ((code = createIndirectQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, ¶m.pSecExprs, param.pSecExpr, param.pExprs)) != TSDB_CODE_SUCCESS) {
|
||||
goto _over;
|
||||
}
|
||||
}
|
||||
|
@ -158,7 +160,9 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
|
|||
goto _over;
|
||||
}
|
||||
|
||||
(*pQInfo) = createQInfoImpl(pQueryMsg, param.pGroupbyExpr, param.pExprs, param.pSecExprs, &tableGroupInfo, param.pTagColumnInfo, isSTableQuery, param.sql, qId);
|
||||
assert(pQueryMsg->stableQuery == isSTableQuery);
|
||||
(*pQInfo) = createQInfoImpl(pQueryMsg, param.pGroupbyExpr, param.pExprs, param.pSecExprs, &tableGroupInfo,
|
||||
param.pTagColumnInfo, vgId, param.sql, qId);
|
||||
|
||||
param.sql = NULL;
|
||||
param.pExprs = NULL;
|
||||
|
@ -171,7 +175,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
|
|||
goto _over;
|
||||
}
|
||||
|
||||
code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, ¶m, isSTableQuery);
|
||||
code = initQInfo(&pQueryMsg->tsBuf, tsdb, NULL, *pQInfo, ¶m, (char*)pQueryMsg, pQueryMsg->prevResultLen, NULL);
|
||||
|
||||
_over:
|
||||
if (param.pGroupbyExpr != NULL) {
|
||||
|
@ -184,8 +188,8 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
|
|||
freeParam(¶m);
|
||||
|
||||
for (int32_t i = 0; i < pQueryMsg->numOfCols; i++) {
|
||||
SColumnInfo* column = pQueryMsg->colList + i;
|
||||
freeColumnFilterInfo(column->filters, column->numOfFilters);
|
||||
SColumnInfo* column = pQueryMsg->tableCols + i;
|
||||
freeColumnFilterInfo(column->flist.filterInfo, column->flist.numOfFilters);
|
||||
}
|
||||
|
||||
//pQInfo already freed in initQInfo, but *pQInfo may not pointer to null;
|
||||
|
@ -205,23 +209,22 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
|
|||
|
||||
int64_t curOwner = 0;
|
||||
if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) {
|
||||
qError("QInfo:%"PRIu64"-%p qhandle is now executed by thread:%p", pQInfo->qId, pQInfo, (void*) curOwner);
|
||||
qError("QInfo:0x%"PRIx64"-%p qhandle is now executed by thread:%p", pQInfo->qId, pQInfo, (void*) curOwner);
|
||||
pQInfo->code = TSDB_CODE_QRY_IN_EXEC;
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
*qId = pQInfo->qId;
|
||||
pQInfo->startExecTs = taosGetTimestampSec();
|
||||
|
||||
if (isQueryKilled(pQInfo)) {
|
||||
qDebug("QInfo:%"PRIu64" it is already killed, abort", pQInfo->qId);
|
||||
qDebug("QInfo:0x%"PRIx64" it is already killed, abort", pQInfo->qId);
|
||||
return doBuildResCheck(pQInfo);
|
||||
}
|
||||
|
||||
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
if (pRuntimeEnv->tableqinfoGroupInfo.numOfTables == 0) {
|
||||
qDebug("QInfo:%"PRIu64" no table exists for query, abort", pQInfo->qId);
|
||||
qDebug("QInfo:0x%"PRIx64" no table exists for query, abort", pQInfo->qId);
|
||||
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
|
||||
return doBuildResCheck(pQInfo);
|
||||
}
|
||||
|
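The owner check above uses a 64-bit compare-and-swap so that only one thread can drive a given query handle at a time; a second caller gets an "in exec" error instead of racing. A minimal sketch of the same pattern with C11 atomics; DemoQInfo is hypothetical and stands in for the engine's atomic_val_compare_exchange_64 usage.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct DemoQInfo {
  _Atomic int64_t owner;   /* 0 means "not currently executed by anyone" */
} DemoQInfo;

/* First caller to install its thread id wins; everyone else is told the
 * query is already in execution and must back off. */
static bool tryClaimQuery(DemoQInfo *q, int64_t threadId) {
  int64_t expected = 0;
  return atomic_compare_exchange_strong(&q->owner, &expected, threadId);
}

static void releaseQuery(DemoQInfo *q) {
  atomic_store(&q->owner, 0);   /* done, let the next caller claim it */
}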
@ -230,21 +233,22 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
|
|||
int32_t ret = setjmp(pQInfo->runtimeEnv.env);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
pQInfo->code = ret;
|
||||
qDebug("QInfo:%"PRIu64" query abort due to error/cancel occurs, code:%s", pQInfo->qId, tstrerror(pQInfo->code));
|
||||
qDebug("QInfo:0x%"PRIx64" query abort due to error/cancel occurs, code:%s", pQInfo->qId, tstrerror(pQInfo->code));
|
||||
return doBuildResCheck(pQInfo);
|
||||
}
|
||||
|
||||
qDebug("QInfo:%"PRIu64" query task is launched", pQInfo->qId);
|
||||
qDebug("QInfo:0x%"PRIx64" query task is launched", pQInfo->qId);
|
||||
|
||||
pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot);
|
||||
bool newgroup = false;
|
||||
pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot, &newgroup);
|
||||
|
||||
if (isQueryKilled(pQInfo)) {
|
||||
qDebug("QInfo:%"PRIu64" query is killed", pQInfo->qId);
|
||||
qDebug("QInfo:0x%"PRIx64" query is killed", pQInfo->qId);
|
||||
} else if (GET_NUM_OF_RESULTS(pRuntimeEnv) == 0) {
|
||||
qDebug("QInfo:%"PRIu64" over, %u tables queried, %"PRId64" rows are returned", pQInfo->qId, pRuntimeEnv->tableqinfoGroupInfo.numOfTables,
|
||||
qDebug("QInfo:0x%"PRIx64" over, %u tables queried, %"PRId64" rows are returned", pQInfo->qId, pRuntimeEnv->tableqinfoGroupInfo.numOfTables,
|
||||
pRuntimeEnv->resultInfo.total);
|
||||
} else {
|
||||
qDebug("QInfo:%"PRIu64" query paused, %d rows returned, numOfTotal:%" PRId64 " rows",
|
||||
qDebug("QInfo:0x%"PRIx64" query paused, %d rows returned, numOfTotal:%" PRId64 " rows",
|
||||
pQInfo->qId, GET_NUM_OF_RESULTS(pRuntimeEnv), pRuntimeEnv->resultInfo.total + GET_NUM_OF_RESULTS(pRuntimeEnv));
|
||||
}
|
||||
|
||||
|
@ -255,13 +259,13 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex
|
|||
SQInfo *pQInfo = (SQInfo *)qinfo;
|
||||
|
||||
if (pQInfo == NULL || !isValidQInfo(pQInfo)) {
|
||||
qError("QInfo:%"PRIu64" invalid qhandle", pQInfo->qId);
|
||||
qError("QInfo:0x%"PRIx64" invalid qhandle", pQInfo->qId);
|
||||
return TSDB_CODE_QRY_INVALID_QHANDLE;
|
||||
}
|
||||
|
||||
*buildRes = false;
|
||||
if (IS_QUERY_KILLED(pQInfo)) {
|
||||
qDebug("QInfo:%"PRIu64" query is killed, code:0x%08x", pQInfo->qId, pQInfo->code);
|
||||
qDebug("QInfo:0x%"PRIx64" query is killed, code:0x%08x", pQInfo->qId, pQInfo->code);
|
||||
return pQInfo->code;
|
||||
}
|
||||
|
||||
|
@ -274,18 +278,18 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex
|
|||
code = pQInfo->code;
|
||||
} else {
|
||||
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
|
||||
SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
|
||||
|
||||
pthread_mutex_lock(&pQInfo->lock);
|
||||
|
||||
assert(pQInfo->rspContext == NULL);
|
||||
if (pQInfo->dataReady == QUERY_RESULT_READY) {
|
||||
*buildRes = true;
|
||||
qDebug("QInfo:%"PRIu64" retrieve result info, rowsize:%d, rows:%d, code:%s", pQInfo->qId, pQuery->resultRowSize,
|
||||
qDebug("QInfo:0x%"PRIx64" retrieve result info, rowsize:%d, rows:%d, code:%s", pQInfo->qId, pQueryAttr->resultRowSize,
|
||||
GET_NUM_OF_RESULTS(pRuntimeEnv), tstrerror(pQInfo->code));
|
||||
} else {
|
||||
*buildRes = false;
|
||||
qDebug("QInfo:%"PRIu64" retrieve req set query return result after paused", pQInfo->qId);
|
||||
qDebug("QInfo:0x%"PRIx64" retrieve req set query return result after paused", pQInfo->qId);
|
||||
pQInfo->rspContext = pRspContext;
|
||||
assert(pQInfo->rspContext != NULL);
|
||||
}
|
||||
|
@ -304,11 +308,11 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
|
|||
return TSDB_CODE_QRY_INVALID_QHANDLE;
|
||||
}
|
||||
|
||||
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
|
||||
SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
|
||||
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
|
||||
int32_t s = GET_NUM_OF_RESULTS(pRuntimeEnv);
|
||||
size_t size = pQuery->resultRowSize * s;
|
||||
size_t size = pQueryAttr->resultRowSize * s;
|
||||
size += sizeof(int32_t);
|
||||
size += sizeof(STableIdInfo) * taosHashGetSize(pRuntimeEnv->pTableRetrieveTsMap);
|
||||
|
||||
|
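The arithmetic above sizes the retrieve response before it is packed. A sketch of that computation; DemoTableIdInfo is a made-up stand-in for STableIdInfo, and the extra int32_t is kept as an opaque trailing field exactly as in the code above.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for STableIdInfo, just to make sizeof() concrete. */
typedef struct DemoTableIdInfo { uint64_t uid; int32_t tid; int64_t key; } DemoTableIdInfo;

/* payload = fixed-size row image * number of rows
 *         + one extra int32_t field
 *         + one id record per table that contributed rows */
static size_t retrieveRspPayloadSize(size_t resultRowSize, int32_t numOfRows, int32_t numOfTables) {
  size_t size = resultRowSize * (size_t)numOfRows;
  size += sizeof(int32_t);
  size += sizeof(DemoTableIdInfo) * (size_t)numOfTables;
  return size;
}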
@ -330,7 +334,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
|
|||
(*pRsp)->useconds = htobe64(pQInfo->summary.elapsedTime);
|
||||
}
|
||||
|
||||
(*pRsp)->precision = htons(pQuery->precision);
|
||||
(*pRsp)->precision = htons(pQueryAttr->precision);
|
||||
if (GET_NUM_OF_RESULTS(&(pQInfo->runtimeEnv)) > 0 && pQInfo->code == TSDB_CODE_SUCCESS) {
|
||||
doDumpQueryResult(pQInfo, (*pRsp)->data);
|
||||
} else {
|
||||
|
@ -344,10 +348,10 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
|
|||
// here current thread hold the refcount, so it is safe to free tsdbQueryHandle.
|
||||
*continueExec = false;
|
||||
(*pRsp)->completed = 1; // notify no more result to client
|
||||
qDebug("QInfo:%"PRIu64" no more results to retrieve", pQInfo->qId);
|
||||
qDebug("QInfo:0x%"PRIx64" no more results to retrieve", pQInfo->qId);
|
||||
} else {
|
||||
*continueExec = true;
|
||||
qDebug("QInfo:%"PRIu64" has more results to retrieve", pQInfo->qId);
|
||||
qDebug("QInfo:0x%"PRIx64" has more results to retrieve", pQInfo->qId);
|
||||
}
|
||||
|
||||
// the memory should be freed if the code of pQInfo is not TSDB_CODE_SUCCESS
|
||||
|
@ -373,7 +377,7 @@ int32_t qKillQuery(qinfo_t qinfo) {
|
|||
return TSDB_CODE_QRY_INVALID_QHANDLE;
|
||||
}
|
||||
|
||||
qDebug("QInfo:%"PRIu64" query killed", pQInfo->qId);
|
||||
qDebug("QInfo:0x%"PRIx64" query killed", pQInfo->qId);
|
||||
setQueryKilled(pQInfo);
|
||||
|
||||
// Wait for the query executing thread being stopped/
|
||||
|
@ -401,7 +405,7 @@ void qDestroyQueryInfo(qinfo_t qHandle) {
|
|||
return;
|
||||
}
|
||||
|
||||
qDebug("QInfo:%"PRIu64" query completed", pQInfo->qId);
|
||||
qDebug("QInfo:0x%"PRIx64" query completed", pQInfo->qId);
|
||||
queryCostStatis(pQInfo); // print the query cost summary
|
||||
freeQInfo(pQInfo);
|
||||
}
|
||||
|
@ -484,7 +488,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo) {
|
|||
|
||||
SQueryMgmt *pQueryMgmt = pMgmt;
|
||||
if (pQueryMgmt->qinfoPool == NULL) {
|
||||
qError("QInfo:%"PRIu64"-%p failed to add qhandle into qMgmt, since qMgmt is closed", qId, (void*)qInfo);
|
||||
qError("QInfo:0x%"PRIx64"-%p failed to add qhandle into qMgmt, since qMgmt is closed", qId, (void*)qInfo);
|
||||
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
|
||||
return NULL;
|
||||
}
|
||||
|
@ -492,7 +496,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qId, void *qInfo) {
|
|||
pthread_mutex_lock(&pQueryMgmt->lock);
|
||||
if (pQueryMgmt->closed) {
|
||||
pthread_mutex_unlock(&pQueryMgmt->lock);
|
||||
qError("QInfo:%"PRIu64"-%p failed to add qhandle into cache, since qMgmt is colsing", qId, (void*)qInfo);
|
||||
qError("QInfo:0x%"PRIx64"-%p failed to add qhandle into cache, since qMgmt is colsing", qId, (void*)qInfo);
|
||||
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
|
||||
return NULL;
|
||||
} else {
|
||||
|
|
src/query/src/sql.c (2431): File diff suppressed because it is too large
|
@ -295,7 +295,7 @@ void *rpcOpen(const SRpcInit *pInit) {
|
|||
return NULL;
|
||||
}
|
||||
} else {
|
||||
pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime*30);
|
||||
pRpc->pCache = rpcOpenConnCache(pRpc->sessions, rpcCloseConn, pRpc->tmrCtrl, pRpc->idleTime * 30);
|
||||
if ( pRpc->pCache == NULL ) {
|
||||
tError("%s failed to init connection cache", pRpc->label);
|
||||
rpcClose(pRpc);
|
||||
|
@ -470,7 +470,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
|
|||
taosTmrStopA(&pConn->pTimer);
|
||||
|
||||
// set the idle timer to monitor the activity
|
||||
taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime*30, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
|
||||
taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime * 30, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
|
||||
rpcSendMsgToPeer(pConn, msg, msgLen);
|
||||
|
||||
// if not set to secured, set it expcet NOT_READY case, since client wont treat it as secured
|
||||
|
@ -997,8 +997,8 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont
|
|||
}
|
||||
|
||||
if ( rpcIsReq(pHead->msgType) ) {
|
||||
terrno = rpcProcessReqHead(pConn, pHead);
|
||||
pConn->connType = pRecv->connType;
|
||||
terrno = rpcProcessReqHead(pConn, pHead);
|
||||
|
||||
// stop idle timer
|
||||
taosTmrStopA(&pConn->pIdleTimer);
|
||||
|
|
|
@ -123,7 +123,7 @@ typedef struct STsdbQueryHandle {
|
|||
SMemRef *pMemRef;
|
||||
SArray *defaultLoadColumn;// default load column
|
||||
SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */
|
||||
SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */
|
||||
SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQueryAttr */
|
||||
|
||||
SArray *prev; // previous row which is before than time window
|
||||
SArray *next; // next row which is after the query time window
|
||||
|
@ -286,7 +286,7 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa
|
|||
}
|
||||
|
||||
taosArrayPush(pTableCheckInfo, &info);
|
||||
tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" %"PRIu64, pQueryHandle, info.tableId.uid,
|
||||
tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" 0x%"PRIx64, pQueryHandle, info.tableId.uid,
|
||||
info.tableId.tid, info.lastKey, pQueryHandle->qId);
|
||||
}
|
||||
}
|
||||
|
@ -440,7 +440,7 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable
|
|||
|
||||
tsdbMayTakeMemSnapshot(pQueryHandle, psTable);
|
||||
|
||||
tsdbDebug("%p total numOfTable:%" PRIzu " in query, %"PRIu64, pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qId);
|
||||
tsdbDebug("%p total numOfTable:%" PRIzu " in query, 0x%"PRIx64, pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qId);
|
||||
return (TsdbQueryHandleT) pQueryHandle;
|
||||
}
|
||||
|
||||
|
@ -651,7 +651,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
|
|||
SDataRow row = (SDataRow)SL_GET_NODE_DATA(node);
|
||||
TSKEY key = dataRowKey(row); // first timestamp in buffer
|
||||
tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
|
||||
"-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %"PRIu64,
|
||||
"-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", 0x%"PRIx64,
|
||||
pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pMem->keyFirst, pMem->keyLast,
|
||||
pCheckInfo->lastKey, pMem->numOfRows, pHandle->qId);
|
||||
|
||||
|
@ -662,7 +662,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
|
|||
}
|
||||
|
||||
} else {
|
||||
tsdbDebug("%p uid:%"PRId64", tid:%d no data in mem, %"PRIu64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid,
|
||||
tsdbDebug("%p uid:%"PRId64", tid:%d no data in mem, 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid,
|
||||
pHandle->qId);
|
||||
}
|
||||
|
||||
|
@ -673,7 +673,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
|
|||
SDataRow row = (SDataRow)SL_GET_NODE_DATA(node);
|
||||
TSKEY key = dataRowKey(row); // first timestamp in buffer
|
||||
tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
|
||||
"-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %"PRIu64,
|
||||
"-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", 0x%"PRIx64,
|
||||
pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pIMem->keyFirst, pIMem->keyLast,
|
||||
pCheckInfo->lastKey, pIMem->numOfRows, pHandle->qId);
|
||||
|
||||
|
@ -683,7 +683,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
|
|||
assert(pCheckInfo->lastKey >= key);
|
||||
}
|
||||
} else {
|
||||
tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem, %"PRIu64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid,
|
||||
tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem, 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid,
|
||||
pHandle->qId);
|
||||
}
|
||||
|
||||
|
@ -811,7 +811,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
|
|||
}
|
||||
|
||||
pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer
|
||||
tsdbDebug("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %"PRIu64, pHandle,
|
||||
tsdbDebug("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, 0x%"PRIx64, pHandle,
|
||||
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qId);
|
||||
|
||||
// all data in mem are checked already.
|
||||
|
@ -986,21 +986,21 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBloc
|
|||
STSchema *pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj);
|
||||
int32_t code = tdInitDataCols(pQueryHandle->pDataCols, pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tsdbError("%p failed to malloc buf for pDataCols, %"PRIu64, pQueryHandle, pQueryHandle->qId);
|
||||
tsdbError("%p failed to malloc buf for pDataCols, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId);
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
code = tdInitDataCols(pQueryHandle->rhelper.pDCols[0], pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tsdbError("%p failed to malloc buf for rhelper.pDataCols[0], %"PRIu64, pQueryHandle, pQueryHandle->qId);
|
||||
tsdbError("%p failed to malloc buf for rhelper.pDataCols[0], 0x%"PRIx64, pQueryHandle, pQueryHandle->qId);
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
code = tdInitDataCols(pQueryHandle->rhelper.pDCols[1], pSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tsdbError("%p failed to malloc buf for rhelper.pDataCols[1], %"PRIu64, pQueryHandle, pQueryHandle->qId);
|
||||
tsdbError("%p failed to malloc buf for rhelper.pDataCols[1], 0x%"PRIx64, pQueryHandle, pQueryHandle->qId);
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _error;
|
||||
}
|
||||
|
@ -1036,14 +1036,14 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBloc
|
|||
int64_t elapsedTime = (taosGetTimestampUs() - st);
|
||||
pQueryHandle->cost.blockLoadTime += elapsedTime;
|
||||
|
||||
tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, elapsed time:%"PRId64 " us, %"PRIu64,
|
||||
tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, elapsed time:%"PRId64 " us, 0x%"PRIx64,
|
||||
pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, elapsedTime, pQueryHandle->qId);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
_error:
|
||||
pBlock->numOfRows = 0;
|
||||
|
||||
tsdbError("%p error occurs in loading file block, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, %"PRIu64,
|
||||
tsdbError("%p error occurs in loading file block, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, 0x%"PRIx64,
|
||||
pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, pQueryHandle->qId);
|
||||
return terrno;
|
||||
}
|
||||
|
@ -1066,7 +1066,7 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p
|
|||
assert(cur->pos >= 0 && cur->pos <= binfo.rows);
|
||||
|
||||
TSKEY key = (row != NULL)? dataRowKey(row):TSKEY_INITIAL_VAL;
|
||||
tsdbDebug("%p key in mem:%"PRId64", %"PRIu64, pQueryHandle, key, pQueryHandle->qId);
|
||||
tsdbDebug("%p key in mem:%"PRId64", 0x%"PRIx64, pQueryHandle, key, pQueryHandle->qId);
|
||||
|
||||
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
|
||||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
|
||||
|
@@ -1406,7 +1406,11 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
       SET_DOUBLE_PTR(pData, value);
       break;
     case TSDB_DATA_TYPE_TIMESTAMP:
-      *(TSKEY *)pData = tdGetKey(*(TKEY *)value);
+      if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
+        *(TSKEY *)pData = tdGetKey(*(TKEY *)value);
+      } else {
+        *(TSKEY *)pData = *(TSKEY *)value;
+      }
       break;
     default:
       memcpy(pData, value, pColInfo->info.bytes);
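The new branch above decodes only the primary-key column through tdGetKey() and copies ordinary TIMESTAMP columns verbatim. A hedged sketch of that distinction; the mask below is purely illustrative and is not the engine's real TKEY layout.

#include <stdint.h>

typedef int64_t  TSKEY;
typedef uint64_t TKEY;

/* Assumed layout: the primary-key column stores its timestamp as a TKEY
 * with control bits in the top of the word, so it has to be decoded; a
 * non-key TIMESTAMP column already holds a plain TSKEY. */
#define DEMO_TKEY_VALUE_MASK ((TKEY)0x3FFFFFFFFFFFFFFFull)

static TSKEY demoGetKey(TKEY tkey) {
  return (TSKEY)(tkey & DEMO_TKEY_VALUE_MASK);
}

static void copyTimestampCell(int64_t *dst, const void *src, int isPrimaryKeyCol) {
  if (isPrimaryKeyCol) {
    *dst = demoGetKey(*(const TKEY *)src);   /* decode the flagged primary key */
  } else {
    *dst = *(const TSKEY *)src;              /* plain TIMESTAMP column value */
  }
}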
@ -1547,7 +1551,7 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl
|
|||
updateInfoAfterMerge(pQueryHandle, pCheckInfo, numOfRows, pos);
|
||||
doCheckGeneratedBlockRange(pQueryHandle);
|
||||
|
||||
tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %"PRIu64,
|
||||
tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, 0x%"PRIx64,
|
||||
pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, cur->mixBlock, cur->win.skey,
|
||||
cur->win.ekey, cur->rows, pQueryHandle->qId);
|
||||
}
|
||||
|
@ -1601,7 +1605,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
|
|||
int32_t endPos = getEndPosInDataBlock(pQueryHandle, &blockInfo);
|
||||
|
||||
tsdbDebug("%p uid:%" PRIu64",tid:%d start merge data block, file block range:%"PRIu64"-%"PRIu64" rows:%d, start:%d,"
|
||||
"end:%d, %"PRIu64,
|
||||
"end:%d, 0x%"PRIx64,
|
||||
pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, blockInfo.window.skey, blockInfo.window.ekey,
|
||||
blockInfo.rows, cur->pos, endPos, pQueryHandle->qId);
|
||||
|
||||
|
@ -1743,7 +1747,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
|
|||
updateInfoAfterMerge(pQueryHandle, pCheckInfo, numOfRows, pos);
|
||||
doCheckGeneratedBlockRange(pQueryHandle);
|
||||
|
||||
tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %"PRIu64,
|
||||
tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, 0x%"PRIx64,
|
||||
pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, cur->mixBlock, cur->win.skey,
|
||||
cur->win.ekey, cur->rows, pQueryHandle->qId);
|
||||
}
|
||||
|
@ -1919,12 +1923,12 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
|
|||
memcpy(pQueryHandle->pDataBlockInfo, sup.pDataBlockInfo[0], sizeof(STableBlockInfo) * numOfBlocks);
|
||||
cleanBlockOrderSupporter(&sup, numOfQualTables);
|
||||
|
||||
tsdbDebug("%p create data blocks info struct completed for 1 table, %d blocks not sorted %"PRIu64, pQueryHandle, cnt,
|
||||
tsdbDebug("%p create data blocks info struct completed for 1 table, %d blocks not sorted 0x%"PRIx64, pQueryHandle, cnt,
|
||||
pQueryHandle->qId);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
tsdbDebug("%p create data blocks info struct completed, %d blocks in %d tables %"PRIu64, pQueryHandle, cnt,
|
||||
tsdbDebug("%p create data blocks info struct completed, %d blocks in %d tables 0x%"PRIx64, pQueryHandle, cnt,
|
||||
numOfQualTables, pQueryHandle->qId);
|
||||
|
||||
assert(cnt <= numOfBlocks && numOfQualTables <= numOfTables); // the pTableQueryInfo[j]->numOfBlocks may be 0
|
||||
|
@ -1961,7 +1965,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
|
|||
* }
|
||||
*/
|
||||
|
||||
tsdbDebug("%p %d data blocks sort completed, %"PRIu64, pQueryHandle, cnt, pQueryHandle->qId);
|
||||
tsdbDebug("%p %d data blocks sort completed, 0x%"PRIx64, pQueryHandle, cnt, pQueryHandle->qId);
|
||||
cleanBlockOrderSupporter(&sup, numOfTables);
|
||||
free(pTree);
|
||||
|
||||
|
@ -2019,7 +2023,7 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist
|
|||
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) ||
|
||||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) {
|
||||
tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb));
|
||||
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %"PRIu64, pQueryHandle,
|
||||
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, 0x%"PRIx64, pQueryHandle,
|
||||
pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qId);
|
||||
pQueryHandle->pFileGroup = NULL;
|
||||
assert(pQueryHandle->numOfBlocks == 0);
|
||||
|
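The qualification test above decides whether the remaining file groups can still contribute rows to the query window, for both scan directions. A standalone sketch of that test with simplified types; it mirrors the condition in the code rather than the engine's macros.

#include <stdbool.h>
#include <stdint.h>

typedef int64_t TSKEY;
typedef struct { TSKEY skey; TSKEY ekey; } DemoTimeWindow;

/* Ascending scan: a file group is useless once its earliest key is past the
 * query's end key. Descending scan: the window is reversed, so ekey is the
 * lower bound and the group is useless once its latest key falls below it. */
static bool fileGroupQualifies(bool ascending, DemoTimeWindow fileWin, DemoTimeWindow queryWin) {
  if (ascending) {
    return fileWin.skey <= queryWin.ekey;
  }
  return fileWin.ekey >= queryWin.ekey;
}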
@ -2043,7 +2047,7 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist
|
|||
break;
|
||||
}
|
||||
|
||||
tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %"PRIu64, pQueryHandle, numOfBlocks, numOfTables,
|
||||
tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, 0x%"PRIx64, pQueryHandle, numOfBlocks, numOfTables,
|
||||
pQueryHandle->pFileGroup->fid, pQueryHandle->qId);
|
||||
|
||||
assert(numOfBlocks >= 0);
|
||||
|
@ -2135,7 +2139,7 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist
|
|||
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) ||
|
||||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) {
|
||||
tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb));
|
||||
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %"PRIu64, pQueryHandle,
|
||||
tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, 0x%"PRIx64, pQueryHandle,
|
||||
pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qId);
|
||||
pQueryHandle->pFileGroup = NULL;
|
||||
break;
|
||||
|
@ -2159,7 +2163,7 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist
|
|||
break;
|
||||
}
|
||||
|
||||
tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %"PRIu64, pQueryHandle, numOfBlocks, numOfTables,
|
||||
tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, 0x%"PRIx64, pQueryHandle, numOfBlocks, numOfTables,
|
||||
pQueryHandle->pFileGroup->fid, pQueryHandle->qId);
|
||||
|
||||
if (numOfBlocks == 0) {
|
||||
|
@ -2207,7 +2211,7 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists
|
|||
if ((!cur->mixBlock) || cur->blockCompleted) {
|
||||
// all data blocks in current file has been checked already, try next file if exists
|
||||
} else {
|
||||
tsdbDebug("%p continue in current data block, index:%d, pos:%d, %"PRIu64, pQueryHandle, cur->slot, cur->pos,
|
||||
tsdbDebug("%p continue in current data block, index:%d, pos:%d, 0x%"PRIx64, pQueryHandle, cur->slot, cur->pos,
|
||||
pQueryHandle->qId);
|
||||
int32_t code = handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo);
|
||||
*exists = (pQueryHandle->realNumOfRows > 0);
|
||||
|
@ -2336,7 +2340,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int
|
|||
}
|
||||
|
||||
int64_t elapsedTime = taosGetTimestampUs() - st;
|
||||
tsdbDebug("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d, %"PRIu64, pQueryHandle,
|
||||
tsdbDebug("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d, 0x%"PRIx64, pQueryHandle,
|
||||
elapsedTime, numOfRows, numOfCols, pQueryHandle->qId);
|
||||
|
||||
return numOfRows;
|
||||
|
@ -3206,9 +3210,10 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons
|
|||
pGroupInfo->numOfTables = (uint32_t) taosArrayGetSize(res);
|
||||
pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey);
|
||||
|
||||
tsdbDebug("%p no table name/tag condition, all tables belong to one group, numOfTables:%u", tsdb, pGroupInfo->numOfTables);
|
||||
taosArrayDestroy(res);
|
||||
tsdbDebug("%p no table name/tag condition, all tables qualified, numOfTables:%u, group:%zu", tsdb,
|
||||
pGroupInfo->numOfTables, taosArrayGetSize(pGroupInfo->pGroupList));
|
||||
|
||||
taosArrayDestroy(res);
|
||||
if (tsdbUnlockRepoMeta(tsdb) < 0) goto _error;
|
||||
return ret;
|
||||
}
|
||||
|
@ -3391,7 +3396,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
|
|||
pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
|
||||
|
||||
SIOCostSummary* pCost = &pQueryHandle->cost;
|
||||
tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, %"PRIu64,
|
||||
tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, 0x%"PRIx64,
|
||||
pQueryHandle, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId);
|
||||
|
||||
tfree(pQueryHandle);
|
||||
|
@ -3408,14 +3413,16 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) {
|
|||
size_t numOfTables = taosArrayGetSize(p);
|
||||
for(int32_t j = 0; j < numOfTables; ++j) {
|
||||
STable* pTable = taosArrayGetP(p, j);
|
||||
assert(pTable != NULL);
|
||||
|
||||
tsdbUnRefTable(pTable);
|
||||
if (pTable != NULL) { // in case of handling retrieve data from tsdb
|
||||
tsdbUnRefTable(pTable);
|
||||
}
|
||||
//assert(pTable != NULL);
|
||||
}
|
||||
|
||||
taosArrayDestroy(p);
|
||||
}
|
||||
|
||||
taosHashCleanup(pGroupList->map);
|
||||
taosArrayDestroy(pGroupList->pGroupList);
|
||||
pGroupList->numOfTables = 0;
|
||||
}
|
||||
|
@ -3426,7 +3433,7 @@ static void applyFilterToSkipListNode(SSkipList *pSkipList, tExprNode *pExpr, SA
|
|||
// Scan each node in the skiplist by using iterator
|
||||
while (tSkipListIterNext(iter)) {
|
||||
SSkipListNode *pNode = tSkipListIterGet(iter);
|
||||
if (exprTreeApplayFilter(pExpr, pNode, param)) {
|
||||
if (exprTreeApplyFilter(pExpr, pNode, param)) {
|
||||
taosArrayPush(pResult, &(SL_GET_NODE_DATA(pNode)));
|
||||
}
|
||||
}
|
||||
|
|
|
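applyFilterToSkipListNode() above walks every node with an iterator and keeps the ones the expression filter accepts (the corrected name is exprTreeApplyFilter). A generic sketch of that scan-and-collect shape; the types below are stand-ins, not the real SSkipList/SArray API.

#include <stdbool.h>
#include <stddef.h>

typedef struct DemoNode { int value; struct DemoNode *next; } DemoNode;

typedef bool (*demo_filter_fn)(const DemoNode *node, void *param);

/* Walk the list, apply the predicate (the expression-tree filter in the
 * engine), and collect the payloads that pass. */
static size_t collectMatches(DemoNode *head, demo_filter_fn filter, void *param,
                             int *out, size_t cap) {
  size_t n = 0;
  for (DemoNode *p = head; p != NULL && n < cap; p = p->next) {
    if (filter(p, param)) {
      out[n++] = p->value;   /* taosArrayPush(pResult, ...) in the engine */
    }
  }
  return n;
}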
@@ -398,6 +398,10 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) {
     case TSDB_DATA_TYPE_SMALLINT: DEFAULT_COMP(GET_INT16_VAL(f1), GET_INT16_VAL(f2));
     case TSDB_DATA_TYPE_TINYINT:
     case TSDB_DATA_TYPE_BOOL: DEFAULT_COMP(GET_INT8_VAL(f1), GET_INT8_VAL(f2));
+    case TSDB_DATA_TYPE_UTINYINT: DEFAULT_COMP(GET_UINT8_VAL(f1), GET_UINT8_VAL(f2));
+    case TSDB_DATA_TYPE_USMALLINT: DEFAULT_COMP(GET_UINT16_VAL(f1), GET_UINT16_VAL(f2));
+    case TSDB_DATA_TYPE_UINT: DEFAULT_COMP(GET_UINT32_VAL(f1), GET_UINT32_VAL(f2));
+    case TSDB_DATA_TYPE_UBIGINT: DEFAULT_COMP(GET_UINT64_VAL(f1), GET_UINT64_VAL(f2));
     case TSDB_DATA_TYPE_NCHAR: {
       tstr* t1 = (tstr*) f1;
       tstr* t2 = (tstr*) f2;
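The added cases give unsigned columns their own branches because reading them through the signed getters would misorder values with the high bit set. A small sketch of the three-way comparison those branches perform; DEMO_COMP and the memcpy loads stand in for DEFAULT_COMP and GET_UINT64_VAL, which may be defined differently in the engine.

#include <stdint.h>
#include <string.h>

#define DEMO_COMP(x, y) (((x) == (y)) ? 0 : (((x) < (y)) ? -1 : 1))

/* Compare two 8-byte unsigned values stored at possibly unaligned addresses. */
static int32_t compareUint64(const char *f1, const char *f2) {
  uint64_t v1, v2;
  memcpy(&v1, f1, sizeof(v1));
  memcpy(&v2, f2, sizeof(v2));
  return DEMO_COMP(v1, v2);
}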
@ -736,7 +736,7 @@ static uint32_t table[16][256] = {
|
|||
0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa}
|
||||
|
||||
};
|
||||
#ifndef _TD_ARM_
|
||||
#if !defined(_TD_ARM_) && !defined(_TD_MIPS_)
|
||||
static uint32_t long_shifts[4][256] = {
|
||||
{0x00000000, 0xe040e0ac, 0xc56db7a9, 0x252d5705, 0x8f3719a3, 0x6f77f90f,
|
||||
0x4a5aae0a, 0xaa1a4ea6, 0x1b8245b7, 0xfbc2a51b, 0xdeeff21e, 0x3eaf12b2,
|
||||
|
@ -1187,7 +1187,7 @@ uint32_t crc32c_sf(uint32_t crci, crc_stream input, size_t length) {
|
|||
}
|
||||
return (uint32_t)crc ^ 0xffffffff;
|
||||
}
|
||||
#ifndef _TD_ARM_
|
||||
#if !defined(_TD_ARM_) && !defined(_TD_MIPS_)
|
||||
/* Apply the zeros operator table to crc. */
|
||||
static uint32_t shift_crc(uint32_t shift_table[][256], uint32_t crc) {
|
||||
return shift_table[0][crc & 0xff] ^ shift_table[1][(crc >> 8) & 0xff] ^
|
||||
|
@ -1198,7 +1198,7 @@ static uint32_t shift_crc(uint32_t shift_table[][256], uint32_t crc) {
|
|||
version. Otherwise, use the software version. */
|
||||
uint32_t (*crc32c)(uint32_t crci, crc_stream bytes, size_t len) = crc32c_sf;
|
||||
|
||||
#ifndef _TD_ARM_
|
||||
#if !defined(_TD_ARM_) && !defined(_TD_MIPS_)
|
||||
/* Compute CRC-32C using the Intel hardware instruction. */
|
||||
uint32_t crc32c_hw(uint32_t crc, crc_stream buf, size_t len) {
|
||||
crc_stream next = buf;
|
||||
|
@@ -1353,7 +1353,7 @@ uint32_t crc32c_hw(uint32_t crc, crc_stream buf, size_t len) {
 #endif // #ifndef _TD_ARM_

 void taosResolveCRC() {
-#if defined _TD_ARM_ || defined WINDOWS
+#if defined _TD_ARM_ || defined _TD_MIPS_ || defined WINDOWS
   crc32c = crc32c_sf;
 #else
   int sse42;
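taosResolveCRC() above now also forces the software CRC on MIPS builds, matching the new !defined(_TD_MIPS_) guards on the hardware tables. A self-contained sketch of the dispatch idea: a function pointer defaults to a portable CRC-32C and would only be repointed to a hardware (SSE4.2) version where that path exists; the bitwise routine below is a generic reference implementation, not the engine's crc32c_sf.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t (*crc32c_fn)(uint32_t crc, const uint8_t *buf, size_t len);

/* Portable bitwise CRC-32C (Castagnoli polynomial, reflected). */
static uint32_t crc32c_sw(uint32_t crc, const uint8_t *buf, size_t len) {
  crc = ~crc;
  for (size_t i = 0; i < len; ++i) {
    crc ^= buf[i];
    for (int k = 0; k < 8; ++k) {
      crc = (crc & 1) ? (crc >> 1) ^ 0x82f63b78u : (crc >> 1);
    }
  }
  return ~crc;
}

/* Dispatch pointer: starts on the software path everywhere; an init routine
 * may repoint it to an SSE4.2 implementation after a CPUID check on x86.
 * ARM/MIPS builds simply never compile the hardware path. */
static crc32c_fn crc32c = crc32c_sw;

int main(void) {
  const uint8_t msg[] = "123456789";
  printf("crc32c=%08x\n", crc32c(0, msg, sizeof(msg) - 1));  /* expect e3069283 */
  return 0;
}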
@@ -416,7 +416,8 @@ void taosPrintLog(const char *flags, int32_t dflag, const char *format, ...) {
     }
   }

-  if (dflag & DEBUG_SCREEN) taosWrite(1, buffer, (uint32_t)len);
+  if (dflag & DEBUG_SCREEN)
+    taosWrite(1, buffer, (uint32_t)len);
   if (dflag == 255) nInfo(buffer, len);
 }
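The reflowed lines above echo the formatted log line to stdout whenever the SCREEN bit is set in the debug flag. A tiny sketch of that flag check; the DEMO_DEBUG_SCREEN value is made up and write() stands in for taosWrite.

#include <stdint.h>
#include <unistd.h>

#define DEMO_DEBUG_SCREEN 0x40   /* illustrative bit value, not the engine's */

static void maybeEchoToScreen(int32_t dflag, const char *buffer, uint32_t len) {
  if (dflag & DEMO_DEBUG_SCREEN) {
    (void)write(1, buffer, len);   /* fd 1 = stdout */
  }
}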
@ -291,16 +291,16 @@ static void taosNetCheckPort(uint32_t hostIp, int32_t startPort, int32_t endPort
|
|||
info.port = port;
|
||||
ret = taosNetCheckTcpPort(&info);
|
||||
if (ret != 0) {
|
||||
uError("failed to test TCP port:%d", port);
|
||||
printf("failed to test TCP port:%d\n", port);
|
||||
} else {
|
||||
uInfo("successed to test TCP port:%d", port);
|
||||
printf("successed to test TCP port:%d\n", port);
|
||||
}
|
||||
|
||||
ret = taosNetCheckUdpPort(&info);
|
||||
if (ret != 0) {
|
||||
uError("failed to test UDP port:%d", port);
|
||||
printf("failed to test UDP port:%d\n", port);
|
||||
} else {
|
||||
uInfo("successed to test UDP port:%d", port);
|
||||
printf("successed to test UDP port:%d\n", port);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -464,9 +464,9 @@ static void taosNetTestRpc(char *host, int32_t startPort, int32_t pkgLen) {
|
|||
|
||||
int32_t ret = taosNetCheckRpc(host, port, sendpkgLen, spi, NULL);
|
||||
if (ret < 0) {
|
||||
uError("failed to test TCP port:%d", port);
|
||||
printf("failed to test TCP port:%d\n", port);
|
||||
} else {
|
||||
uInfo("successed to test TCP port:%d", port);
|
||||
printf("successed to test TCP port:%d\n", port);
|
||||
}
|
||||
|
||||
if (pkgLen >= tsRpcMaxUdpSize) {
|
||||
|
@ -477,9 +477,9 @@ static void taosNetTestRpc(char *host, int32_t startPort, int32_t pkgLen) {
|
|||
|
||||
ret = taosNetCheckRpc(host, port, pkgLen, spi, NULL);
|
||||
if (ret < 0) {
|
||||
uError("failed to test UDP port:%d", port);
|
||||
printf("failed to test UDP port:%d\n", port);
|
||||
} else {
|
||||
uInfo("successed to test UDP port:%d", port);
|
||||
printf("successed to test UDP port:%d\n", port);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -539,7 +539,7 @@ static void taosNetTestServer(char *host, int32_t startPort, int32_t pkgLen) {
|
|||
}
|
||||
|
||||
void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) {
|
||||
tscEmbedded = 1;
|
||||
// tscEmbedded = 1;
|
||||
if (host == NULL) host = tsLocalFqdn;
|
||||
if (port == 0) port = tsServerPort;
|
||||
if (pkgLen <= 10) pkgLen = 1000;
|
||||
|
@ -559,5 +559,5 @@ void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) {
|
|||
taosNetTestStartup(host, port);
|
||||
}
|
||||
|
||||
tscEmbedded = 0;
|
||||
// tscEmbedded = 0;
|
||||
}