Merge branch 'develop' into hotfix/TD-884
commit 599df650ae
@@ -7,7 +7,6 @@ import (
"encoding/json"
"flag"
"fmt"
"github.com/taosdata/TDengine/importSampleData/import"
"hash/crc32"
"io"
"log"

@@ -18,88 +17,89 @@ import (
"sync"
"time"

dataimport "github.com/taosdata/TDengine/importSampleData/import"

_ "github.com/taosdata/driver-go/taosSql"
)

const (
TIMESTAMP = "timestamp"
DATETIME = "datetime"
MILLISECOND = "millisecond"
DEFAULT_STARTTIME int64 = -1
DEFAULT_INTERVAL int64 = 1*1000
DEFAULT_DELAY int64 = -1
DEFAULT_STATISTIC_TABLE = "statistic"
TIMESTAMP = "timestamp"
DATETIME = "datetime"
MILLISECOND = "millisecond"
DEFAULT_STARTTIME int64 = -1
DEFAULT_INTERVAL int64 = 1 * 1000
DEFAULT_DELAY int64 = -1
DEFAULT_STATISTIC_TABLE = "statistic"

JSON_FORMAT = "json"
CSV_FORMAT = "csv"
JSON_FORMAT = "json"
CSV_FORMAT = "csv"
SUPERTABLE_PREFIX = "s_"
SUBTABLE_PREFIX = "t_"
SUBTABLE_PREFIX = "t_"

DRIVER_NAME = "taosSql"
DRIVER_NAME = "taosSql"
STARTTIME_LAYOUT = "2006-01-02 15:04:05.000"
INSERT_PREFIX = "insert into "
INSERT_PREFIX = "insert into "
)

var (

cfg string
cases string
hnum int
vnum int
thread int
batch int
auto int
starttimestr string
interval int64
host string
port int
user string
password string
dropdb int
db string
dbparam string
cfg string
cases string
hnum int
vnum int
thread int
batch int
auto int
starttimestr string
interval int64
host string
port int
user string
password string
dropdb int
db string
dbparam string

dataSourceName string
startTime int64
startTime int64

superTableConfigMap = make(map[string]*superTableConfig)
subTableMap = make(map[string]*dataRows)
scaleTableNames []string
superTableConfigMap = make(map[string]*superTableConfig)
subTableMap = make(map[string]*dataRows)
scaleTableNames []string

scaleTableMap = make(map[string]*scaleTableInfo)

successRows []int64
successRows []int64
lastStaticTime time.Time
lastTotalRows int64
timeTicker *time.Ticker
delay int64 // default 10 milliseconds
tick int64
save int
saveTable string
lastTotalRows int64
timeTicker *time.Ticker
delay int64 // default 10 milliseconds
tick int64
save int
saveTable string
)

type superTableConfig struct {
startTime int64
endTime int64
cycleTime int64
startTime int64
endTime int64
cycleTime int64
avgInterval int64
config dataimport.CaseConfig
config dataimport.CaseConfig
}

type scaleTableInfo struct {
scaleTableName string
subTableName string
insertRows int64
subTableName string
insertRows int64
}

type tableRows struct {
tableName string // tableName
value string // values(...)
tableName string // tableName
value string // values(...)
}

type dataRows struct {
rows []map[string]interface{}
config dataimport.CaseConfig
rows []map[string]interface{}
config dataimport.CaseConfig
}

func (rows dataRows) Len() int {
@@ -127,10 +127,10 @@ func init() {

if db == "" {
//db = "go"
db = fmt.Sprintf("test_%s",time.Now().Format("20060102"))
db = fmt.Sprintf("test_%s", time.Now().Format("20060102"))
}

if auto == 1 && len(starttimestr) == 0 {
if auto == 1 && len(starttimestr) == 0 {
log.Fatalf("startTime must be set when auto is 1, the format is \"yyyy-MM-dd HH:mm:ss.SSS\" ")
}

@@ -141,7 +141,7 @@ func init() {
}

startTime = t.UnixNano() / 1e6 // as millisecond
}else{
} else {
startTime = DEFAULT_STARTTIME
}

@@ -179,7 +179,7 @@ func main() {

_, exists := superTableConfigMap[caseConfig.Stname]
if !exists {
superTableConfigMap[caseConfig.Stname] = &superTableConfig{config:caseConfig}
superTableConfigMap[caseConfig.Stname] = &superTableConfig{config: caseConfig}
} else {
log.Fatalf("the stname of case %s already exist.\n", caseConfig.Stname)
}

@@ -201,9 +201,9 @@ func main() {

if DEFAULT_DELAY == delay {
// default delay
delay = caseMinumInterval / 2
delay = caseMinumInterval / 2
if delay < 1 {
delay = 1
delay = 1
}
log.Printf("actual delay is %d ms.", delay)
}

@@ -232,7 +232,7 @@ func main() {
filePerThread := subTableNum / thread
leftFileNum := subTableNum % thread

var wg sync.WaitGroup
var wg sync.WaitGroup

start = time.Now()
@@ -255,31 +255,31 @@ func main() {
go staticSpeed()
wg.Wait()

usedTime := time.Since(start)
usedTime := time.Since(start)

total := getTotalRows(successRows)

log.Printf("finished insert %d rows, used %d ms, speed %d rows/s", total, usedTime/1e6, total * 1e9 / int64(usedTime))
log.Printf("finished insert %d rows, used %d ms, speed %d rows/s", total, usedTime/1e6, total*1e3/usedTime.Milliseconds())

if vnum == 0 {
// continue waiting for insert data
wait := make(chan string)
v := <- wait
wait := make(chan string)
v := <-wait
log.Printf("program receive %s, exited.\n", v)
}else{
} else {
timeTicker.Stop()
}

}

func staticSpeed(){
func staticSpeed() {

connection := getConnection()
defer connection.Close()

if save == 1 {
connection.Exec("use " + db)
_, err := connection.Exec("create table if not exists " + saveTable +"(ts timestamp, speed int)")
_, err := connection.Exec("create table if not exists " + saveTable + "(ts timestamp, speed int)")
if err != nil {
log.Fatalf("create %s Table error: %s\n", saveTable, err)
}

@@ -287,13 +287,13 @@ func staticSpeed(){

for {
<-timeTicker.C

currentTime := time.Now()
usedTime := currentTime.UnixNano() - lastStaticTime.UnixNano()

total := getTotalRows(successRows)
currentSuccessRows := total - lastTotalRows

speed := currentSuccessRows * 1e9 / int64(usedTime)
log.Printf("insert %d rows, used %d ms, speed %d rows/s", currentSuccessRows, usedTime/1e6, speed)

@@ -301,14 +301,14 @@ func staticSpeed(){
insertSql := fmt.Sprintf("insert into %s values(%d, %d)", saveTable, currentTime.UnixNano()/1e6, speed)
connection.Exec(insertSql)
}

lastStaticTime = currentTime
lastTotalRows = total
}

}

func getTotalRows(successRows []int64) int64{
func getTotalRows(successRows []int64) int64 {
var total int64 = 0
for j := 0; j < len(successRows); j++ {
total += successRows[j]
@@ -316,18 +316,18 @@ func getTotalRows(successRows []int64) int64{
return total
}

func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval int64){
func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval int64) {
if auto == 1 {
// use auto generate data time
start = startTime
avgInterval = interval
maxTableRows := normalizationDataWithSameInterval(fileRows, avgInterval)
cycleTime = maxTableRows * avgInterval + avgInterval
cycleTime = maxTableRows*avgInterval + avgInterval

} else {

// use the sample data primary timestamp
sort.Sort(fileRows)// sort the file data by the primarykey
sort.Sort(fileRows) // sort the file data by the primarykey
minTime := getPrimaryKey(fileRows.rows[0][fileRows.config.Timestamp])
maxTime := getPrimaryKey(fileRows.rows[len(fileRows.rows)-1][fileRows.config.Timestamp])

@@ -340,21 +340,21 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i

if minTime == maxTime {
avgInterval = interval
cycleTime = tableNum * avgInterval + avgInterval
}else{
cycleTime = tableNum*avgInterval + avgInterval
} else {
avgInterval = (maxTime - minTime) / int64(len(fileRows.rows)) * tableNum
cycleTime = maxTime - minTime + avgInterval
}

}
return
}

func createStatisticTable(){
func createStatisticTable() {
connection := getConnection()
defer connection.Close()

_, err := connection.Exec("create table if not exist " + db + "."+ saveTable +"(ts timestamp, speed int)")
_, err := connection.Exec("create table if not exist " + db + "." + saveTable + "(ts timestamp, speed int)")
if err != nil {
log.Fatalf("createStatisticTable error: %s\n", err)
}

@@ -379,8 +379,8 @@ func createSubTable(subTableMaps map[string]*dataRows) {
tableName := getScaleSubTableName(subTableName, i)

scaleTableMap[tableName] = &scaleTableInfo{
subTableName: subTableName,
insertRows: 0,
subTableName: subTableName,
insertRows: 0,
}
scaleTableNames = append(scaleTableNames, tableName)

@@ -389,12 +389,12 @@ func createSubTable(subTableMaps map[string]*dataRows) {
buffers.WriteString(" using ")
buffers.WriteString(superTableName)
buffers.WriteString(" tags(")
for _, tag := range subTableMaps[subTableName].config.Tags{
for _, tag := range subTableMaps[subTableName].config.Tags {
tagValue := fmt.Sprintf("%v", tagValues[strings.ToLower(tag.Name)])
buffers.WriteString("'" + tagValue + "'")
buffers.WriteString(",")
}
buffers.Truncate(buffers.Len()-1)
buffers.Truncate(buffers.Len() - 1)
buffers.WriteString(")")

createTableSql := buffers.String()
@@ -451,14 +451,14 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {
buffer.WriteString(field.Name + " " + field.Type + ",")
}

buffer.Truncate(buffer.Len()-1)
buffer.Truncate(buffer.Len() - 1)
buffer.WriteString(") tags( ")

for _, tag := range superTableConf.config.Tags {
buffer.WriteString(tag.Name + " " + tag.Type + ",")
}

buffer.Truncate(buffer.Len()-1)
buffer.Truncate(buffer.Len() - 1)
buffer.WriteString(")")

createSql := buffer.String()

@@ -475,16 +475,15 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) {

func getScaleSubTableName(subTableName string, hnum int) string {
if hnum == 0 {
return subTableName
return subTableName
}
return fmt.Sprintf( "%s_%d", subTableName, hnum)
return fmt.Sprintf("%s_%d", subTableName, hnum)
}

func getSuperTableName(stname string) string {
return SUPERTABLE_PREFIX + stname
}

/**
* normalizationData , and return the num of subTables
*/

@@ -505,12 +504,12 @@ func normalizationData(fileRows dataRows, minTime int64) int64 {
value, ok := subTableMap[subTableName]
if !ok {
subTableMap[subTableName] = &dataRows{
rows: []map[string]interface{}{row},
config: fileRows.config,
rows: []map[string]interface{}{row},
config: fileRows.config,
}

tableNum++
}else{
} else {
value.rows = append(value.rows, row)
}
}

@@ -518,9 +517,9 @@ func normalizationData(fileRows dataRows, minTime int64) int64 {
}

// return the maximum table rows
func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int64{
func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int64 {
// subTableMap
currSubTableMap := make(map[string]*dataRows)
currSubTableMap := make(map[string]*dataRows)
for _, row := range fileRows.rows {
// get subTableName
tableValue := getSubTableNameValue(row[fileRows.config.SubTableName])
@@ -534,10 +533,10 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
if !ok {
row[fileRows.config.Timestamp] = 0
currSubTableMap[subTableName] = &dataRows{
rows: []map[string]interface{}{row},
config: fileRows.config,
rows: []map[string]interface{}{row},
config: fileRows.config,
}
}else{
} else {
row[fileRows.config.Timestamp] = int64(len(value.rows)) * avgInterval
value.rows = append(value.rows, row)
}

@@ -545,7 +544,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
}

var maxRows, tableRows int = 0, 0
for tableName := range currSubTableMap{
for tableName := range currSubTableMap {
tableRows = len(currSubTableMap[tableName].rows)
subTableMap[tableName] = currSubTableMap[tableName] // add to global subTableMap
if tableRows > maxRows {

@@ -556,13 +555,11 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int
return int64(maxRows)
}

func getSubTableName(subTableValue string, superTableName string) string {
func getSubTableName(subTableValue string, superTableName string) string {
return SUBTABLE_PREFIX + subTableValue + "_" + superTableName
}

func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []int64) {
func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []int64) {
connection := getConnection()
defer connection.Close()
defer wg.Done()

@@ -591,9 +588,9 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []
var tableEndTime int64
if vnum == 0 {
// need continue generate data
tableEndTime = time.Now().UnixNano()/1e6
}else {
tableEndTime = tableStartTime + superTableConf.cycleTime * int64(vnum) - superTableConf.avgInterval
tableEndTime = time.Now().UnixNano() / 1e6
} else {
tableEndTime = tableStartTime + superTableConf.cycleTime*int64(vnum) - superTableConf.avgInterval
}

insertRows := scaleTableMap[tableName].insertRows
@@ -603,10 +600,10 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []
rowIndex := insertRows % subTableRows
currentRow := subTableInfo.rows[rowIndex]

currentTime := getPrimaryKey(currentRow[subTableInfo.config.Timestamp]) + loopNum * superTableConf.cycleTime + tableStartTime
currentTime := getPrimaryKey(currentRow[subTableInfo.config.Timestamp]) + loopNum*superTableConf.cycleTime + tableStartTime
if currentTime <= tableEndTime {
// append

if lastTableName != tableName {
buffers.WriteString(tableName)
buffers.WriteString(" values")

@@ -616,22 +613,22 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []
buffers.WriteString("(")
buffers.WriteString(fmt.Sprintf("%v", currentTime))
buffers.WriteString(",")

// fieldNum := len(subTableInfo.config.Fields)
for _,field := range subTableInfo.config.Fields {
for _, field := range subTableInfo.config.Fields {
buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
buffers.WriteString(",")
// if( i != fieldNum -1){

// }
}

buffers.Truncate(buffers.Len()-1)
buffers.Truncate(buffers.Len() - 1)
buffers.WriteString(") ")

appendRows++
insertRows++
if appendRows == batch {
if appendRows == batch {
// executebatch
insertSql := buffers.String()
connection.Exec("use " + db)

@@ -645,7 +642,7 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []
lastTableName = ""
appendRows = 0
}
}else {
} else {
// finished insert current table
break
}

@@ -654,14 +651,14 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []
scaleTableMap[tableName].insertRows = insertRows

}

// left := len(rows)
if appendRows > 0 {
if appendRows > 0 {
// executebatch
insertSql := buffers.String()
connection.Exec("use " + db)
affectedRows := executeBatchInsert(insertSql, connection)

successRows[threadIndex] += affectedRows
currSuccessRows += affectedRows
@@ -676,7 +673,7 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []
break
}

if(num == 0){
if num == 0 {
wg.Done() //finished insert history data
num++
}

@@ -691,7 +688,7 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []

}

func buildSql(rows []tableRows) string{
func buildSql(rows []tableRows) string {

var lastTableName string

@@ -709,7 +706,7 @@ func buildSql(rows []tableRows) string{

if lastTableName == row.tableName {
buffers.WriteString(row.value)
}else {
} else {
buffers.WriteString(" ")
buffers.WriteString(row.tableName)
buffers.WriteString(" values")

@@ -722,7 +719,7 @@ func buildSql(rows []tableRows) string{
return inserSql
}

func buildRow(tableName string, currentTime int64, subTableInfo *dataRows, currentRow map[string]interface{}) tableRows{
func buildRow(tableName string, currentTime int64, subTableInfo *dataRows, currentRow map[string]interface{}) tableRows {

tableRows := tableRows{tableName: tableName}

@@ -732,12 +729,12 @@ func buildRow(tableName string, currentTime int64, subTableInfo *dataRows, curre
buffers.WriteString(fmt.Sprintf("%v", currentTime))
buffers.WriteString(",")

for _,field := range subTableInfo.config.Fields {
for _, field := range subTableInfo.config.Fields {
buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)]))
buffers.WriteString(",")
}

buffers.Truncate(buffers.Len()-1)
buffers.Truncate(buffers.Len() - 1)
buffers.WriteString(")")

insertSql := buffers.String()

@@ -764,7 +761,7 @@ func getFieldValue(fieldValue interface{}) string {
return fmt.Sprintf("'%v'", fieldValue)
}

func getConnection() *sql.DB{
func getConnection() *sql.DB {
db, err := sql.Open(DRIVER_NAME, dataSourceName)
if err != nil {
panic(err)

@@ -772,7 +769,6 @@ func getConnection() *sql.DB{
return db
}

func getSubTableNameValue(suffix interface{}) string {
return fmt.Sprintf("%v", suffix)
}
@@ -950,7 +946,7 @@ func parseMillisecond(str interface{}, layout string) int64 {
log.Println(err)
return -1
}
return t.UnixNano()/1e6
return t.UnixNano() / 1e6
}

// lowerMapKey transfer all the map key to lowercase

@@ -1009,7 +1005,7 @@ func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) {
if i < len(caseConfig.Fields)-1 {
// delete middle item, a = a[:i+copy(a[i:], a[i+1:])]
caseConfig.Fields = caseConfig.Fields[:i+copy(caseConfig.Fields[i:], caseConfig.Fields[i+1:])]
}else {
} else {
// delete the last item
caseConfig.Fields = caseConfig.Fields[:len(caseConfig.Fields)-1]
}

@@ -1057,7 +1053,7 @@ func parseArg() {
flag.Parse()
}

func printArg() {
func printArg() {
fmt.Println("used param: ")
fmt.Println("-cfg: ", cfg)
fmt.Println("-cases:", cases)

@@ -263,10 +263,12 @@ int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid);
void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex);

bool hasMoreVnodesToTry(SSqlObj *pSql);
bool hasMoreClauseToTry(SSqlObj* pSql);

void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)());
int tscSetMgmtIpListFromCfg(const char *first, const char *second);
void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp);
int tscSetMgmtEpSetFromCfg(const char *first, const char *second);

void* malloc_throw(size_t size);
void* calloc_throw(size_t nmemb, size_t size);

@@ -314,7 +314,7 @@ typedef struct SSqlObj {
char * sqlstr;
char retry;
char maxRetry;
SRpcIpSet ipList;
SRpcEpSet epSet;
char listed;
tsem_t rspSem;
SSqlCmd cmd;

@@ -358,7 +358,7 @@ void tscInitMsgsFp();

int tsParseSql(SSqlObj *pSql, bool initial);

void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet);
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet);
int tscProcessSql(SSqlObj *pSql);

int tscRenewTableMeta(SSqlObj *pSql, char *tableId);

@@ -430,7 +430,6 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
int32_t bytes = pInfo->pSqlExpr->resBytes;

char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;

if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
int32_t realLen = varDataLen(pData);
assert(realLen <= bytes - VARSTR_HEADER_SIZE);

@@ -465,7 +464,8 @@ extern void * tscQhandle;
extern int tscKeepConn[];
extern int tsInsertHeadSize;
extern int tscNumOfThreads;
extern SRpcCorIpSet tscMgmtIpSet;

extern SRpcCorEpSet tscMgmtEpSet;

extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);

@@ -169,7 +169,11 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
}

tscProcessSql(pSql);
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql);
} else {
tscProcessSql(pSql);
}
}

/*

@@ -474,33 +478,11 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:
// 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly.
// 2. vnode may need the schema information along with submit block to update its local table schema.
if (pCmd->command == TSDB_SQL_INSERT) {
tscDebug("%p redo parse sql string to build submit block", pSql);

pCmd->parseFinished = false;
tscResetSqlCmdObj(pCmd);

code = tsParseSql(pSql, true);

if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
} else if (code != TSDB_CODE_SUCCESS) {
goto _error;
}

/*
* Discard previous built submit blocks, and then parse the sql string again and build up all submit blocks,
* and send the required submit block according to index value in supporter to server.
*/
pSql->fp = pSql->fetchFp; // restore the fp
tscHandleInsertRetry(pSql);
} else if (pCmd->command == TSDB_SQL_SELECT) { // in case of other query type, continue
if (pCmd->command == TSDB_SQL_INSERT || pCmd->command == TSDB_SQL_SELECT) {
tscDebug("%p redo parse sql string and proceed", pSql);
//tscDebug("before %p fp:%p, fetchFp:%p", pSql, pSql->fp, pSql->fetchFp);
pCmd->parseFinished = false;
tscResetSqlCmdObj(pCmd);

//tscDebug("after %p fp:%p, fetchFp:%p", pSql, pSql->fp, pSql->fetchFp);
code = tsParseSql(pSql, true);

if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {

@@ -509,8 +491,17 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
goto _error;
}

tscProcessSql(pSql);
} else { // in all other cases, simple retry
if (pCmd->command == TSDB_SQL_INSERT) {
/*
* Discard previous built submit blocks, and then parse the sql string again and build up all submit blocks,
* and send the required submit block according to index value in supporter to server.
*/
pSql->fp = pSql->fetchFp; // restore the fp
tscHandleInsertRetry(pSql);
} else if (pCmd->command == TSDB_SQL_SELECT) { // in case of other query type, continue
tscProcessSql(pSql);
}
}else { // in all other cases, simple retry
tscProcessSql(pSql);
}

@@ -1481,7 +1481,7 @@ static bool first_last_function_setup(SQLFunctionCtx *pCtx) {

// todo opt for null block
static void first_function(SQLFunctionCtx *pCtx) {
if (pCtx->order == TSDB_ORDER_DESC) {
if (pCtx->order == TSDB_ORDER_DESC || pCtx->preAggVals.dataBlockLoaded == false) {
return;
}

@@ -1550,28 +1550,17 @@ static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t in
* to decide if the value is earlier than current intermediate result
*/
static void first_dist_function(SQLFunctionCtx *pCtx) {
assert(pCtx->size > 0);

if (pCtx->size == 0) {
return;
}

/*
* do not to check data in the following cases:
* 1. data block that are not loaded
* 2. scan data files in desc order
*/
if (pCtx->order == TSDB_ORDER_DESC) {
if (pCtx->order == TSDB_ORDER_DESC || pCtx->preAggVals.dataBlockLoaded == false) {
return;
}

int32_t notNullElems = 0;

// data block is discard, not loaded, do not need to check it
if (!pCtx->preAggVals.dataBlockLoaded) {
return;
}

// find the first not null value
for (int32_t i = 0; i < pCtx->size; ++i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);

@@ -1655,7 +1644,7 @@ static void first_dist_func_second_merge(SQLFunctionCtx *pCtx) {
* least one data in this block that is not null.(TODO opt for this case)
*/
static void last_function(SQLFunctionCtx *pCtx) {
if (pCtx->order != pCtx->param[0].i64Key) {
if (pCtx->order != pCtx->param[0].i64Key || pCtx->preAggVals.dataBlockLoaded == false) {
return;
}

@@ -2303,8 +2292,9 @@ static void top_func_second_merge(SQLFunctionCtx *pCtx) {

// the intermediate result is binary, we only use the output data type
for (int32_t i = 0; i < pInput->num; ++i) {
int16_t type = (pCtx->outputType == TSDB_DATA_TYPE_FLOAT)? TSDB_DATA_TYPE_DOUBLE:pCtx->outputType;
do_top_function_add(pOutput, pCtx->param[0].i64Key, &pInput->res[i]->v.i64Key, pInput->res[i]->timestamp,
pCtx->outputType, &pCtx->tagInfo, pInput->res[i]->pTags, pCtx->currentStage);
type, &pCtx->tagInfo, pInput->res[i]->pTags, pCtx->currentStage);
}

SET_VAL(pCtx, pInput->num, pOutput->num);

@@ -485,7 +485,6 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}

case TSDB_SQL_SELECT: {
assert(pCmd->numOfClause == 1);
const char* msg1 = "columns in select clause not identical";

for (int32_t i = pCmd->numOfClause; i < pInfo->subclauseInfo.numOfClause; ++i) {

@@ -496,16 +495,19 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}

assert(pCmd->numOfClause == pInfo->subclauseInfo.numOfClause);
for (int32_t i = 0; i < pInfo->subclauseInfo.numOfClause; ++i) {
for (int32_t i = pCmd->clauseIndex; i < pInfo->subclauseInfo.numOfClause; ++i) {
SQuerySQL* pQuerySql = pInfo->subclauseInfo.pClause[i];

tscTrace("%p start to parse %dth subclause, total:%d", pSql, i, pInfo->subclauseInfo.numOfClause);
if ((code = doCheckForQuery(pSql, pQuerySql, i)) != TSDB_CODE_SUCCESS) {
return code;
}

tscPrintSelectClause(pSql, i);
pCmd->clauseIndex += 1;
}

// restore the clause index
pCmd->clauseIndex = 0;
// set the command/global limit parameters from the first subclause to the sqlcmd object
SQueryInfo* pQueryInfo1 = tscGetQueryInfoDetail(pCmd, 0);
pCmd->command = pQueryInfo1->command;

@@ -1385,6 +1387,11 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum
return numOfTotalColumns;
}

static void tscInsertPrimaryTSSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
SColumnIndex tsCol = {.tableIndex = pIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscColumnListInsert(pQueryInfo->colList, &tsCol);
}

int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem) {
const char* msg0 = "invalid column name";
const char* msg1 = "tag for normal table query is not allowed";

@@ -1427,6 +1434,8 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, t

addProjectQueryCol(pQueryInfo, startPos, &index, pItem);
}

tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
} else {
return TSDB_CODE_TSC_INVALID_SQL;
}

@@ -1499,8 +1508,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col

switch (optr) {
case TK_COUNT: {
if (pItem->pNode->pParam != NULL && pItem->pNode->pParam->nExpr != 1) {
/* more than one parameter for count() function */
if (pItem->pNode->pParam != NULL && pItem->pNode->pParam->nExpr != 1) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}

@@ -1551,11 +1560,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
} else { // count(*) is equalled to count(primary_timestamp_key)
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};

int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
}

pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);

memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName));
getColumnName(pItem, pExpr->aliasName, sizeof(pExpr->aliasName) - 1);

@@ -1570,9 +1580,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}

// the time stamp may be always needed
if (index.tableIndex > 0 && index.tableIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
SColumnIndex tsCol = {.tableIndex = index.tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscColumnListInsert(pQueryInfo->colList, &tsCol);
if (index.tableIndex < tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
}

return TSDB_CODE_SUCCESS;

@@ -1682,10 +1691,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
tscColumnListInsert(pQueryInfo->colList, &(ids.ids[i]));
}
}

SColumnIndex tsCol = {.tableIndex = index.tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscColumnListInsert(pQueryInfo->colList, &tsCol);

tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
return TSDB_CODE_SUCCESS;
}
case TK_FIRST:

@@ -2348,9 +2355,9 @@ bool validateIpAddress(const char* ip, size_t size) {

strncpy(tmp, ip, size);

in_addr_t ipAddr = inet_addr(tmp);
in_addr_t epAddr = inet_addr(tmp);

return ipAddr != INADDR_NONE;
return epAddr != INADDR_NONE;
}

int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
@@ -5862,6 +5869,8 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
pTableMetaInfo = tscAddEmptyMetaInfo(pQueryInfo);
}

assert(pCmd->clauseIndex == index);

// too many result columns not support order by in query
if (pQuerySql->pSelection->nExpr > TSDB_MAX_COLUMNS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);

@@ -5975,12 +5984,11 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
pQueryInfo->window.ekey = pQueryInfo->window.ekey / 1000;
}
} else { // set the time rang
pQueryInfo->window.skey = TSKEY_INITIAL_VAL;
pQueryInfo->window.ekey = INT64_MAX;
pQueryInfo->window = TSWINDOW_INITIALIZER;
}

// user does not specified the query time window, twa is not allowed in such case.
if ((pQueryInfo->window.skey == 0 || pQueryInfo->window.ekey == INT64_MAX ||
if ((pQueryInfo->window.skey == INT64_MIN || pQueryInfo->window.ekey == INT64_MAX ||
(pQueryInfo->window.ekey == INT64_MAX / 1000 && tinfo.precision == TSDB_TIME_PRECISION_MILLI)) && tscIsTWAQuery(pQueryInfo)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}

@@ -30,8 +30,8 @@

#define TSC_MGMT_VNODE 999

SRpcCorIpSet tscMgmtIpSet;
SRpcIpSet tscDnodeIpSet;
SRpcCorEpSet tscMgmtEpSet;
SRpcEpSet tscDnodeEpSet;

int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};

@@ -45,18 +45,18 @@ void tscSaveSubscriptionProgress(void* sub);

static int32_t minMsgSize() { return tsRpcHeadSize + 100; }

static void tscSetDnodeIpList(SSqlObj* pSql, SCMVgroupInfo* pVgroupInfo) {
SRpcIpSet* pIpList = &pSql->ipList;
pIpList->inUse = 0;
static void tscSetDnodeEpSet(SSqlObj* pSql, SCMVgroupInfo* pVgroupInfo) {
SRpcEpSet* pEpSet = &pSql->epSet;
pEpSet->inUse = 0;
if (pVgroupInfo == NULL) {
pIpList->numOfIps = 0;
pEpSet->numOfEps = 0;
return;
}

pIpList->numOfIps = pVgroupInfo->numOfIps;
for(int32_t i = 0; i < pVgroupInfo->numOfIps; ++i) {
strcpy(pIpList->fqdn[i], pVgroupInfo->ipAddr[i].fqdn);
pIpList->port[i] = pVgroupInfo->ipAddr[i].port;
pEpList->numOfEps = pVgroupInfo->numOfEps;
for(int32_t i = 0; i < pVgroupInfo->numOfEps; ++i) {
strcpy(pEpList->fqdn[i], pVgroupInfo->epAddr[i].fqdn);
pEpList->port[i] = pVgroupInfo->epAddr[i].port;
}
}
void tscIpSetCopy(SRpcIpSet *dst, SRpcIpSet *src) {

@@ -118,21 +118,21 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcIpSet *pIpSet) {

taosCorBeginWrite(&pVgroupInfo->version);
//TODO(dengyihao), dont care vgid
pVgroupInfo->inUse = pIpSet->inUse;
pVgroupInfo->numOfIps = pIpSet->numOfIps;
for (int32_t i = 0; pVgroupInfo->numOfIps; i++) {
strncpy(pVgroupInfo->ipAddr[i].fqdn, pIpSet->fqdn[i], TSDB_FQDN_LEN);
pVgroupInfo->ipAddr[i].port = pIpSet->port[i];
pVgroupInfo->inUse = pEpSet->inUse;
pVgroupInfo->numOfEps = pEpSet->numOfEps;
for (int32_t i = 0; pVgroupInfo->numOfEps; i++) {
strncpy(pVgroupInfo->epAddr[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN);
pVgroupInfo->epAddr[i].port = pEpSet->port[i];
}
taosCorEndWrite(&pVgroupInfo->version);
}
void tscPrintMgmtIp() {
SRpcIpSet dump;
tscDumpMgmtIpSet(&dump);
if (dump.numOfIps <= 0) {
tscError("invalid mnode IP list:%d", dump.numOfIps);
void tscPrintMgmtEp() {
SRpcEpSet dump;
tscDumpMgmtEpSet(&dump);
if (dump.numOfEps <= 0) {
tscError("invalid mnode EP list:%d", dump.numOfEPs);
} else {
for (int i = 0; i < dump.numOfIps; ++i) {
for (int i = 0; i < dump.numOfEps; ++i) {
tscDebug("mnode index:%d %s:%d", i, dump.fqdn[i], dump.port[i]);
}
}

@@ -148,9 +148,9 @@ void tscPrintMgmtIp() {
UNUSED_FUNC
static int32_t tscGetMgmtConnMaxRetryTimes() {
int32_t factor = 2;
SRpcIpSet dump;
tscDumpMgmtIpSet(&dump);
return dump.numOfIps * factor;
SRpcEpSet dump;
tscDumpMgmtEpSet(&dump);
return dump.numOfEps * factor;
}

void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
@@ -166,10 +166,10 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {

if (code == 0) {
SCMHeartBeatRsp *pRsp = (SCMHeartBeatRsp *)pRes->pRsp;
SRpcIpSet * pIpList = &pRsp->ipList;
if (pIpList->numOfIps > 0) {
tscIpSetHtons(pIpList);
tscUpdateMgmtIpList(pIpList);
SRpcEpSet * pEpList = &pRsp->epList;
if (pEpList->numOfEps > 0) {
tscEpSetHtons(pEpList);
tscUpdateMgmtEpList(pEpList);
}

pSql->pTscObj->connId = htonl(pRsp->connId);

@@ -242,7 +242,7 @@ int tscSendMsgToServer(SSqlObj *pSql) {

// set the mgmt ip list
if (pSql->cmd.command >= TSDB_SQL_MGMT) {
tscDumpMgmtIpSet(&pSql->ipList);
tscDumpMgmtEpSet(&pSql->epList);
}

memcpy(pMsg, pSql->cmd.payload, pSql->cmd.payloadLen);

@@ -260,11 +260,11 @@ int tscSendMsgToServer(SSqlObj *pSql) {
// Otherwise, the pSql object may have been released already during the response function, which is
// processMsgFromServer function. In the meanwhile, the assignment of the rpc context to sql object will absolutely
// cause crash.
rpcSendRequest(pObj->pDnodeConn, &pSql->ipList, &rpcMsg);
rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg);
return TSDB_CODE_SUCCESS;
}

void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
SSqlObj *pSql = (SSqlObj *)rpcMsg->ahandle;
if (pSql == NULL || pSql->signature != pSql) {
tscError("%p sql is already released", pSql);

@@ -293,14 +293,14 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
return;
}

if (pIpSet) {
//SRpcIpSet dump;
tscIpSetHtons(pIpSet);
if (tscIpSetIsEqual(&pSql->ipList, pIpSet)) {
if (pEpSet) {
//SRpcEpSet dump;
tscEpSetHtons(pEpSet);
if (tscEpSetIsEqual(&pSql->epList, pEpSet)) {
if(pCmd->command < TSDB_SQL_MGMT) {
tscUpdateVgroupInfo(pSql, pIpSet);
tscUpdateVgroupInfo(pSql, pEpSet);
} else {
tscUpdateMgmtIpList(pIpSet);
tscUpdateMgmtEpList(pEpSet);
}
}
}

@@ -484,8 +484,9 @@ int tscProcessSql(SSqlObj *pSql) {
return pSql->res.code;
}
} else if (pCmd->command < TSDB_SQL_LOCAL) {
//tscDumpMgmtIpSet(&pSql->ipList);
} else {

//pSql->epSet = tscMgmtEpSet;
} else { // local handler
return (*tscProcessMsgRsp[pCmd->command])(pSql);
}

@@ -538,7 +539,7 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SRetrieveTableMsg *pRetrieveMsg = (SRetrieveTableMsg *) pSql->cmd.payload;
pRetrieveMsg->qhandle = htobe64(pSql->res.qhandle);

SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
pRetrieveMsg->free = htons(pQueryInfo->type);

// todo valid the vgroupId at the client side

@@ -588,10 +589,10 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {

// pSql->cmd.payloadLen is set during copying data into payload
pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT;
tscDumpIpSetFromVgroupInfo(&pTableMeta->corVgroupInfo, &pSql->ipList);

tscDebug("%p build submit msg, vgId:%d numOfTables:%d numberOfIP:%d", pSql, vgId, pSql->cmd.numOfTablesInSubmit,
pSql->ipList.numOfIps);
tscDumpEpSetFromVgroupInfo(&pTableMeta->corVgroupInfo, &pSql->epList);

tscDebug("%p build submit msg, vgId:%d numOfTables:%d numberOfEP:%d", pSql, vgId, pSql->cmd.numOfTablesInSubmit,
pSql->epSet.numOfEps);
return TSDB_CODE_SUCCESS;
}

@@ -630,7 +631,8 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
} else {
pVgroupInfo = &pTableMeta->vgroupInfo;
}
tscSetDnodeIpList(pSql, pVgroupInfo);
tscSetDnodeEpList(pSql, pVgroupInfo);

if (pVgroupInfo != NULL) {
pQueryMsg->head.vgId = htonl(pVgroupInfo->vgId);
}

@@ -642,7 +644,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char

pQueryMsg->numOfTables = htonl(1); // set the number of tables
pMsg += sizeof(STableIdInfo);
} else { // it is a subquery of the super table query, this IP info is acquired from vgroupInfo
} else { // it is a subquery of the super table query, this EP info is acquired from vgroupInfo
int32_t index = pTableMetaInfo->vgroupIndex;
int32_t numOfVgroups = taosArrayGetSize(pTableMetaInfo->pVgroupTables);
assert(index >= 0 && index < numOfVgroups);

@@ -650,9 +652,9 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
tscDebug("%p query on stable, vgIndex:%d, numOfVgroups:%d", pSql, index, numOfVgroups);

SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, index);

// set the vgroup info
tscSetDnodeIpList(pSql, &pTableIdList->vgInfo);
tscSetDnodeEpList(pSql, &pTableIdList->vgInfo);
pQueryMsg->head.vgId = htonl(pTableIdList->vgInfo.vgId);

int32_t numOfTables = taosArrayGetSize(pTableIdList->itemList);

@@ -1385,7 +1387,7 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);

tscDumpIpSetFromVgroupInfo(&pTableMetaInfo->pTableMeta->corVgroupInfo, &pSql->ipList);
tscDumpEpSetFromVgroupInfo(&pTableMetaInfo->pTableMeta->corVgroupInfo, &pSql->epList);

return TSDB_CODE_SUCCESS;
}

@@ -1720,8 +1722,8 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
pMetaMsg->contLen = htons(pMetaMsg->contLen);
pMetaMsg->numOfColumns = htons(pMetaMsg->numOfColumns);

if (pMetaMsg->sid < 0 || pMetaMsg->vgroup.numOfIps < 0) {
tscError("invalid meter vgId:%d, sid%d", pMetaMsg->vgroup.numOfIps, pMetaMsg->sid);
if (pMetaMsg->sid < 0 || pMetaMsg->vgroup.numOfEps < 0) {
tscError("invalid meter vgId:%d, sid%d", pMetaMsg->vgroup.numOfEps, pMetaMsg->sid);
return TSDB_CODE_TSC_INVALID_VALUE;
}

@@ -1735,8 +1737,8 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
return TSDB_CODE_TSC_INVALID_VALUE;
}

for (int i = 0; i < pMetaMsg->vgroup.numOfIps; ++i) {
pMetaMsg->vgroup.ipAddr[i].port = htons(pMetaMsg->vgroup.ipAddr[i].port);
for (int i = 0; i < pMetaMsg->vgroup.numOfEps; ++i) {
pMetaMsg->vgroup.epAddr[i].port = htons(pMetaMsg->vgroup.epAddr[i].port);
}

SSchema* pSchema = pMetaMsg->schema;

@@ -1912,9 +1914,11 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
//just init, no need to lock
SCMVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j];
pVgroups->vgId = htonl(pVgroups->vgId);
assert(pVgroups->numOfIps >= 1);
for (int32_t k = 0; k < pVgroups->numOfIps; ++k) {
pVgroups->ipAddr[k].port = htons(pVgroups->ipAddr[k].port);
assert(pVgroups->numOfEps >= 1);

for (int32_t k = 0; k < pVgroups->numOfEps; ++k) {
pVgroups->epAddr[k].port = htons(pVgroups->epAddr[k].port);

}

pMsg += size;

@@ -2007,9 +2011,9 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
assert(len <= sizeof(pObj->db));
tstrncpy(pObj->db, temp, sizeof(pObj->db));

if (pConnect->ipList.numOfIps > 0) {
tscIpSetHtons(&pConnect->ipList);
tscUpdateMgmtIpList(&pConnect->ipList);
if (pConnect->epList.numOfEps > 0) {
tscEpSetHtons(&pConnect->epList);
tscUpdateMgmtEpList(&pConnect->epList);
}

strcpy(pObj->sversion, pConnect->serverVersion);

@@ -62,8 +62,8 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
}

if (ip) {
if (tscSetMgmtIpListFromCfg(ip, NULL) < 0) return NULL;
if (port) tscMgmtIpSet.ipSet.port[0] = port;
if (tscSetMgmtEpListFromCfg(ip, NULL) < 0) return NULL;
if (port) tscMgmtEpSet.epSet.port[0] = port;
}

void *pDnodeConn = NULL;

@@ -424,7 +424,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
}

// current data set are exhausted, fetch more data from node
if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql)) &&
if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) &&
(pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||

@@ -458,7 +458,7 @@ void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArr
break;
}
}
assert(info.vgInfo.numOfIps != 0);
assert(info.vgInfo.numOfEps != 0);

vgTables = taosArrayInit(4, sizeof(STableIdInfo));
info.itemList = vgTables;
@@ -848,13 +848,14 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
SSqlRes* pRes = &pSql->res;

SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);

// TODO put to async res?
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) {
assert(numOfRows == taos_errno(pSql));

pParentSql->res.code = numOfRows;
tscError("%p retrieve failed, index:%d, code:%s", pSql, pSupporter->subqueryIndex, tstrerror(numOfRows));

tscQueueAsyncRes(pParentSql);
return;
}

if (numOfRows >= 0) {

@@ -941,31 +942,22 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) {

SSqlRes *pRes = &pSub->res;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSub->cmd, 0);
// STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);

// if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
// if (pRes->row >= pRes->numOfRows && pTableMetaInfo->vgroupIndex < pTableMetaInfo->vgroupList->numOfVgroups &&
// (!tscHasReachLimitation(pQueryInfo, pRes)) && !pRes->completed) {
// numOfFetch++;
// }
// } else {
if (!tscHasReachLimitation(pQueryInfo, pRes)) {
if (pRes->row >= pRes->numOfRows) {
hasData = false;

if (!pRes->completed) {
numOfFetch++;
}
}
} else { // has reach the limitation, no data anymore
if (pRes->row >= pRes->numOfRows) {
hasData = false;
break;
if (!tscHasReachLimitation(pQueryInfo, pRes)) {
if (pRes->row >= pRes->numOfRows) {
hasData = false;

if (!pRes->completed) {
numOfFetch++;
}
}

} else { // has reach the limitation, no data anymore
if (pRes->row >= pRes->numOfRows) {
hasData = false;
break;
}
}
// }
}

// has data remains in client side, and continue to return data to app
if (hasData) {
@@ -1026,7 +1018,7 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;

tscDebug("%p all subquery response, retrieve data", pSql);
tscDebug("%p all subquery response, retrieve data for subclause:%d", pSql, pCmd->clauseIndex);

// the column transfer support struct has been built
if (pRes->pColumnIndex != NULL) {

@@ -1195,8 +1187,11 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter

pNew->cmd.numOfCols = 0;
pNewQueryInfo->intervalTime = 0;
memset(&pNewQueryInfo->limit, 0, sizeof(SLimitVal));

pSupporter->limit = pNewQueryInfo->limit;

pNewQueryInfo->limit.limit = -1;
pNewQueryInfo->limit.offset = 0;

// backup the data and clear it in the sqlcmd object
pSupporter->groupbyExpr = pNewQueryInfo->groupbyExpr;
memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr));

@@ -1307,7 +1302,7 @@ int32_t tscHandleMasterJoinQuery(SSqlObj* pSql) {
break;
}
}

pSql->cmd.command = (pSql->numOfSubs <= 0)? TSDB_SQL_RETRIEVE_EMPTY_RESULT:TSDB_SQL_TABLE_JOIN_RETRIEVE;

return TSDB_CODE_SUCCESS;

@@ -1605,8 +1600,8 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p

// data in from current vnode is stored in cache and disk
uint32_t numOfRowsFromSubquery = trsupport->pExtMemBuffer[idx]->numOfTotalElems + trsupport->localBuffer->num;
tscDebug("%p sub:%p all data retrieved from ip:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql, pSql,
pTableMetaInfo->vgroupList->vgroups[0].ipAddr[0].fqdn, pTableMetaInfo->vgroupList->vgroups[0].vgId,
tscDebug("%p sub:%p all data retrieved from ep:%s, vgId:%d, numOfRows:%d, orderOfSub:%d", pParentSql, pSql,
pTableMetaInfo->vgroupList->vgroups[0].epAddr[0].fqdn, pTableMetaInfo->vgroupList->vgroups[0].vgId,
numOfRowsFromSubquery, idx);

tColModelCompact(pDesc->pColumnModel, trsupport->localBuffer, pDesc->pColumnModel->capacity);

@@ -1724,8 +1719,8 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
assert(pRes->numOfRows == numOfRows);
int64_t num = atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows);

tscDebug("%p sub:%p retrieve numOfRows:%" PRId64 " totalNumOfRows:%" PRIu64 " from ip:%s, orderOfSub:%d", pParentSql, pSql,
pRes->numOfRows, pState->numOfRetrievedRows, pSql->ipList.fqdn[pSql->ipList.inUse], idx);
tscDebug("%p sub:%p retrieve numOfRows:%" PRId64 " totalNumOfRows:%" PRIu64 " from ep:%s, orderOfSub:%d", pParentSql, pSql,
pRes->numOfRows, pState->numOfRetrievedRows, pSql->epSet.fqdn[pSql->epSet.inUse], idx);

if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
tscError("%p sub:%p num of OrderedRes is too many, max allowed:%" PRId32 " , current:%" PRId64,

@@ -1833,8 +1828,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) {
return;
}

tscTrace("%p sub:%p query complete, ip:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql,
pVgroup->ipAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex);
tscTrace("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql,
pVgroup->epAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex);

if (pSql->res.qhandle == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode
tscRetrieveFromDnodeCallBack(param, pSql, 0);
@@ -1982,88 +1977,119 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}

static char* getResultBlockPosition(SSqlCmd* pCmd, SSqlRes* pRes, int32_t columnIndex, int16_t* bytes) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);

SFieldSupInfo* pInfo = (SFieldSupInfo*) TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.pSupportInfo, columnIndex);
assert(pInfo->pSqlExpr != NULL);

*bytes = pInfo->pSqlExpr->resBytes;
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows;

return pData;
}

static void doBuildResFromSubqueries(SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res;

SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);

int32_t numOfRes = INT32_MAX;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
if (pSql->pSubs[i] == NULL) {
continue;
}

numOfRes = MIN(numOfRes, pSql->pSubs[i]->res.numOfRows);
}

int32_t totalSize = tscGetResRowLength(pQueryInfo->exprList);
pRes->pRsp = realloc(pRes->pRsp, numOfRes * totalSize);
pRes->data = pRes->pRsp;

char* data = pRes->data;
int16_t bytes = 0;

size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
for(int32_t i = 0; i < numOfExprs; ++i) {
SColumnIndex* pIndex = &pRes->pColumnIndex[i];
SSqlRes *pRes1 = &pSql->pSubs[pIndex->tableIndex]->res;
SSqlCmd *pCmd1 = &pSql->pSubs[pIndex->tableIndex]->cmd;

char* pData = getResultBlockPosition(pCmd1, pRes1, pIndex->columnIndex, &bytes);
memcpy(data, pData, bytes * numOfRes);

data += bytes * numOfRes;
pRes1->row = numOfRes;
}

pRes->numOfRows = numOfRes;
pRes->numOfClauseTotal += numOfRes;
}

void tscBuildResFromSubqueries(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;

SSqlRes* pRes = &pSql->res;

if (pRes->code != TSDB_CODE_SUCCESS) {
tscQueueAsyncRes(pSql);
return;
}

while (1) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);

if (pRes->tsrow == NULL) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);

size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);

if (pRes->tsrow == NULL) {
pRes->tsrow = calloc(numOfExprs, POINTER_BYTES);
pRes->length = calloc(numOfExprs, sizeof(int32_t));
pRes->tsrow = calloc(numOfExprs, POINTER_BYTES);
pRes->buffer = calloc(numOfExprs, POINTER_BYTES);
pRes->length = calloc(numOfExprs, sizeof(int32_t));

tscRestoreSQLFuncForSTableQuery(pQueryInfo);
}

while (1) {
if (pRes->row < pRes->numOfRows) {
assert(0);
}

bool success = false;

int32_t numOfTableHasRes = 0;
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
if (pSql->pSubs[i] != NULL) {
numOfTableHasRes++;
}
}

if (numOfTableHasRes >= 2) { // do merge result
success = (doSetResultRowData(pSql->pSubs[0], false) != NULL) && (doSetResultRowData(pSql->pSubs[1], false) != NULL);
} else { // only one subquery
SSqlObj *pSub = pSql->pSubs[0];
if (pSub == NULL) {
pSub = pSql->pSubs[1];
}

success = (doSetResultRowData(pSub, false) != NULL);
}

if (success) { // current row of final output has been built, return to app
for (int32_t i = 0; i < numOfExprs; ++i) {
SColumnIndex* pIndex = &pRes->pColumnIndex[i];
SSqlRes *pRes1 = &pSql->pSubs[pIndex->tableIndex]->res;
pRes->tsrow[i] = pRes1->tsrow[pIndex->columnIndex];
pRes->length[i] = pRes1->length[pIndex->columnIndex];
}

pRes->numOfClauseTotal++;
break;
} else { // continue retrieve data from vnode
if (!tscHasRemainDataInSubqueryResultSet(pSql)) {
tscDebug("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1);
SSubqueryState *pState = NULL;

// free all sub sqlobj
for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
SSqlObj *pChildObj = pSql->pSubs[i];
if (pChildObj == NULL) {
continue;
}

SJoinSupporter *pSupporter = (SJoinSupporter *)pChildObj->param;
pState = pSupporter->pState;

tscDestroyJoinSupporter(pChildObj->param);
taos_free_result(pChildObj);
}

free(pState);

pRes->completed = true; // set query completed
sem_post(&pSql->rspSem);
return;
}

tscFetchDatablockFromSubquery(pSql);
if (pRes->code != TSDB_CODE_SUCCESS) {
return;
}

doBuildResFromSubqueries(pSql);
sem_post(&pSql->rspSem);

return;

// continue retrieve data from vnode
// if (!tscHasRemainDataInSubqueryResultSet(pSql)) {
// tscDebug("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1);
// SSubqueryState* pState = NULL;
//
// // free all sub sqlobj
// for (int32_t i = 0; i < pSql->numOfSubs; ++i) {
// SSqlObj* pChildObj = pSql->pSubs[i];
// if (pChildObj == NULL) {
// continue;
// }
//
// SJoinSupporter* pSupporter = (SJoinSupporter*)pChildObj->param;
// pState = pSupporter->pState;
//
// tscDestroyJoinSupporter(pChildObj->param);
// taos_free_result(pChildObj);
// }
//
// free(pState);
//
// pRes->completed = true; // set query completed
// sem_post(&pSql->rspSem);
// return;
// }

tscFetchDatablockFromSubquery(pSql);
if (pRes->code != TSDB_CODE_SUCCESS) {
return;
}
}

if (pSql->res.code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pSql->param, pSql, 0);
(*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
} else {
tscQueueAsyncRes(pSql);
}

@ -2117,14 +2143,6 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {

  assert(pRes->row >= 0 && pRes->row <= pRes->numOfRows);

  if(pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
    if (pRes->completed) {
      tfree(pRes->tsrow);
    }

    return pRes->tsrow;
  }

  if (pRes->row >= pRes->numOfRows) { // all the results has returned to invoker
    tfree(pRes->tsrow);
    return pRes->tsrow;

@ -2182,7 +2200,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
  return pRes->tsrow;
}

static bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
  bool hasData = true;
  SSqlCmd *pCmd = &pSql->cmd;
@ -41,8 +41,7 @@ int tscNumOfThreads;

static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
void taosInitNote(int numOfNoteLines, int maxNotes, char* lable);
//void tscUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet);
//void tscUpdateEpSet(void *ahandle, SRpcEpSet *pEpSet);

void tscCheckDiskUsage(void *UNUSED_PARAM(para), void* UNUSED_PARAM(param)) {
  taosGetDisk();

@ -117,8 +116,8 @@ void taos_init_imp() {
    taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note");
  }

  if (tscSetMgmtIpListFromCfg(tsFirst, tsSecond) < 0) {
    tscError("failed to init mnode IP list");
  if (tscSetMgmtEpSetFromCfg(tsFirst, tsSecond) < 0) {
    tscError("failed to init mnode EP list");
    return;
  }
@ -1994,6 +1994,10 @@ bool hasMoreVnodesToTry(SSqlObj* pSql) {
         (!tscHasReachLimitation(pQueryInfo, pRes)) && (pTableMetaInfo->vgroupIndex < numOfVgroups - 1);
}

bool hasMoreClauseToTry(SSqlObj* pSql) {
  return pSql->cmd.clauseIndex < pSql->cmd.numOfClause - 1;
}

void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
  SSqlCmd* pCmd = &pSql->cmd;
  SSqlRes* pRes = &pSql->res;

@ -2050,7 +2054,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) {
  }
}

void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) {
void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) {
  SSqlCmd* pCmd = &pSql->cmd;
  SSqlRes* pRes = &pSql->res;

@ -2070,17 +2074,13 @@ void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) {

  tfree(pSql->pSubs);
  pSql->numOfSubs = 0;

  if (pSql->fp != NULL) {
    pSql->fp = queryFp;
    assert(queryFp != NULL);
  }
  pSql->fp = fp;

  tscDebug("%p try data in the next subclause:%d, total subclause:%d", pSql, pCmd->clauseIndex, pCmd->numOfClause);
  if (pCmd->command > TSDB_SQL_LOCAL) {
    tscProcessLocalCmd(pSql);
  } else {
    tscProcessSql(pSql);
    tscDoQuery(pSql);
  }
}
@ -2145,20 +2145,20 @@ char* strdup_throw(const char* str) {
  return p;
}

int tscSetMgmtIpListFromCfg(const char *first, const char *second) {
int tscSetMgmtEpListFromCfg(const char *first, const char *second) {
  // init mgmt ip set
  tscMgmtIpSet.version = 0;
  SRpcIpSet *mgmtIpSet = &(tscMgmtIpSet.ipSet);
  mgmtIpSet->numOfIps = 0;
  mgmtIpSet->inUse = 0;
  tscMgmtEpSet.version = 0;
  SRpcEpSet *mgmtEpSet = &(tscMgmtEpSet.epSet);
  mgmtEpSet->numOfEps = 0;
  mgmtEpSet->inUse = 0;

  if (first && first[0] != 0) {
    if (strlen(first) >= TSDB_EP_LEN) {
      terrno = TSDB_CODE_TSC_INVALID_FQDN;
      return -1;
    }
    taosGetFqdnPortFromEp(first, mgmtIpSet->fqdn[mgmtIpSet->numOfIps], &(mgmtIpSet->port[mgmtIpSet->numOfIps]));
    mgmtIpSet->numOfIps++;
    taosGetFqdnPortFromEp(first, mgmtEpSet->fqdn[mgmtEpSet->numOfEps], &(mgmtEpSet->port[mgmtEpSet->numOfEps]));
    mgmtEpSet->numOfEps++;
  }

  if (second && second[0] != 0) {

@ -2166,11 +2166,11 @@ int tscSetMgmtIpListFromCfg(const char *first, const char *second) {
      terrno = TSDB_CODE_TSC_INVALID_FQDN;
      return -1;
    }
    taosGetFqdnPortFromEp(second, mgmtIpSet->fqdn[mgmtIpSet->numOfIps], &(mgmtIpSet->port[mgmtIpSet->numOfIps]));
    mgmtIpSet->numOfIps++;
    taosGetFqdnPortFromEp(second, mgmtEpSet->fqdn[mgmtEpSet->numOfEps], &(mgmtEpSet->port[mgmtEpSet->numOfEps]));
    mgmtEpSet->numOfEps++;
  }

  if (mgmtIpSet->numOfIps == 0) {
  if (mgmtEpSet->numOfEps == 0) {
    terrno = TSDB_CODE_TSC_INVALID_FQDN;
    return -1;
  }
@ -30,8 +30,6 @@ extern uint16_t tsDnodeShellPort;
extern uint16_t tsDnodeDnodePort;
extern uint16_t tsSyncPort;
extern int32_t tsStatusInterval;
extern int16_t tsNumOfVnodesPerCore;
extern int16_t tsNumOfTotalVnodes;
extern int32_t tsNumOfMnodes;
extern int32_t tsEnableVnodeBak;
@ -38,12 +38,9 @@ uint16_t tsDnodeShellPort = 6030; // udp[6035-6039] tcp[6035]
|
|||
uint16_t tsDnodeDnodePort = 6035; // udp/tcp
|
||||
uint16_t tsSyncPort = 6040;
|
||||
int32_t tsStatusInterval = 1; // second
|
||||
int16_t tsNumOfVnodesPerCore = 32;
|
||||
int16_t tsNumOfTotalVnodes = TSDB_INVALID_VNODE_NUM;
|
||||
int32_t tsNumOfMnodes = 3;
|
||||
int32_t tsEnableVnodeBak = 1;
|
||||
|
||||
|
||||
// common
|
||||
int32_t tsRpcTimer = 1000;
|
||||
int32_t tsRpcMaxTime = 600; // seconds;
|
||||
|
@ -199,6 +196,9 @@ int32_t sDebugFlag = 135;
|
|||
int32_t wDebugFlag = 135;
|
||||
int32_t tsdbDebugFlag = 131;
|
||||
|
||||
int32_t (*monitorStartSystemFp)() = NULL;
|
||||
void (*monitorStopSystemFp)() = NULL;
|
||||
|
||||
static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT;
|
||||
|
||||
void taosSetAllDebugFlag() {
|
||||
|
@ -248,11 +248,17 @@ bool taosCfgDynamicOptions(char *msg) {
|
|||
*((int32_t *)cfg->ptr) = vint;
|
||||
|
||||
if (strncasecmp(cfg->option, "monitor", olen) == 0) {
|
||||
// if (0 == vint) {
|
||||
// monitorStartSystem();
|
||||
// } else {
|
||||
// monitorStopSystem();
|
||||
// }
|
||||
if (1 == vint) {
|
||||
if (monitorStartSystemFp) {
|
||||
(*monitorStartSystemFp)();
|
||||
uInfo("monitor is enabled");
|
||||
}
|
||||
} else {
|
||||
if (monitorStopSystemFp) {
|
||||
(*monitorStopSystemFp)();
|
||||
uInfo("monitor is disabled");
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -393,16 +399,6 @@ static void doInitGlobalConfig() {
|
|||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
cfg.option = "numOfTotalVnodes";
|
||||
cfg.ptr = &tsNumOfTotalVnodes;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT16;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
|
||||
cfg.minValue = 0;
|
||||
cfg.maxValue = TSDB_MAX_VNODES;
|
||||
cfg.ptrLength = 0;
|
||||
cfg.unitType = TAOS_CFG_UTYPE_NONE;
|
||||
taosInitConfigOption(cfg);
|
||||
|
||||
cfg.option = "numOfMnodes";
|
||||
cfg.ptr = &tsNumOfMnodes;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
|
@ -444,7 +440,7 @@ static void doInitGlobalConfig() {
|
|||
taosInitConfigOption(cfg);
|
||||
|
||||
// 0-any; 1-mnode; 2-vnode
|
||||
cfg.option = "alternativeRole";
|
||||
cfg.option = "role";
|
||||
cfg.ptr = &tsAlternativeRole;
|
||||
cfg.valType = TAOS_CFG_VTYPE_INT32;
|
||||
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
|
||||
|
@ -1271,12 +1267,6 @@ bool taosCheckGlobalCfg() {
|
|||
tsNumOfCores = 1;
|
||||
}
|
||||
|
||||
if (tsNumOfTotalVnodes == TSDB_INVALID_VNODE_NUM) {
|
||||
tsNumOfTotalVnodes = tsNumOfCores * tsNumOfVnodesPerCore;
|
||||
tsNumOfTotalVnodes = tsNumOfTotalVnodes > TSDB_MAX_VNODES ? TSDB_MAX_VNODES : tsNumOfTotalVnodes;
|
||||
tsNumOfTotalVnodes = tsNumOfTotalVnodes < TSDB_MIN_VNODES ? TSDB_MIN_VNODES : tsNumOfTotalVnodes;
|
||||
}
|
||||
|
||||
// todo refactor
|
||||
tsVersion = 0;
|
||||
for (int i = 0; i < 10; i++) {
|
||||
|
|
|
@ -208,7 +208,6 @@ public class TDNode {
    setCfgConfig("mnodeEqualVnodeNum", "0");
    setCfgConfig("walLevel", "1");
    setCfgConfig("statusInterval", "1");
    setCfgConfig("numOfTotalVnodes", "64");
    setCfgConfig("numOfMnodes", "3");
    setCfgConfig("numOfThreadsPerCore", "2.0");
    setCfgConfig("monitor", "0");
@ -103,9 +103,6 @@ void cqClose(void *handle) {
  SCqContext *pContext = handle;
  if (handle == NULL) return;

  taosTmrCleanUp(pContext->tmrCtrl);
  pContext->tmrCtrl = NULL;

  // stop all CQs
  cqStop(pContext);

@ -125,6 +122,9 @@ void cqClose(void *handle) {

  pthread_mutex_destroy(&pContext->mutex);

  taosTmrCleanUp(pContext->tmrCtrl);
  pContext->tmrCtrl = NULL;

  cTrace("vgId:%d, CQ is closed", pContext->vgId);
  free(pContext);
}
@ -35,8 +35,8 @@ void* dnodeGetVnodeTsdb(void *pVnode);
void dnodeReleaseVnode(void *pVnode);

void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell);
void dnodeGetMnodeIpSetForPeer(void *ipSet);
void dnodeGetMnodeIpSetForShell(void *ipSet);
void dnodeGetMnodeEpSetForPeer(void *epSet);
void dnodeGetMnodeEpSetForShell(void *epSet);

#ifdef __cplusplus
}
@ -52,7 +52,7 @@ void * tsDnodeTmr = NULL;
static void * tsStatusTimer = NULL;
static uint32_t tsRebootTime;

static SRpcIpSet tsDMnodeIpSet = {0};
static SRpcEpSet tsDMnodeEpSet = {0};
static SDMMnodeInfos tsDMnodeInfos = {0};
static SDMDnodeCfg tsDnodeCfg = {0};
static taos_qset tsMgmtQset = NULL;

@ -90,21 +90,21 @@ int32_t dnodeInitMgmt() {
  tsRebootTime = taosGetTimestampSec();

  if (!dnodeReadMnodeInfos()) {
    memset(&tsDMnodeIpSet, 0, sizeof(SRpcIpSet));
    memset(&tsDMnodeEpSet, 0, sizeof(SRpcEpSet));
    memset(&tsDMnodeInfos, 0, sizeof(SDMMnodeInfos));

    tsDMnodeIpSet.numOfIps = 1;
    taosGetFqdnPortFromEp(tsFirst, tsDMnodeIpSet.fqdn[0], &tsDMnodeIpSet.port[0]);
    tsDMnodeEpSet.numOfEps = 1;
    taosGetFqdnPortFromEp(tsFirst, tsDMnodeEpSet.fqdn[0], &tsDMnodeEpSet.port[0]);

    if (strcmp(tsSecond, tsFirst) != 0) {
      tsDMnodeIpSet.numOfIps = 2;
      taosGetFqdnPortFromEp(tsSecond, tsDMnodeIpSet.fqdn[1], &tsDMnodeIpSet.port[1]);
      tsDMnodeEpSet.numOfEps = 2;
      taosGetFqdnPortFromEp(tsSecond, tsDMnodeEpSet.fqdn[1], &tsDMnodeEpSet.port[1]);
    }
  } else {
    tsDMnodeIpSet.inUse = tsDMnodeInfos.inUse;
    tsDMnodeIpSet.numOfIps = tsDMnodeInfos.nodeNum;
    tsDMnodeEpSet.inUse = tsDMnodeInfos.inUse;
    tsDMnodeEpSet.numOfEps = tsDMnodeInfos.nodeNum;
    for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
      taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSet.fqdn[i], &tsDMnodeIpSet.port[i]);
      taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeEpSet.fqdn[i], &tsDMnodeEpSet.port[i]);
    }
  }
@ -450,27 +450,27 @@ static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg) {
|
|||
return taosCfgDynamicOptions(pCfg->config);
|
||||
}
|
||||
|
||||
void dnodeUpdateMnodeIpSetForPeer(SRpcIpSet *pIpSet) {
|
||||
dInfo("mnode IP list for is changed, numOfIps:%d inUse:%d", pIpSet->numOfIps, pIpSet->inUse);
|
||||
for (int i = 0; i < pIpSet->numOfIps; ++i) {
|
||||
pIpSet->port[i] -= TSDB_PORT_DNODEDNODE;
|
||||
dInfo("mnode index:%d %s:%u", i, pIpSet->fqdn[i], pIpSet->port[i])
|
||||
void dnodeUpdateMnodeEpSetForPeer(SRpcEpSet *pEpSet) {
|
||||
dInfo("mnode EP list for is changed, numOfEps:%d inUse:%d", pEpSet->numOfEps, pEpSet->inUse);
|
||||
for (int i = 0; i < pEpSet->numOfEps; ++i) {
|
||||
pEpSet->port[i] -= TSDB_PORT_DNODEDNODE;
|
||||
dInfo("mnode index:%d %s:%u", i, pEpSet->fqdn[i], pEpSet->port[i])
|
||||
}
|
||||
|
||||
tsDMnodeIpSet = *pIpSet;
|
||||
tsDMnodeEpSet = *pEpSet;
|
||||
}
|
||||
|
||||
void dnodeGetMnodeIpSetForPeer(void *ipSetRaw) {
|
||||
SRpcIpSet *ipSet = ipSetRaw;
|
||||
*ipSet = tsDMnodeIpSet;
|
||||
void dnodeGetMnodeEpSetForPeer(void *epSetRaw) {
|
||||
SRpcEpSet *epSet = epSetRaw;
|
||||
*epSet = tsDMnodeEpSet;
|
||||
|
||||
for (int i=0; i<ipSet->numOfIps; ++i)
|
||||
ipSet->port[i] += TSDB_PORT_DNODEDNODE;
|
||||
for (int i=0; i<epSet->numOfEps; ++i)
|
||||
epSet->port[i] += TSDB_PORT_DNODEDNODE;
|
||||
}
|
||||
|
||||
void dnodeGetMnodeIpSetForShell(void *ipSetRaw) {
|
||||
SRpcIpSet *ipSet = ipSetRaw;
|
||||
*ipSet = tsDMnodeIpSet;
|
||||
void dnodeGetMnodeEpSetForShell(void *epSetRaw) {
|
||||
SRpcEpSet *epSet = epSetRaw;
|
||||
*epSet = tsDMnodeEpSet;
|
||||
}
|
||||
|
||||
static void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
|
||||
|
@ -536,10 +536,10 @@ static void dnodeUpdateMnodeInfos(SDMMnodeInfos *pMnodes) {
|
|||
dInfo("mnode index:%d, %s", tsDMnodeInfos.nodeInfos[i].nodeId, tsDMnodeInfos.nodeInfos[i].nodeEp);
|
||||
}
|
||||
|
||||
tsDMnodeIpSet.inUse = tsDMnodeInfos.inUse;
|
||||
tsDMnodeIpSet.numOfIps = tsDMnodeInfos.nodeNum;
|
||||
tsDMnodeEpSet.inUse = tsDMnodeInfos.inUse;
|
||||
tsDMnodeEpSet.numOfEps = tsDMnodeInfos.nodeNum;
|
||||
for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
|
||||
taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSet.fqdn[i], &tsDMnodeIpSet.port[i]);
|
||||
taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeEpSet.fqdn[i], &tsDMnodeEpSet.port[i]);
|
||||
}
|
||||
|
||||
dnodeSaveMnodeInfos();
|
||||
|
@ -549,10 +549,10 @@ static void dnodeUpdateMnodeInfos(SDMMnodeInfos *pMnodes) {
|
|||
static bool dnodeReadMnodeInfos() {
|
||||
char ipFile[TSDB_FILENAME_LEN*2] = {0};
|
||||
|
||||
sprintf(ipFile, "%s/mnodeIpList.json", tsDnodeDir);
|
||||
sprintf(ipFile, "%s/mnodeEpSet.json", tsDnodeDir);
|
||||
FILE *fp = fopen(ipFile, "r");
|
||||
if (!fp) {
|
||||
dDebug("failed to read mnodeIpList.json, file not exist");
|
||||
dDebug("failed to read mnodeEpSet.json, file not exist");
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -563,40 +563,40 @@ static bool dnodeReadMnodeInfos() {
|
|||
if (len <= 0) {
|
||||
free(content);
|
||||
fclose(fp);
|
||||
dError("failed to read mnodeIpList.json, content is null");
|
||||
dError("failed to read mnodeEpSet.json, content is null");
|
||||
return false;
|
||||
}
|
||||
|
||||
content[len] = 0;
|
||||
cJSON* root = cJSON_Parse(content);
|
||||
if (root == NULL) {
|
||||
dError("failed to read mnodeIpList.json, invalid json format");
|
||||
dError("failed to read mnodeEpSet.json, invalid json format");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
|
||||
cJSON* inUse = cJSON_GetObjectItem(root, "inUse");
|
||||
if (!inUse || inUse->type != cJSON_Number) {
|
||||
dError("failed to read mnodeIpList.json, inUse not found");
|
||||
dError("failed to read mnodeEpSet.json, inUse not found");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
tsDMnodeInfos.inUse = inUse->valueint;
|
||||
|
||||
cJSON* nodeNum = cJSON_GetObjectItem(root, "nodeNum");
|
||||
if (!nodeNum || nodeNum->type != cJSON_Number) {
|
||||
dError("failed to read mnodeIpList.json, nodeNum not found");
|
||||
dError("failed to read mnodeEpSet.json, nodeNum not found");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
tsDMnodeInfos.nodeNum = nodeNum->valueint;
|
||||
|
||||
cJSON* nodeInfos = cJSON_GetObjectItem(root, "nodeInfos");
|
||||
if (!nodeInfos || nodeInfos->type != cJSON_Array) {
|
||||
dError("failed to read mnodeIpList.json, nodeInfos not found");
|
||||
dError("failed to read mnodeEpSet.json, nodeInfos not found");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
|
||||
int size = cJSON_GetArraySize(nodeInfos);
|
||||
if (size != tsDMnodeInfos.nodeNum) {
|
||||
dError("failed to read mnodeIpList.json, nodeInfos size not matched");
|
||||
dError("failed to read mnodeEpSet.json, nodeInfos size not matched");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
|
||||
|
@ -606,14 +606,14 @@ static bool dnodeReadMnodeInfos() {
|
|||
|
||||
cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
|
||||
if (!nodeId || nodeId->type != cJSON_Number) {
|
||||
dError("failed to read mnodeIpList.json, nodeId not found");
|
||||
dError("failed to read mnodeEpSet.json, nodeId not found");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
tsDMnodeInfos.nodeInfos[i].nodeId = nodeId->valueint;
|
||||
|
||||
cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp");
|
||||
if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) {
|
||||
dError("failed to read mnodeIpList.json, nodeName not found");
|
||||
dError("failed to read mnodeEpSet.json, nodeName not found");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
strncpy(tsDMnodeInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_EP_LEN);
|
||||
|
@ -621,7 +621,7 @@ static bool dnodeReadMnodeInfos() {
|
|||
|
||||
ret = true;
|
||||
|
||||
dInfo("read mnode iplist successed, numOfIps:%d inUse:%d", tsDMnodeInfos.nodeNum, tsDMnodeInfos.inUse);
|
||||
dInfo("read mnode epSet successed, numOfEps:%d inUse:%d", tsDMnodeInfos.nodeNum, tsDMnodeInfos.inUse);
|
||||
for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
|
||||
dInfo("mnode:%d, %s", tsDMnodeInfos.nodeInfos[i].nodeId, tsDMnodeInfos.nodeInfos[i].nodeEp);
|
||||
}
|
||||
|
@ -635,7 +635,7 @@ PARSE_OVER:
|
|||
|
||||
static void dnodeSaveMnodeInfos() {
|
||||
char ipFile[TSDB_FILENAME_LEN] = {0};
|
||||
sprintf(ipFile, "%s/mnodeIpList.json", tsDnodeDir);
|
||||
sprintf(ipFile, "%s/mnodeEpSet.json", tsDnodeDir);
|
||||
FILE *fp = fopen(ipFile, "w");
|
||||
if (!fp) return;
|
||||
|
||||
|
@ -663,11 +663,11 @@ static void dnodeSaveMnodeInfos() {
|
|||
fclose(fp);
|
||||
free(content);
|
||||
|
||||
dInfo("save mnode iplist successed");
|
||||
dInfo("save mnode epSet successed");
|
||||
}
|
||||
|
||||
char *dnodeGetMnodeMasterEp() {
|
||||
return tsDMnodeInfos.nodeInfos[tsDMnodeIpSet.inUse].nodeEp;
|
||||
return tsDMnodeInfos.nodeInfos[tsDMnodeEpSet.inUse].nodeEp;
|
||||
}
|
||||
|
||||
void* dnodeGetMnodeInfos() {
|
||||
|
@ -699,7 +699,6 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
|
|||
pStatus->dnodeId = htonl(tsDnodeCfg.dnodeId);
|
||||
strcpy(pStatus->dnodeEp, tsLocalEp);
|
||||
pStatus->lastReboot = htonl(tsRebootTime);
|
||||
pStatus->numOfTotalVnodes = htons((uint16_t) tsNumOfTotalVnodes);
|
||||
pStatus->numOfCores = htons((uint16_t) tsNumOfCores);
|
||||
pStatus->diskAvailable = tsAvailDataDirGB;
|
||||
pStatus->alternativeRole = (uint8_t) tsAlternativeRole;
|
||||
|
@ -727,9 +726,9 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
|
|||
.msgType = TSDB_MSG_TYPE_DM_STATUS
|
||||
};
|
||||
|
||||
SRpcIpSet ipSet;
|
||||
dnodeGetMnodeIpSetForPeer(&ipSet);
|
||||
dnodeSendMsgToDnode(&ipSet, &rpcMsg);
|
||||
SRpcEpSet epSet;
|
||||
dnodeGetMnodeEpSetForPeer(&epSet);
|
||||
dnodeSendMsgToDnode(&epSet, &rpcMsg);
|
||||
}
|
||||
|
||||
static bool dnodeReadDnodeCfg() {
|
||||
|
@ -818,20 +817,20 @@ void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell) {
|
|||
SRpcConnInfo connInfo = {0};
|
||||
rpcGetConnInfo(rpcMsg->handle, &connInfo);
|
||||
|
||||
SRpcIpSet ipSet = {0};
|
||||
SRpcEpSet epSet = {0};
|
||||
if (forShell) {
|
||||
dnodeGetMnodeIpSetForShell(&ipSet);
|
||||
dnodeGetMnodeEpSetForShell(&epSet);
|
||||
} else {
|
||||
dnodeGetMnodeIpSetForPeer(&ipSet);
|
||||
dnodeGetMnodeEpSetForPeer(&epSet);
|
||||
}
|
||||
|
||||
dDebug("msg:%s will be redirected, dnodeIp:%s user:%s, numOfIps:%d inUse:%d", taosMsg[rpcMsg->msgType],
|
||||
taosIpStr(connInfo.clientIp), connInfo.user, ipSet.numOfIps, ipSet.inUse);
|
||||
dDebug("msg:%s will be redirected, dnodeIp:%s user:%s, numOfEps:%d inUse:%d", taosMsg[rpcMsg->msgType],
|
||||
taosIpStr(connInfo.clientIp), connInfo.user, epSet.numOfEps, epSet.inUse);
|
||||
|
||||
for (int i = 0; i < ipSet.numOfIps; ++i) {
|
||||
dDebug("mnode index:%d %s:%d", i, ipSet.fqdn[i], ipSet.port[i]);
|
||||
ipSet.port[i] = htons(ipSet.port[i]);
|
||||
for (int i = 0; i < epSet.numOfEps; ++i) {
|
||||
dDebug("mnode index:%d %s:%d", i, epSet.fqdn[i], epSet.port[i]);
|
||||
epSet.port[i] = htons(epSet.port[i]);
|
||||
}
|
||||
|
||||
rpcSendRedirectRsp(rpcMsg->handle, &ipSet);
|
||||
rpcSendRedirectRsp(rpcMsg->handle, &epSet);
|
||||
}
|
||||
|
|
|
@ -29,11 +29,11 @@
|
|||
#include "dnodeVWrite.h"
|
||||
#include "dnodeMPeer.h"
|
||||
|
||||
extern void dnodeUpdateMnodeIpSetForPeer(SRpcIpSet *pIpSet);
|
||||
extern void dnodeUpdateMnodeEpSetForPeer(SRpcEpSet *pEpSet);
|
||||
static void (*dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *);
|
||||
static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcIpSet *);
|
||||
static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *);
|
||||
static void (*dnodeProcessRspMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *rpcMsg);
|
||||
static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcIpSet *pIpSet);
|
||||
static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet);
|
||||
static void *tsDnodeServerRpc = NULL;
|
||||
static void *tsDnodeClientRpc = NULL;
|
||||
|
||||
|
@ -83,7 +83,7 @@ void dnodeCleanupServer() {
|
|||
}
|
||||
}
|
||||
|
||||
static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
|
||||
static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
|
||||
SRpcMsg rspMsg = {
|
||||
.handle = pMsg->handle,
|
||||
.pCont = NULL,
|
||||
|
@ -148,9 +148,9 @@ void dnodeCleanupClient() {
|
|||
}
|
||||
}
|
||||
|
||||
static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
|
||||
if (pMsg->msgType == TSDB_MSG_TYPE_DM_STATUS_RSP && pIpSet) {
|
||||
dnodeUpdateMnodeIpSetForPeer(pIpSet);
|
||||
static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
|
||||
if (pMsg->msgType == TSDB_MSG_TYPE_DM_STATUS_RSP && pEpSet) {
|
||||
dnodeUpdateMnodeEpSetForPeer(pEpSet);
|
||||
}
|
||||
|
||||
if (dnodeProcessRspMsgFp[pMsg->msgType]) {
|
||||
|
@ -166,12 +166,12 @@ void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg)) {
|
|||
dnodeProcessRspMsgFp[msgType] = fp;
|
||||
}
|
||||
|
||||
void dnodeSendMsgToDnode(SRpcIpSet *ipSet, SRpcMsg *rpcMsg) {
|
||||
rpcSendRequest(tsDnodeClientRpc, ipSet, rpcMsg);
|
||||
void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg) {
|
||||
rpcSendRequest(tsDnodeClientRpc, epSet, rpcMsg);
|
||||
}
|
||||
|
||||
void dnodeSendMsgToDnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp) {
|
||||
SRpcIpSet ipSet = {0};
|
||||
dnodeGetMnodeIpSetForPeer(&ipSet);
|
||||
rpcSendRecv(tsDnodeClientRpc, &ipSet, rpcMsg, rpcRsp);
|
||||
SRpcEpSet epSet = {0};
|
||||
dnodeGetMnodeEpSetForPeer(&epSet);
|
||||
rpcSendRecv(tsDnodeClientRpc, &epSet, rpcMsg, rpcRsp);
|
||||
}
|
||||
|
|
|
@ -31,7 +31,7 @@
|
|||
#include "dnodeShell.h"
|
||||
|
||||
static void (*dnodeProcessShellMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *);
|
||||
static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcIpSet *);
|
||||
static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *);
|
||||
static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char *secret, char *ckey);
|
||||
static void * tsDnodeShellRpc = NULL;
|
||||
static int32_t tsDnodeQueryReqNum = 0;
|
||||
|
@ -108,7 +108,7 @@ void dnodeCleanupShell() {
|
|||
}
|
||||
}
|
||||
|
||||
void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
|
||||
void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
|
||||
SRpcMsg rpcMsg = {
|
||||
.handle = pMsg->handle,
|
||||
.pCont = NULL,
|
||||
|
|
|
@ -39,13 +39,13 @@ SDnodeStatisInfo dnodeGetStatisInfo();
|
|||
|
||||
bool dnodeIsFirstDeploy();
|
||||
char * dnodeGetMnodeMasterEp();
|
||||
void dnodeGetMnodeIpSetForPeer(void *ipSet);
|
||||
void dnodeGetMnodeIpSetForShell(void *ipSet);
|
||||
void dnodeGetMnodeEpSetForPeer(void *epSet);
|
||||
void dnodeGetMnodeEpSetForShell(void *epSet);
|
||||
void * dnodeGetMnodeInfos();
|
||||
int32_t dnodeGetDnodeId();
|
||||
|
||||
void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg));
|
||||
void dnodeSendMsgToDnode(SRpcIpSet *ipSet, SRpcMsg *rpcMsg);
|
||||
void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg);
|
||||
void dnodeSendMsgToDnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp);
|
||||
void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t sid);
|
||||
|
||||
|
|
|
@ -274,9 +274,8 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_DEFAULT_PAYLOAD_SIZE 5120 // default payload size, greater than PATH_MAX value
#define TSDB_EXTRA_PAYLOAD_SIZE 128 // extra bytes for auth
#define TSDB_CQ_SQL_SIZE 1024
#define TSDB_MIN_VNODES 64
#define TSDB_MAX_VNODES 2048
#define TSDB_MIN_VNODES 256
#define TSDB_INVALID_VNODE_NUM 0

#define TSDB_DNODE_ROLE_ANY 0
#define TSDB_DNODE_ROLE_MGMT 1

@ -293,7 +292,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_MIN_TOTAL_BLOCKS 2
#define TSDB_MAX_TOTAL_BLOCKS 10000
#define TSDB_DEFAULT_TOTAL_BLOCKS 4
#define TSDB_DEFAULT_TOTAL_BLOCKS 6

#define TSDB_MIN_TABLES 4
#define TSDB_MAX_TABLES 10000000
@ -176,7 +176,7 @@ extern char *taosMsg[];
typedef struct {
  char fqdn[TSDB_FQDN_LEN];
  uint16_t port;
} SIpAddr;
} SEpAddr;

typedef struct {
  int32_t numOfVnodes;

@ -306,7 +306,7 @@ typedef struct {
  int8_t reserved1;
  int8_t reserved2;
  int32_t connId;
  SRpcIpSet ipList;
  SRpcEpSet epSet;
} SCMConnectRsp;

typedef struct {
@ -581,12 +581,12 @@ typedef struct {
|
|||
char dnodeEp[TSDB_EP_LEN];
|
||||
uint32_t moduleStatus;
|
||||
uint32_t lastReboot; // time stamp for last reboot
|
||||
uint16_t numOfTotalVnodes; // from config file
|
||||
uint16_t reserve1; // from config file
|
||||
uint16_t openVnodes;
|
||||
uint16_t numOfCores;
|
||||
float diskAvailable; // GB
|
||||
uint8_t alternativeRole;
|
||||
uint8_t reserve[15];
|
||||
uint8_t reserve2[15];
|
||||
SClusterCfg clusterCfg;
|
||||
SVnodeLoad load[];
|
||||
} SDMStatusMsg;
|
||||
|
@ -648,8 +648,8 @@ typedef struct SCMSTableVgroupMsg {
|
|||
|
||||
typedef struct {
|
||||
int32_t vgId;
|
||||
int8_t numOfIps;
|
||||
SIpAddr ipAddr[TSDB_MAX_REPLICA];
|
||||
int8_t numOfEps;
|
||||
SEpAddr epAddr[TSDB_MAX_REPLICA];
|
||||
} SCMVgroupInfo;
|
||||
|
||||
typedef struct {
|
||||
|
@ -753,7 +753,7 @@ typedef struct {
|
|||
uint32_t onlineDnodes;
|
||||
uint32_t connId;
|
||||
int8_t killConnection;
|
||||
SRpcIpSet ipList;
|
||||
SRpcEpSet epSet;
|
||||
} SCMHeartBeatRsp;
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -28,12 +28,12 @@ extern "C" {

extern int tsRpcHeadSize;

typedef struct SRpcIpSet {
typedef struct SRpcEpSet {
  int8_t inUse;
  int8_t numOfIps;
  int8_t numOfEps;
  uint16_t port[TSDB_MAX_REPLICA];
  char fqdn[TSDB_MAX_REPLICA][TSDB_FQDN_LEN];
} SRpcIpSet;
} SRpcEpSet;

typedef struct SRpcCorIpSet {
  int32_t version;

@ -72,7 +72,7 @@ typedef struct SRpcInit {
  char *ckey; // ciphering key

  // call back to process incoming msg, code shall be ignored by server app
  void (*cfp)(SRpcMsg *, SRpcIpSet *);
  void (*cfp)(SRpcMsg *, SRpcEpSet *);

  // call back to retrieve the client auth info, for server app only
  int (*afp)(char *tableId, char *spi, char *encrypt, char *secret, char *ckey);

@ -83,11 +83,11 @@ void rpcClose(void *);
void *rpcMallocCont(int contLen);
void rpcFreeCont(void *pCont);
void *rpcReallocCont(void *ptr, int contLen);
void rpcSendRequest(void *thandle, const SRpcIpSet *pIpSet, SRpcMsg *pMsg);
void rpcSendRequest(void *thandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg);
void rpcSendResponse(const SRpcMsg *pMsg);
void rpcSendRedirectRsp(void *pConn, const SRpcIpSet *pIpSet);
void rpcSendRedirectRsp(void *pConn, const SRpcEpSet *pEpSet);
int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
void rpcSendRecv(void *shandle, SRpcEpSet *pEpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
int rpcReportProgress(void *pConn, char *pCont, int contLen);
void rpcCancelRequest(void *pContext);
@ -46,7 +46,6 @@ typedef struct {
|
|||
int (*eventCallBack)(void *);
|
||||
void *(*cqCreateFunc)(void *handle, uint64_t uid, int sid, char *sqlStr, STSchema *pSchema);
|
||||
void (*cqDropFunc)(void *handle);
|
||||
void *(*configFunc)(int32_t vgId, int32_t sid);
|
||||
} STsdbAppH;
|
||||
|
||||
// --------- TSDB REPOSITORY CONFIGURATION DEFINITION
|
||||
|
@ -175,11 +174,6 @@ typedef struct {
|
|||
SHashObj *map; // speedup acquire the tableQueryInfo from STableId
|
||||
} STableGroupInfo;
|
||||
|
||||
typedef struct SQueryRowCond {
|
||||
int32_t rel;
|
||||
TSKEY ts;
|
||||
} SQueryRowCond;
|
||||
|
||||
/**
|
||||
* Get the data block iterator, starting from position according to the query condition
|
||||
*
|
||||
|
@ -276,7 +270,7 @@ int32_t tsdbQuerySTableByTagCond(TSDB_REPO_T *tsdb, uint64_t uid, const char *pT
|
|||
* destory the created table group list, which is generated by tag query
|
||||
* @param pGroupList
|
||||
*/
|
||||
void tsdbDestoryTableGroup(STableGroupInfo *pGroupList);
|
||||
void tsdbDestroyTableGroup(STableGroupInfo *pGroupList);
|
||||
|
||||
/**
|
||||
* create the table group result including only one table, used to handle the normal table query
|
||||
|
|
|
@ -210,10 +210,10 @@ int32_t main(int32_t argc, char *argv[]) {
|
|||
(void)snprintf(mnodeWal, TSDB_FILENAME_LEN*2, "%s/mnode/wal/wal0", arguments.dataDir);
|
||||
walModWalFile(mnodeWal);
|
||||
|
||||
// 2. modfiy dnode config: mnodeIpList.json
|
||||
char dnodeIpList[TSDB_FILENAME_LEN*2] = {0};
|
||||
(void)snprintf(dnodeIpList, TSDB_FILENAME_LEN*2, "%s/dnode/mnodeIpList.json", arguments.dataDir);
|
||||
modDnodeIpList(dnodeIpList);
|
||||
// 2. modfiy dnode config: mnodeEpSet.json
|
||||
char dnodeEpSet[TSDB_FILENAME_LEN*2] = {0};
|
||||
(void)snprintf(dnodeEpSet, TSDB_FILENAME_LEN*2, "%s/dnode/mnodeEpSet.json", arguments.dataDir);
|
||||
modDnodeEpSet(dnodeEpSet);
|
||||
|
||||
// 3. modify vnode config: config.json
|
||||
char vnodeDir[TSDB_FILENAME_LEN*2] = {0};
|
||||
|
|
|
@ -71,7 +71,7 @@ int tSystemShell(const char * cmd);
|
|||
void taosMvFile(char* destFile, char *srcFile) ;
|
||||
void walModWalFile(char* walfile);
|
||||
SdnodeIfo* getDnodeInfo(int32_t dnodeId);
|
||||
void modDnodeIpList(char* dnodeIpList);
|
||||
void modDnodeEpSet(char* dnodeEpSet);
|
||||
void modAllVnode(char *vnodeDir);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -23,10 +23,10 @@
|
|||
|
||||
static SDMMnodeInfos tsDnodeIpInfos = {0};
|
||||
|
||||
static bool dnodeReadMnodeInfos(char* dnodeIpList) {
|
||||
FILE *fp = fopen(dnodeIpList, "r");
|
||||
static bool dnodeReadMnodeInfos(char* dnodeEpSet) {
|
||||
FILE *fp = fopen(dnodeEpSet, "r");
|
||||
if (!fp) {
|
||||
printf("failed to read mnodeIpList.json, file not exist\n");
|
||||
printf("failed to read mnodeEpSet.json, file not exist\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -37,40 +37,40 @@ static bool dnodeReadMnodeInfos(char* dnodeIpList) {
|
|||
if (len <= 0) {
|
||||
free(content);
|
||||
fclose(fp);
|
||||
printf("failed to read mnodeIpList.json, content is null\n");
|
||||
printf("failed to read mnodeEpSet.json, content is null\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
content[len] = 0;
|
||||
cJSON* root = cJSON_Parse(content);
|
||||
if (root == NULL) {
|
||||
printf("failed to read mnodeIpList.json, invalid json format\n");
|
||||
printf("failed to read mnodeEpSet.json, invalid json format\n");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
|
||||
cJSON* inUse = cJSON_GetObjectItem(root, "inUse");
|
||||
if (!inUse || inUse->type != cJSON_Number) {
|
||||
printf("failed to read mnodeIpList.json, inUse not found\n");
|
||||
printf("failed to read mnodeEpSet.json, inUse not found\n");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
tsDnodeIpInfos.inUse = inUse->valueint;
|
||||
|
||||
cJSON* nodeNum = cJSON_GetObjectItem(root, "nodeNum");
|
||||
if (!nodeNum || nodeNum->type != cJSON_Number) {
|
||||
printf("failed to read mnodeIpList.json, nodeNum not found\n");
|
||||
printf("failed to read mnodeEpSet.json, nodeNum not found\n");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
tsDnodeIpInfos.nodeNum = nodeNum->valueint;
|
||||
|
||||
cJSON* nodeInfos = cJSON_GetObjectItem(root, "nodeInfos");
|
||||
if (!nodeInfos || nodeInfos->type != cJSON_Array) {
|
||||
printf("failed to read mnodeIpList.json, nodeInfos not found\n");
|
||||
printf("failed to read mnodeEpSet.json, nodeInfos not found\n");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
|
||||
int size = cJSON_GetArraySize(nodeInfos);
|
||||
if (size != tsDnodeIpInfos.nodeNum) {
|
||||
printf("failed to read mnodeIpList.json, nodeInfos size not matched\n");
|
||||
printf("failed to read mnodeEpSet.json, nodeInfos size not matched\n");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
|
||||
|
@ -80,14 +80,14 @@ static bool dnodeReadMnodeInfos(char* dnodeIpList) {
|
|||
|
||||
cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
|
||||
if (!nodeId || nodeId->type != cJSON_Number) {
|
||||
printf("failed to read mnodeIpList.json, nodeId not found\n");
|
||||
printf("failed to read mnodeEpSet.json, nodeId not found\n");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
tsDnodeIpInfos.nodeInfos[i].nodeId = nodeId->valueint;
|
||||
|
||||
cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp");
|
||||
if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) {
|
||||
printf("failed to read mnodeIpList.json, nodeName not found\n");
|
||||
printf("failed to read mnodeEpSet.json, nodeName not found\n");
|
||||
goto PARSE_OVER;
|
||||
}
|
||||
strncpy(tsDnodeIpInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_EP_LEN);
|
||||
|
@ -102,7 +102,7 @@ static bool dnodeReadMnodeInfos(char* dnodeIpList) {
|
|||
|
||||
ret = true;
|
||||
|
||||
//printf("read mnode iplist successed, numOfIps:%d inUse:%d\n", tsDnodeIpInfos.nodeNum, tsDnodeIpInfos.inUse);
|
||||
//printf("read mnode epSet successed, numOfEps:%d inUse:%d\n", tsDnodeIpInfos.nodeNum, tsDnodeIpInfos.inUse);
|
||||
//for (int32_t i = 0; i < tsDnodeIpInfos.nodeNum; i++) {
|
||||
// printf("mnode:%d, %s\n", tsDnodeIpInfos.nodeInfos[i].nodeId, tsDnodeIpInfos.nodeInfos[i].nodeEp);
|
||||
//}
|
||||
|
@ -115,8 +115,8 @@ PARSE_OVER:
|
|||
}
|
||||
|
||||
|
||||
static void dnodeSaveMnodeInfos(char* dnodeIpList) {
|
||||
FILE *fp = fopen(dnodeIpList, "w");
|
||||
static void dnodeSaveMnodeInfos(char* dnodeEpSet) {
|
||||
FILE *fp = fopen(dnodeEpSet, "w");
|
||||
if (!fp) return;
|
||||
|
||||
int32_t len = 0;
|
||||
|
@ -143,13 +143,13 @@ static void dnodeSaveMnodeInfos(char* dnodeIpList) {
|
|||
fclose(fp);
|
||||
free(content);
|
||||
|
||||
printf("mod mnode iplist successed\n");
|
||||
printf("mod mnode epSet successed\n");
|
||||
}
|
||||
|
||||
void modDnodeIpList(char* dnodeIpList)
|
||||
void modDnodeEpSet(char* dnodeEpSet)
|
||||
{
|
||||
(void)dnodeReadMnodeInfos(dnodeIpList);
|
||||
dnodeSaveMnodeInfos(dnodeIpList);
|
||||
(void)dnodeReadMnodeInfos(dnodeEpSet);
|
||||
dnodeSaveMnodeInfos(dnodeEpSet);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ typedef struct SDnodeObj {
|
|||
int32_t dnodeId;
|
||||
int32_t openVnodes;
|
||||
int64_t createdTime;
|
||||
int32_t totalVnodes; // from dnode status msg, config information
|
||||
int32_t resever0; // from dnode status msg, config information
|
||||
int32_t customScore; // config by user
|
||||
uint32_t lastAccess;
|
||||
uint16_t numOfCores; // from dnode status msg
|
||||
|
@ -50,7 +50,7 @@ typedef struct SDnodeObj {
|
|||
int8_t alternativeRole; // from dnode status msg, 0-any, 1-mgmt, 2-dnode
|
||||
int8_t status; // set in balance function
|
||||
int8_t isMgmt;
|
||||
int8_t reserved0[14];
|
||||
int8_t reserve1[14];
|
||||
int8_t updateEnd[1];
|
||||
int32_t refCount;
|
||||
uint32_t moduleStatus;
|
||||
|
@ -61,7 +61,7 @@ typedef struct SDnodeObj {
|
|||
int16_t cpuAvgUsage; // calc from sys.cpu
|
||||
int16_t memoryAvgUsage; // calc from sys.mem
|
||||
int16_t bandwidthUsage; // calc from sys.band
|
||||
int8_t reserved1[2];
|
||||
int8_t reserved2[2];
|
||||
} SDnodeObj;
|
||||
|
||||
typedef struct SMnodeObj {
|
||||
|
|
|
@ -42,12 +42,12 @@ void mnodeIncMnodeRef(struct SMnodeObj *pMnode);
|
|||
void mnodeDecMnodeRef(struct SMnodeObj *pMnode);
|
||||
|
||||
char * mnodeGetMnodeRoleStr();
|
||||
void mnodeGetMnodeIpSetForPeer(SRpcIpSet *ipSet);
|
||||
void mnodeGetMnodeIpSetForShell(SRpcIpSet *ipSet);
|
||||
void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet);
|
||||
void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet);
|
||||
char* mnodeGetMnodeMasterEp();
|
||||
|
||||
void mnodeGetMnodeInfos(void *mnodes);
|
||||
void mnodeUpdateMnodeIpSet();
|
||||
void mnodeUpdateMnodeEpSet();
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -44,12 +44,12 @@ int32_t mnodeGetAvailableVgroup(struct SMnodeMsg *pMsg, SVgObj **pVgroup, int32_
|
|||
|
||||
void mnodeAddTableIntoVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
|
||||
void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SChildTableObj *pTable);
|
||||
void mnodeSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle);
|
||||
void mnodeSendDropVnodeMsg(int32_t vgId, SRpcEpSet *epSet, void *ahandle);
|
||||
void mnodeSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle);
|
||||
void mnodeSendAlterVgroupMsg(SVgObj *pVgroup);
|
||||
|
||||
SRpcIpSet mnodeGetIpSetFromVgroup(SVgObj *pVgroup);
|
||||
SRpcIpSet mnodeGetIpSetFromIp(char *ep);
|
||||
SRpcEpSet mnodeGetEpSetFromVgroup(SVgObj *pVgroup);
|
||||
SRpcEpSet mnodeGetEpSetFromIp(char *ep);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -39,11 +39,11 @@ int32_t balanceAllocVnodes(SVgObj *pVgroup) {
    pIter = mnodeGetNextDnode(pIter, &pDnode);
    if (pDnode == NULL) break;

    if (pDnode->totalVnodes > 0 && pDnode->openVnodes < pDnode->totalVnodes) {
    if (pDnode->numOfCores > 0 && pDnode->openVnodes < TSDB_MAX_VNODES) {
      float openVnodes = pDnode->openVnodes;
      if (pDnode->isMgmt) openVnodes += tsMnodeEqualVnodeNum;

      float usage = openVnodes / pDnode->totalVnodes;
      float usage = openVnodes / pDnode->numOfCores;
      if (usage <= vnodeUsage) {
        pSelDnode = pDnode;
        vnodeUsage = usage;
@ -289,14 +289,14 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
|
|||
}
|
||||
}
|
||||
|
||||
SRpcIpSet ipSet = mnodeGetIpSetFromIp(pCmCfgDnode->ep);
|
||||
SRpcEpSet epSet = mnodeGetEpSetFromIp(pCmCfgDnode->ep);
|
||||
if (dnodeId != 0) {
|
||||
SDnodeObj *pDnode = mnodeGetDnode(dnodeId);
|
||||
if (pDnode == NULL) {
|
||||
mError("failed to cfg dnode, invalid dnodeId:%d", dnodeId);
|
||||
return TSDB_CODE_MND_DNODE_NOT_EXIST;
|
||||
}
|
||||
ipSet = mnodeGetIpSetFromIp(pDnode->dnodeEp);
|
||||
epSet = mnodeGetEpSetFromIp(pDnode->dnodeEp);
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
}
|
||||
|
||||
|
@ -313,7 +313,7 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
|
|||
};
|
||||
|
||||
mInfo("dnode:%s, is configured by %s", pCmCfgDnode->ep, pMsg->pUser->user);
|
||||
dnodeSendMsgToDnode(&ipSet, &rpcMdCfgDnodeMsg);
|
||||
dnodeSendMsgToDnode(&epSet, &rpcMdCfgDnodeMsg);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -345,8 +345,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
|
|||
pStatus->moduleStatus = htonl(pStatus->moduleStatus);
|
||||
pStatus->lastReboot = htonl(pStatus->lastReboot);
|
||||
pStatus->numOfCores = htons(pStatus->numOfCores);
|
||||
pStatus->numOfTotalVnodes = htons(pStatus->numOfTotalVnodes);
|
||||
|
||||
|
||||
uint32_t version = htonl(pStatus->version);
|
||||
if (version != tsVersion) {
|
||||
mError("status msg version:%d not equal with mnode:%d", version, tsVersion);
|
||||
|
@ -372,7 +371,6 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
|
|||
pDnode->numOfCores = pStatus->numOfCores;
|
||||
pDnode->diskAvailable = pStatus->diskAvailable;
|
||||
pDnode->alternativeRole = pStatus->alternativeRole;
|
||||
pDnode->totalVnodes = pStatus->numOfTotalVnodes;
|
||||
pDnode->moduleStatus = pStatus->moduleStatus;
|
||||
|
||||
if (pStatus->dnodeId == 0) {
|
||||
|
@ -401,9 +399,9 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
|
|||
|
||||
SVgObj *pVgroup = mnodeGetVgroup(pVload->vgId);
|
||||
if (pVgroup == NULL) {
|
||||
SRpcIpSet ipSet = mnodeGetIpSetFromIp(pDnode->dnodeEp);
|
||||
SRpcEpSet epSet = mnodeGetEpSetFromIp(pDnode->dnodeEp);
|
||||
mInfo("dnode:%d, vgId:%d not exist in mnode, drop it", pDnode->dnodeId, pVload->vgId);
|
||||
mnodeSendDropVnodeMsg(pVload->vgId, &ipSet, NULL);
|
||||
mnodeSendDropVnodeMsg(pVload->vgId, &epSet, NULL);
|
||||
} else {
|
||||
mnodeUpdateVgroupStatus(pVgroup, pDnode, pVload);
|
||||
pAccess->vgId = htonl(pVload->vgId);
|
||||
|
@ -452,17 +450,29 @@ static int32_t mnodeCreateDnode(char *ep, SMnodeMsg *pMsg) {
|
|||
return grantCode;
|
||||
}
|
||||
|
||||
char dnodeEp[TSDB_EP_LEN] = {0};
|
||||
tstrncpy(dnodeEp, ep, TSDB_EP_LEN);
|
||||
strtrim(dnodeEp);
|
||||
|
||||
char *temp = strchr(dnodeEp, ':');
|
||||
if (!temp) {
|
||||
int len = strlen(dnodeEp);
|
||||
if (dnodeEp[len - 1] == ';') dnodeEp[len - 1] = 0;
|
||||
len = strlen(dnodeEp);
|
||||
snprintf(dnodeEp + len, TSDB_EP_LEN - len, ":%d", tsServerPort);
|
||||
}
|
||||
ep = dnodeEp;
|
||||
|
||||
SDnodeObj *pDnode = mnodeGetDnodeByEp(ep);
|
||||
if (pDnode != NULL) {
|
||||
mnodeDecDnodeRef(pDnode);
|
||||
mError("dnode:%d is alredy exist, %s:%d", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodePort);
|
||||
mError("dnode:%d is already exist, %s:%d", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodePort);
|
||||
return TSDB_CODE_MND_DNODE_ALREADY_EXIST;
|
||||
}
|
||||
|
||||
pDnode = (SDnodeObj *) calloc(1, sizeof(SDnodeObj));
|
||||
pDnode->createdTime = taosGetTimestampMs();
|
||||
pDnode->status = TAOS_DN_STATUS_OFFLINE;
|
||||
pDnode->totalVnodes = TSDB_INVALID_VNODE_NUM;
|
||||
tstrncpy(pDnode->dnodeEp, ep, TSDB_EP_LEN);
|
||||
taosGetFqdnPortFromEp(ep, pDnode->dnodeFqdn, &pDnode->dnodePort);
|
||||
|
||||
|
@ -507,8 +517,12 @@ int32_t mnodeDropDnode(SDnodeObj *pDnode, void *pMsg) {
|
|||
static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) {
|
||||
SDnodeObj *pDnode = mnodeGetDnodeByEp(ep);
|
||||
if (pDnode == NULL) {
|
||||
mError("dnode:%s, is not exist", ep);
|
||||
return TSDB_CODE_MND_DNODE_NOT_EXIST;
|
||||
int32_t dnodeId = (int32_t)strtol(ep, NULL, 10);
|
||||
pDnode = mnodeGetDnode(dnodeId);
|
||||
if (pDnode == NULL) {
|
||||
mError("dnode:%s, is not exist", ep);
|
||||
return TSDB_CODE_MND_DNODE_NOT_EXIST;
|
||||
}
|
||||
}
|
||||
|
||||
if (strcmp(pDnode->dnodeEp, mnodeGetMnodeMasterEp()) == 0) {
|
||||
|
@ -575,13 +589,13 @@ static int32_t mnodeGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
|
|||
|
||||
pShow->bytes[cols] = 2;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT;
|
||||
strcpy(pSchema[cols].name, "open_vnodes");
|
||||
strcpy(pSchema[cols].name, "vnodes");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
pShow->bytes[cols] = 2;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT;
|
||||
strcpy(pSchema[cols].name, "total_vnodes");
|
||||
strcpy(pSchema[cols].name, "cores");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
|
@ -593,7 +607,7 @@ static int32_t mnodeGetDnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC
|
|||
|
||||
pShow->bytes[cols] = 6 + VARSTR_HEADER_SIZE;
|
||||
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
|
||||
strcpy(pSchema[cols].name, "alternativeRole");
|
||||
strcpy(pSchema[cols].name, "role");
|
||||
pSchema[cols].bytes = htons(pShow->bytes[cols]);
|
||||
cols++;
|
||||
|
||||
|
@ -645,7 +659,7 @@ static int32_t mnodeRetrieveDnodes(SShowObj *pShow, char *data, int32_t rows, vo
|
|||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
*(int16_t *)pWrite = pDnode->totalVnodes;
|
||||
*(int16_t *)pWrite = pDnode->numOfCores;
|
||||
cols++;
|
||||
|
||||
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
|
||||
|
|
|
@ -35,8 +35,8 @@
|
|||
|
||||
static void * tsMnodeSdb = NULL;
|
||||
static int32_t tsMnodeUpdateSize = 0;
|
||||
static SRpcIpSet tsMnodeIpSetForShell;
|
||||
static SRpcIpSet tsMnodeIpSetForPeer;
|
||||
static SRpcEpSet tsMnodeEpSetForShell;
|
||||
static SRpcEpSet tsMnodeEpSetForPeer;
|
||||
static SDMMnodeInfos tsMnodeInfos;
|
||||
static int32_t mnodeGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
|
||||
static int32_t mnodeRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn);
|
||||
|
@ -123,7 +123,7 @@ static int32_t mnodeMnodeActionRestored() {
|
|||
sdbFreeIter(pIter);
|
||||
}
|
||||
|
||||
mnodeUpdateMnodeIpSet();
|
||||
mnodeUpdateMnodeEpSet();
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -204,13 +204,13 @@ char *mnodeGetMnodeRoleStr(int32_t role) {
|
|||
}
|
||||
}
|
||||
|
||||
void mnodeUpdateMnodeIpSet() {
|
||||
mInfo("update mnodes ipset, numOfIps:%d ", mnodeGetMnodesNum());
|
||||
void mnodeUpdateMnodeEpSet() {
|
||||
mInfo("update mnodes epSet, numOfEps:%d ", mnodeGetMnodesNum());
|
||||
|
||||
mnodeMnodeWrLock();
|
||||
|
||||
memset(&tsMnodeIpSetForShell, 0, sizeof(SRpcIpSet));
|
||||
memset(&tsMnodeIpSetForPeer, 0, sizeof(SRpcIpSet));
|
||||
memset(&tsMnodeEpSetForShell, 0, sizeof(SRpcEpSet));
|
||||
memset(&tsMnodeEpSetForPeer, 0, sizeof(SRpcEpSet));
|
||||
memset(&tsMnodeInfos, 0, sizeof(SDMMnodeInfos));
|
||||
|
||||
int32_t index = 0;
|
||||
|
@ -222,20 +222,20 @@ void mnodeUpdateMnodeIpSet() {
|
|||
|
||||
SDnodeObj *pDnode = mnodeGetDnode(pMnode->mnodeId);
|
||||
if (pDnode != NULL) {
|
||||
strcpy(tsMnodeIpSetForShell.fqdn[index], pDnode->dnodeFqdn);
|
||||
tsMnodeIpSetForShell.port[index] = htons(pDnode->dnodePort);
|
||||
mDebug("mnode:%d, for shell fqdn:%s %d", pDnode->dnodeId, tsMnodeIpSetForShell.fqdn[index], htons(tsMnodeIpSetForShell.port[index]));
|
||||
strcpy(tsMnodeEpSetForShell.fqdn[index], pDnode->dnodeFqdn);
|
||||
tsMnodeEpSetForShell.port[index] = htons(pDnode->dnodePort);
|
||||
mDebug("mnode:%d, for shell fqdn:%s %d", pDnode->dnodeId, tsMnodeEpSetForShell.fqdn[index], htons(tsMnodeEpSetForShell.port[index]));
|
||||
|
||||
strcpy(tsMnodeIpSetForPeer.fqdn[index], pDnode->dnodeFqdn);
|
||||
tsMnodeIpSetForPeer.port[index] = htons(pDnode->dnodePort + TSDB_PORT_DNODEDNODE);
|
||||
mDebug("mnode:%d, for peer fqdn:%s %d", pDnode->dnodeId, tsMnodeIpSetForPeer.fqdn[index], htons(tsMnodeIpSetForPeer.port[index]));
|
||||
strcpy(tsMnodeEpSetForPeer.fqdn[index], pDnode->dnodeFqdn);
|
||||
tsMnodeEpSetForPeer.port[index] = htons(pDnode->dnodePort + TSDB_PORT_DNODEDNODE);
|
||||
mDebug("mnode:%d, for peer fqdn:%s %d", pDnode->dnodeId, tsMnodeEpSetForPeer.fqdn[index], htons(tsMnodeEpSetForPeer.port[index]));
|
||||
|
||||
tsMnodeInfos.nodeInfos[index].nodeId = htonl(pMnode->mnodeId);
|
||||
strcpy(tsMnodeInfos.nodeInfos[index].nodeEp, pDnode->dnodeEp);
|
||||
|
||||
if (pMnode->role == TAOS_SYNC_ROLE_MASTER) {
|
||||
tsMnodeIpSetForShell.inUse = index;
|
||||
tsMnodeIpSetForPeer.inUse = index;
|
||||
tsMnodeEpSetForShell.inUse = index;
|
||||
tsMnodeEpSetForPeer.inUse = index;
|
||||
tsMnodeInfos.inUse = index;
|
||||
}
|
||||
|
||||
|
@ -248,23 +248,23 @@ void mnodeUpdateMnodeIpSet() {
|
|||
}
|
||||
|
||||
tsMnodeInfos.nodeNum = index;
|
||||
tsMnodeIpSetForShell.numOfIps = index;
|
||||
tsMnodeIpSetForPeer.numOfIps = index;
|
||||
tsMnodeEpSetForShell.numOfEps = index;
|
||||
tsMnodeEpSetForPeer.numOfEps = index;
|
||||
|
||||
sdbFreeIter(pIter);
|
||||
|
||||
mnodeMnodeUnLock();
|
||||
}
|
||||
|
||||
void mnodeGetMnodeIpSetForPeer(SRpcIpSet *ipSet) {
|
||||
void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet) {
|
||||
mnodeMnodeRdLock();
|
||||
*ipSet = tsMnodeIpSetForPeer;
|
||||
*epSet = tsMnodeEpSetForPeer;
|
||||
mnodeMnodeUnLock();
|
||||
}
|
||||
|
||||
void mnodeGetMnodeIpSetForShell(SRpcIpSet *ipSet) {
|
||||
void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet) {
|
||||
mnodeMnodeRdLock();
|
||||
*ipSet = tsMnodeIpSetForShell;
|
||||
*epSet = tsMnodeEpSetForShell;
|
||||
mnodeMnodeUnLock();
|
||||
}
|
||||
|
||||
|
@ -295,7 +295,7 @@ int32_t mnodeAddMnode(int32_t dnodeId) {
|
|||
code = TSDB_CODE_MND_SDB_ERROR;
|
||||
}
|
||||
|
||||
mnodeUpdateMnodeIpSet();
|
||||
mnodeUpdateMnodeEpSet();
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -308,7 +308,7 @@ void mnodeDropMnodeLocal(int32_t dnodeId) {
|
|||
mnodeDecMnodeRef(pMnode);
|
||||
}
|
||||
|
||||
mnodeUpdateMnodeIpSet();
|
||||
mnodeUpdateMnodeEpSet();
|
||||
}
|
||||
|
||||
int32_t mnodeDropMnode(int32_t dnodeId) {
|
||||
|
@ -330,7 +330,7 @@ int32_t mnodeDropMnode(int32_t dnodeId) {
|
|||
|
||||
sdbDecRef(tsMnodeSdb, pMnode);
|
||||
|
||||
mnodeUpdateMnodeIpSet();
|
||||
mnodeUpdateMnodeEpSet();
|
||||
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -53,14 +53,14 @@ int32_t mnodeProcessPeerReq(SMnodeMsg *pMsg) {

  if (!sdbIsMaster()) {
    SMnodeRsp *rpcRsp = &pMsg->rpcRsp;
    SRpcIpSet *ipSet = rpcMallocCont(sizeof(SRpcIpSet));
    mnodeGetMnodeIpSetForPeer(ipSet);
    rpcRsp->rsp = ipSet;
    rpcRsp->len = sizeof(SRpcIpSet);
    SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet));
    mnodeGetMnodeEpSetForPeer(epSet);
    rpcRsp->rsp = epSet;
    rpcRsp->len = sizeof(SRpcEpSet);

    mDebug("%p, msg:%s in mpeer queue, will be redireced inUse:%d", pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType], ipSet->inUse);
    for (int32_t i = 0; i < ipSet->numOfIps; ++i) {
      mDebug("mnode index:%d ip:%s:%d", i, ipSet->fqdn[i], htons(ipSet->port[i]));
    mDebug("%p, msg:%s in mpeer queue, will be redireced inUse:%d", pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType], epSet->inUse);
    for (int32_t i = 0; i < epSet->numOfEps; ++i) {
      mDebug("mnode index:%d ep:%s:%d", i, epSet->fqdn[i], htons(epSet->port[i]));
    }

    return TSDB_CODE_RPC_REDIRECT;
@ -49,14 +49,14 @@ int32_t mnodeProcessRead(SMnodeMsg *pMsg) {
|
|||
|
||||
if (!sdbIsMaster()) {
|
||||
SMnodeRsp *rpcRsp = &pMsg->rpcRsp;
|
||||
SRpcIpSet *ipSet = rpcMallocCont(sizeof(SRpcIpSet));
|
||||
mnodeGetMnodeIpSetForShell(ipSet);
|
||||
rpcRsp->rsp = ipSet;
|
||||
rpcRsp->len = sizeof(SRpcIpSet);
|
||||
SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet));
|
||||
mnodeGetMnodeEpSetForShell(epSet);
|
||||
rpcRsp->rsp = epSet;
|
||||
rpcRsp->len = sizeof(SRpcEpSet);
|
||||
|
||||
mDebug("%p, msg:%s in mread queue, will be redireced, inUse:%d", pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType], ipSet->inUse);
|
||||
for (int32_t i = 0; i < ipSet->numOfIps; ++i) {
|
||||
mDebug("mnode index:%d ip:%s:%d", i, ipSet->fqdn[i], htons(ipSet->port[i]));
|
||||
mDebug("%p, msg:%s in mread queue, will be redireced, inUse:%d", pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType], epSet->inUse);
|
||||
for (int32_t i = 0; i < epSet->numOfEps; ++i) {
|
||||
mDebug("mnode index:%d ep:%s:%d", i, epSet->fqdn[i], htons(epSet->port[i]));
|
||||
}
|
||||
|
||||
return TSDB_CODE_RPC_REDIRECT;
|
||||
|
|
|
@@ -219,7 +219,7 @@ void sdbUpdateMnodeRoles() {
}
}

mnodeUpdateMnodeIpSet();
mnodeUpdateMnodeEpSet();
}

static uint32_t sdbGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int32_t *size, uint64_t *fversion) {

@@ -270,7 +270,7 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) {

pHBRsp->onlineDnodes = htonl(mnodeGetOnlinDnodesNum());
pHBRsp->totalDnodes = htonl(mnodeGetDnodesNum());
mnodeGetMnodeIpSetForShell(&pHBRsp->ipList);
mnodeGetMnodeEpSetForShell(&pHBRsp->epSet);

pMsg->rpcRsp.rsp = pHBRsp;
pMsg->rpcRsp.len = sizeof(SCMHeartBeatRsp);

@@ -335,7 +335,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) {
pConnectRsp->writeAuth = pUser->writeAuth;
pConnectRsp->superAuth = pUser->superAuth;

mnodeGetMnodeIpSetForShell(&pConnectRsp->ipList);
mnodeGetMnodeEpSetForShell(&pConnectRsp->epSet);

connect_over:
if (code != TSDB_CODE_SUCCESS) {

@@ -910,9 +910,9 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) {

mInfo("app:%p:%p, stable:%s, send drop stable msg to vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId,
pVgroup->vgId);
SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pVgroup);
SRpcEpSet epSet = mnodeGetEpSetFromVgroup(pVgroup);
SRpcMsg rpcMsg = {.pCont = pDrop, .contLen = sizeof(SMDDropSTableMsg), .msgType = TSDB_MSG_TYPE_MD_DROP_STABLE};
dnodeSendMsgToDnode(&ipSet, &rpcMsg);
dnodeSendMsgToDnode(&epSet, &rpcMsg);
mnodeDecVgroupRef(pVgroup);
}
taosHashDestroyIter(pIter);

@@ -1484,10 +1484,10 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
SDnodeObj *pDnode = pVgroup->vnodeGid[vn].pDnode;
if (pDnode == NULL) break;

tstrncpy(pVgroupInfo->vgroups[vgSize].ipAddr[vn].fqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN);
pVgroupInfo->vgroups[vgSize].ipAddr[vn].port = htons(pDnode->dnodePort);
tstrncpy(pVgroupInfo->vgroups[vgSize].epAddr[vn].fqdn, pDnode->dnodeFqdn, TSDB_FQDN_LEN);
pVgroupInfo->vgroups[vgSize].epAddr[vn].port = htons(pDnode->dnodePort);

pVgroupInfo->vgroups[vgSize].numOfIps++;
pVgroupInfo->vgroups[vgSize].numOfEps++;
}

vgSize++;

@@ -1615,7 +1615,7 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
return terrno;
}

SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pMsg->pVgroup);
SRpcEpSet epSet = mnodeGetEpSetFromVgroup(pMsg->pVgroup);
SRpcMsg rpcMsg = {
.ahandle = pMsg,
.pCont = pMDCreate,

@@ -1624,7 +1624,7 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
.msgType = TSDB_MSG_TYPE_MD_CREATE_TABLE
};

dnodeSendMsgToDnode(&ipSet, &rpcMsg);
dnodeSendMsgToDnode(&epSet, &rpcMsg);
return TSDB_CODE_MND_ACTION_IN_PROGRESS;
}

@@ -1788,7 +1788,7 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {
pDrop->sid = htonl(pTable->sid);
pDrop->uid = htobe64(pTable->uid);

SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pMsg->pVgroup);
SRpcEpSet epSet = mnodeGetEpSetFromVgroup(pMsg->pVgroup);

mInfo("app:%p:%p, table:%s, send drop ctable msg, vgId:%d sid:%d uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg,
pDrop->tableId, pTable->vgId, pTable->sid, pTable->uid);

@@ -1803,7 +1803,7 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg, bool needReturn) {

if (!needReturn) rpcMsg.ahandle = NULL;

dnodeSendMsgToDnode(&ipSet, &rpcMsg);
dnodeSendMsgToDnode(&epSet, &rpcMsg);

return TSDB_CODE_MND_ACTION_IN_PROGRESS;
}

@@ -1842,7 +1842,7 @@ static int32_t mnodeAlterNormalTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
}
}

SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pMsg->pVgroup);
SRpcEpSet epSet = mnodeGetEpSetFromVgroup(pMsg->pVgroup);
SRpcMsg rpcMsg = {
.ahandle = pMsg,
.pCont = pMDCreate,

@@ -1854,7 +1854,7 @@ static int32_t mnodeAlterNormalTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
mDebug("app:%p:%p, ctable %s, send alter column msg to vgId:%d", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
pMsg->pVgroup->vgId);

dnodeSendMsgToDnode(&ipSet, &rpcMsg);
dnodeSendMsgToDnode(&epSet, &rpcMsg);
return TSDB_CODE_MND_ACTION_IN_PROGRESS;
}

@@ -1996,9 +1996,9 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) {
for (int32_t i = 0; i < pMsg->pVgroup->numOfVnodes; ++i) {
SDnodeObj *pDnode = mnodeGetDnode(pMsg->pVgroup->vnodeGid[i].dnodeId);
if (pDnode == NULL) break;
strcpy(pMeta->vgroup.ipAddr[i].fqdn, pDnode->dnodeFqdn);
pMeta->vgroup.ipAddr[i].port = htons(pDnode->dnodePort + TSDB_PORT_DNODESHELL);
pMeta->vgroup.numOfIps++;
strcpy(pMeta->vgroup.epAddr[i].fqdn, pDnode->dnodeFqdn);
pMeta->vgroup.epAddr[i].port = htons(pDnode->dnodePort + TSDB_PORT_DNODESHELL);
pMeta->vgroup.numOfEps++;
mnodeDecDnodeRef(pDnode);
}
pMeta->vgroup.vgId = htonl(pMsg->pVgroup->vgId);
@@ -317,9 +317,9 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl
}

if (!dnodeExist) {
SRpcIpSet ipSet = mnodeGetIpSetFromIp(pDnode->dnodeEp);
SRpcEpSet epSet = mnodeGetEpSetFromIp(pDnode->dnodeEp);
mError("vgId:%d, dnode:%d not exist in mnode, drop it", pVload->vgId, pDnode->dnodeId);
mnodeSendDropVnodeMsg(pVload->vgId, &ipSet, NULL);
mnodeSendDropVnodeMsg(pVload->vgId, &epSet, NULL);
return;
}

@@ -585,9 +585,9 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;

pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_INT;
strcpy(pSchema[cols].name, "poolSize");
pShow->bytes[cols] = 12 + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "status");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;

@@ -688,8 +688,9 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
*(int32_t *) pWrite = pVgroup->numOfTables;
cols++;

pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *)pWrite = taosIdPoolMaxSize(pVgroup->idPool);
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
char* status = vgroupStatus[pVgroup->status];
STR_TO_VARSTR(pWrite, status);
cols++;

int32_t onlineVnodes = 0;

@@ -808,29 +809,29 @@ static SMDCreateVnodeMsg *mnodeBuildVnodeMsg(SVgObj *pVgroup) {
return pVnode;
}

SRpcIpSet mnodeGetIpSetFromVgroup(SVgObj *pVgroup) {
SRpcIpSet ipSet = {
.numOfIps = pVgroup->numOfVnodes,
SRpcEpSet mnodeGetEpSetFromVgroup(SVgObj *pVgroup) {
SRpcEpSet epSet = {
.numOfEps = pVgroup->numOfVnodes,
.inUse = 0,
};
for (int i = 0; i < pVgroup->numOfVnodes; ++i) {
strcpy(ipSet.fqdn[i], pVgroup->vnodeGid[i].pDnode->dnodeFqdn);
ipSet.port[i] = pVgroup->vnodeGid[i].pDnode->dnodePort + TSDB_PORT_DNODEDNODE;
strcpy(epSet.fqdn[i], pVgroup->vnodeGid[i].pDnode->dnodeFqdn);
epSet.port[i] = pVgroup->vnodeGid[i].pDnode->dnodePort + TSDB_PORT_DNODEDNODE;
}
return ipSet;
return epSet;
}

SRpcIpSet mnodeGetIpSetFromIp(char *ep) {
SRpcIpSet ipSet;
SRpcEpSet mnodeGetEpSetFromIp(char *ep) {
SRpcEpSet epSet;

ipSet.numOfIps = 1;
ipSet.inUse = 0;
taosGetFqdnPortFromEp(ep, ipSet.fqdn[0], &ipSet.port[0]);
ipSet.port[0] += TSDB_PORT_DNODEDNODE;
return ipSet;
epSet.numOfEps = 1;
epSet.inUse = 0;
taosGetFqdnPortFromEp(ep, epSet.fqdn[0], &epSet.port[0]);
epSet.port[0] += TSDB_PORT_DNODEDNODE;
return epSet;
}

static void mnodeSendAlterVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet) {
static void mnodeSendAlterVnodeMsg(SVgObj *pVgroup, SRpcEpSet *epSet) {
SMDAlterVnodeMsg *pAlter = mnodeBuildVnodeMsg(pVgroup);
SRpcMsg rpcMsg = {
.ahandle = NULL,

@@ -839,21 +840,21 @@ static void mnodeSendAlterVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet) {
.code = 0,
.msgType = TSDB_MSG_TYPE_MD_ALTER_VNODE
};
dnodeSendMsgToDnode(ipSet, &rpcMsg);
dnodeSendMsgToDnode(epSet, &rpcMsg);
}

void mnodeSendAlterVgroupMsg(SVgObj *pVgroup) {
mDebug("vgId:%d, send alter all vnodes msg, numOfVnodes:%d db:%s", pVgroup->vgId, pVgroup->numOfVnodes,
pVgroup->dbName);
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SRpcIpSet ipSet = mnodeGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
SRpcEpSet epSet = mnodeGetEpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
mDebug("vgId:%d, index:%d, send alter vnode msg to dnode %s", pVgroup->vgId, i,
pVgroup->vnodeGid[i].pDnode->dnodeEp);
mnodeSendAlterVnodeMsg(pVgroup, &ipSet);
mnodeSendAlterVnodeMsg(pVgroup, &epSet);
}
}

static void mnodeSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle) {
static void mnodeSendCreateVnodeMsg(SVgObj *pVgroup, SRpcEpSet *epSet, void *ahandle) {
SMDCreateVnodeMsg *pCreate = mnodeBuildVnodeMsg(pVgroup);
SRpcMsg rpcMsg = {
.ahandle = ahandle,

@@ -862,17 +863,17 @@ static void mnodeSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *aha
.code = 0,
.msgType = TSDB_MSG_TYPE_MD_CREATE_VNODE
};
dnodeSendMsgToDnode(ipSet, &rpcMsg);
dnodeSendMsgToDnode(epSet, &rpcMsg);
}

void mnodeSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle) {
mDebug("vgId:%d, send create all vnodes msg, numOfVnodes:%d db:%s", pVgroup->vgId, pVgroup->numOfVnodes,
pVgroup->dbName);
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SRpcIpSet ipSet = mnodeGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
SRpcEpSet epSet = mnodeGetEpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
mDebug("vgId:%d, index:%d, send create vnode msg to dnode %s, ahandle:%p", pVgroup->vgId,
i, pVgroup->vnodeGid[i].pDnode->dnodeEp, ahandle);
mnodeSendCreateVnodeMsg(pVgroup, &ipSet, ahandle);
mnodeSendCreateVnodeMsg(pVgroup, &epSet, ahandle);
}
}

@@ -925,7 +926,7 @@ static SMDDropVnodeMsg *mnodeBuildDropVnodeMsg(int32_t vgId) {
return pDrop;
}

void mnodeSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle) {
void mnodeSendDropVnodeMsg(int32_t vgId, SRpcEpSet *epSet, void *ahandle) {
SMDDropVnodeMsg *pDrop = mnodeBuildDropVnodeMsg(vgId);
SRpcMsg rpcMsg = {
.ahandle = ahandle,

@@ -934,16 +935,16 @@ void mnodeSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle) {
.code = 0,
.msgType = TSDB_MSG_TYPE_MD_DROP_VNODE
};
dnodeSendMsgToDnode(ipSet, &rpcMsg);
dnodeSendMsgToDnode(epSet, &rpcMsg);
}

static void mnodeSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle) {
pVgroup->status = TAOS_VG_STATUS_DROPPING; // deleting
mDebug("vgId:%d, send drop all vnodes msg, ahandle:%p", pVgroup->vgId, ahandle);
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SRpcIpSet ipSet = mnodeGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
SRpcEpSet epSet = mnodeGetEpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
mDebug("vgId:%d, send drop vnode msg to dnode:%d, ahandle:%p", pVgroup->vgId, pVgroup->vnodeGid[i].dnodeId, ahandle);
mnodeSendDropVnodeMsg(pVgroup->vgId, &ipSet, ahandle);
mnodeSendDropVnodeMsg(pVgroup->vgId, &epSet, ahandle);
}
}

@@ -997,8 +998,8 @@ static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) {
}

mDebug("vgId:%d, send create vnode msg to dnode %s for vnode cfg msg", pVgroup->vgId, pDnode->dnodeEp);
SRpcIpSet ipSet = mnodeGetIpSetFromIp(pDnode->dnodeEp);
mnodeSendCreateVnodeMsg(pVgroup, &ipSet, NULL);
SRpcEpSet epSet = mnodeGetEpSetFromIp(pDnode->dnodeEp);
mnodeSendCreateVnodeMsg(pVgroup, &epSet, NULL);

mnodeDecDnodeRef(pDnode);
mnodeDecVgroupRef(pVgroup);
@@ -49,16 +49,16 @@ int32_t mnodeProcessWrite(SMnodeMsg *pMsg) {

if (!sdbIsMaster()) {
SMnodeRsp *rpcRsp = &pMsg->rpcRsp;
SRpcIpSet *ipSet = rpcMallocCont(sizeof(SRpcIpSet));
mnodeGetMnodeIpSetForShell(ipSet);
rpcRsp->rsp = ipSet;
rpcRsp->len = sizeof(SRpcIpSet);
SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet));
mnodeGetMnodeEpSetForShell(epSet);
rpcRsp->rsp = epSet;
rpcRsp->len = sizeof(SRpcEpSet);

mDebug("app:%p:%p, msg:%s will be redireced inUse:%d", pMsg->rpcMsg.ahandle, pMsg, taosMsg[pMsg->rpcMsg.msgType],
ipSet->inUse);
for (int32_t i = 0; i < ipSet->numOfIps; ++i) {
mDebug("app:%p:%p, mnode index:%d ip:%s:%d", pMsg->rpcMsg.ahandle, pMsg, i, ipSet->fqdn[i],
htons(ipSet->port[i]));
epSet->inUse);
for (int32_t i = 0; i < epSet->numOfEps; ++i) {
mDebug("app:%p:%p, mnode index:%d ep:%s:%d", pMsg->rpcMsg.ahandle, pMsg, i, epSet->fqdn[i],
htons(epSet->port[i]));
}

return TSDB_CODE_RPC_REDIRECT;
@@ -76,6 +76,8 @@ static void monitorInitDatabase();
static void monitorInitDatabaseCb(void *param, TAOS_RES *result, int32_t code);
static void monitorStartTimer();
static void monitorSaveSystemInfo();
extern int32_t (*monitorStartSystemFp)();
extern void (*monitorStopSystemFp)();

static void monitorCheckDiskUsage(void *para, void *unused) {
taosGetDisk();

@@ -85,6 +87,8 @@ static void monitorCheckDiskUsage(void *para, void *unused) {
int32_t monitorInitSystem() {
taos_init();
taosTmrReset(monitorCheckDiskUsage, CHECK_INTERVAL, NULL, tscTmr, &tsMonitorConn.diskTimer);
monitorStartSystemFp = monitorStartSystem;
monitorStopSystemFp = monitorStopSystem;
return 0;
}
@@ -121,7 +121,6 @@ typedef struct SQueryCostInfo {
uint32_t loadBlockStatis;
uint32_t discardBlocks;
uint64_t elapsedTime;
uint64_t ioTime;
uint64_t computTime;
} SQueryCostInfo;

@@ -201,7 +200,7 @@ typedef struct SQInfo {
*/
int32_t tableIndex;
int32_t numOfGroupResultPages;
void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables;
void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables;

} SQInfo;
@@ -23,7 +23,7 @@ void copyTimeWindowResBuf(SQueryRuntimeEnv* pRuntimeEnv, SWindowResult* dst, con
int32_t initWindowResInfo(SWindowResInfo* pWindowResInfo, SQueryRuntimeEnv* pRuntimeEnv, int32_t size,
int32_t threshold, int16_t type);

void cleanupTimeWindowInfo(SWindowResInfo* pWindowResInfo, int32_t numOfCols);
void cleanupTimeWindowInfo(SWindowResInfo* pWindowResInfo);
void resetTimeWindowInfo(SQueryRuntimeEnv* pRuntimeEnv, SWindowResInfo* pWindowResInfo);
void clearFirstNTimeWindow(SQueryRuntimeEnv *pRuntimeEnv, int32_t num);

@@ -32,14 +32,29 @@ int32_t numOfClosedTimeWindow(SWindowResInfo* pWindowResInfo);
void closeTimeWindow(SWindowResInfo* pWindowResInfo, int32_t slot);
void closeAllTimeWindow(SWindowResInfo* pWindowResInfo);
void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_t order);
SWindowResult *getWindowResult(SWindowResInfo *pWindowResInfo, int32_t slot);

static FORCE_INLINE SWindowResult *getWindowResult(SWindowResInfo *pWindowResInfo, int32_t slot) {
assert(pWindowResInfo != NULL && slot >= 0 && slot < pWindowResInfo->size);
return &pWindowResInfo->pResult[slot];
}

#define curTimeWindow(_winres) ((_winres)->curIndex)
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pSelectExpr[1].base.arg->argValue.i64:1)

bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot);

int32_t createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize);
int32_t createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, size_t interBufSize);

char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult);
static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult) {
assert(pResult != NULL && pRuntimeEnv != NULL);

SQuery *pQuery = pRuntimeEnv->pQuery;
tFilePage *page = GET_RES_BUF_PAGE_BY_ID(pRuntimeEnv->pResultBuf, pResult->pos.pageId);
int32_t realRowId = pResult->pos.rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery);

return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage +
pQuery->pSelectExpr[columnIndex].bytes * realRowId;
}

__filter_func_t *getRangeFilterFuncArray(int32_t type);
__filter_func_t *getValueFilterFuncArray(int32_t type);
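The inlined getPosInResultPage above locates a result row inside a column-blocked page: the per-column offset times rows-per-page, plus the column width times the real row id. A self-contained rendering of that arithmetic with plain integers (the layout follows the hunk; the concrete numbers are only an example):

    #include <stdio.h>
    #include <stdint.h>

    /* Column-blocked page layout: all rows of column 0, then all rows of column 1, ... */
    static size_t posInResultPage(const int32_t *colOffset, int32_t rowsPerPage,
                                  const int32_t *colBytes, int32_t columnIndex,
                                  int32_t rowId, int32_t rowParam) {
      int32_t realRowId = rowId * rowParam;  /* top/bot queries emit several rows per result */
      return (size_t)colOffset[columnIndex] * rowsPerPage + (size_t)colBytes[columnIndex] * realRowId;
    }

    int main(void) {
      int32_t colOffset[2] = {0, 8};   /* column 0 is 8 bytes wide, column 1 starts after it */
      int32_t colBytes[2]  = {8, 4};
      /* row 3 of column 1 on a page holding 100 rows per column: 8*100 + 4*3 = 812 */
      printf("%zu\n", posInResultPage(colOffset, 100, colBytes, 1, 3, 1));
      return 0;
    }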
@@ -22,26 +22,22 @@ extern "C" {

#include "os.h"
#include "qextbuffer.h"
#include "hash.h"

typedef struct SIDList {
uint32_t alloc;
int32_t size;
int32_t* pData;
} SIDList;
typedef struct SArray* SIDList;

typedef struct SDiskbasedResultBuf {
int32_t numOfRowsPerPage;
int32_t numOfPages;
int64_t totalBufSize;
int32_t fd; // data file fd
int32_t allocateId; // allocated page id
int32_t incStep; // minimum allocated pages
char* pBuf; // mmap buffer pointer
char* path; // file path
int32_t numOfRowsPerPage;
int32_t numOfPages;
int64_t totalBufSize;
int32_t fd; // data file fd
int32_t allocateId; // allocated page id
int32_t incStep; // minimum allocated pages
char* pBuf; // mmap buffer pointer
char* path; // file path

uint32_t numOfAllocGroupIds; // number of allocated id list
void* idsTable; // id hash table
SIDList* list; // for each id, there is a page id list
SHashObj* idsTable; // id hash table
SIDList list; // for each id, there is a page id list
} SDiskbasedResultBuf;

#define DEFAULT_INTERN_BUF_PAGE_SIZE (8192L*5)

@@ -112,7 +108,7 @@ void destroyResultBuf(SDiskbasedResultBuf* pResultBuf, void* handle);
* @param pList
* @return
*/
int32_t getLastPageId(SIDList *pList);
int32_t getLastPageId(SIDList pList);

#ifdef __cplusplus
}
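With SIDList redefined as a pointer to the generic SArray, each group's page list becomes a plain dynamic array of int32_t page ids and getLastPageId now takes the list by value. A standalone sketch of that lookup over a minimal stand-in array type (the real code goes through the project's taosArray API; the stand-in below is only illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef struct {        /* minimal stand-in for an SArray holding int32_t page ids */
      int32_t *pData;
      size_t   size;
    } IdList;

    /* Last allocated page id for this id list, or -1 if no page was allocated yet. */
    static int32_t lastPageId(const IdList *list) {
      if (list == NULL || list->size == 0) {
        return -1;
      }
      return list->pData[list->size - 1];
    }

    int main(void) {
      int32_t ids[] = {4, 9, 17};
      IdList list = {ids, 3};
      printf("%d\n", lastPageId(&list));  /* prints 17 */
      return 0;
    }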
@@ -132,13 +132,10 @@ typedef struct SQLPreAggVal {

typedef struct SInterpInfoDetail {
TSKEY ts; // interp specified timestamp
int8_t hasResult;
int8_t type;
int8_t primaryCol;
} SInterpInfoDetail;

typedef struct SInterpInfo { SInterpInfoDetail *pInterpDetail; } SInterpInfo;

typedef struct SResultInfo {
int8_t hasResult; // result generated, not NULL value
bool initialized; // output buffer has been initialized

@@ -146,7 +143,7 @@ typedef struct SResultInfo {
bool superTableQ; // is super table query
int32_t numOfRes; // num of output result in current buffer
int32_t bufLen; // buffer size
void * interResultBuf; // output result buffer
void* interResultBuf; // output result buffer
} SResultInfo;

struct SQLFunctionCtx;
@@ -26,7 +26,6 @@
#include "query.h"
#include "queryLog.h"
#include "qast.h"
#include "tfile.h"
#include "tlosertree.h"
#include "tscompression.h"
#include "ttime.h"

@@ -35,8 +34,8 @@
* check if the primary column is load by default, otherwise, the program will
* forced to load primary column explicitly.
*/
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0)
#define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0)
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0)
#define TSDB_COL_IS_TAG(f) (((f)&TSDB_COL_TAG) != 0)
#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)

#define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN)

@@ -144,7 +143,7 @@ static void setExecParams(SQuery *pQuery, SQLFunctionCtx *pCtx, void* inputData,
SDataStatis *pStatis, void *param, int32_t colIndex);

static void initCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv);
static void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols);
static void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo);
static void resetCtxOutputBuf(SQueryRuntimeEnv *pRuntimeEnv);
static bool hasMainOutput(SQuery *pQuery);
static void buildTagQueryResult(SQInfo *pQInfo);

@@ -361,17 +360,17 @@ static bool hasTagValOutput(SQuery* pQuery) {
* @return
*/
static bool hasNullValue(SColIndex* pColIndex, SDataStatis *pStatis, SDataStatis **pColStatis) {
if (TSDB_COL_IS_TAG(pColIndex->flag) || pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return false;
}

if (pStatis != NULL) {
if (pStatis != NULL && !TSDB_COL_IS_TAG(pColIndex->flag)) {
*pColStatis = &pStatis[pColIndex->colIndex];
assert((*pColStatis)->colId == pColIndex->colId);
} else {
*pColStatis = NULL;
}

if (TSDB_COL_IS_TAG(pColIndex->flag) || pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return false;
}

if ((*pColStatis) != NULL && (*pColStatis)->numOfNull == 0) {
return false;
}

@@ -387,31 +386,33 @@ static SWindowResult *doSetTimeWindowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SWin
if (p1 != NULL) {
pWindowResInfo->curIndex = *p1;
} else {
if (masterscan) { // more than the capacity, reallocate the resources
if (pWindowResInfo->size >= pWindowResInfo->capacity) {
int64_t newCap = pWindowResInfo->capacity * 2;

char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult));
if (t != NULL) {
pWindowResInfo->pResult = (SWindowResult *)t;
memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * pWindowResInfo->capacity);
} else {
// todo
}

for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) {
SPosInfo pos = {-1, -1};
createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &pos, pRuntimeEnv->interBufSize);
}
pWindowResInfo->capacity = newCap;
}

// add a new result set for a new group
pWindowResInfo->curIndex = pWindowResInfo->size++;
taosHashPut(pWindowResInfo->hashList, pData, bytes, (char *)&pWindowResInfo->curIndex, sizeof(int32_t));
} else {
if (!masterscan) { // not master scan, do not add new timewindow
return NULL;
}

// more than the capacity, reallocate the resources
if (pWindowResInfo->size >= pWindowResInfo->capacity) {
int64_t newCap = pWindowResInfo->capacity * 1.5;
char *t = realloc(pWindowResInfo->pResult, newCap * sizeof(SWindowResult));
if (t != NULL) {
pWindowResInfo->pResult = (SWindowResult *)t;

int32_t inc = newCap - pWindowResInfo->capacity;
memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, sizeof(SWindowResult) * inc);
} else {
// todo
}

for (int32_t i = pWindowResInfo->capacity; i < newCap; ++i) {
createQueryResultInfo(pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, pRuntimeEnv->interBufSize);
}

pWindowResInfo->capacity = newCap;
}

// add a new result set for a new group
pWindowResInfo->curIndex = pWindowResInfo->size++;
taosHashPut(pWindowResInfo->hashList, pData, bytes, (char *)&pWindowResInfo->curIndex, sizeof(int32_t));
}

return getWindowResult(pWindowResInfo, pWindowResInfo->curIndex);
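The rewritten branch above grows the window-result array by 1.5x and, unlike the replaced code, zeroes only the newly added slots rather than a full capacity's worth of entries past the old end. A standalone sketch of that growth step with a plain struct as the element type (the element type and error handling are stand-ins):

    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    typedef struct { int32_t used; } Slot;   /* stand-in for SWindowResult */

    /* Grow *pBuf from *pCap to roughly 1.5x and zero only the added tail. */
    static int growBy1_5(Slot **pBuf, int64_t *pCap) {
      int64_t newCap = (int64_t)(*pCap * 1.5);
      if (newCap <= *pCap) newCap = *pCap + 1;      /* guard very small capacities */

      Slot *t = realloc(*pBuf, newCap * sizeof(Slot));
      if (t == NULL) return -1;                     /* keep the old buffer on failure */

      int64_t inc = newCap - *pCap;
      memset(t + *pCap, 0, inc * sizeof(Slot));     /* only the increment, not a full capacity */

      *pBuf = t;
      *pCap = newCap;
      return 0;
    }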
@@ -470,10 +471,10 @@ static int32_t addNewWindowResultBuf(SWindowResult *pWindowRes, SDiskbasedResult
int32_t pageId = -1;
SIDList list = getDataBufPagesIdList(pResultBuf, sid);

if (list.size == 0) {
if (taosArrayGetSize(list) == 0) {
pData = getNewDataBuf(pResultBuf, sid, &pageId);
} else {
pageId = getLastPageId(&list);
pageId = getLastPageId(list);
pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, pageId);

if (pData->num >= numOfRowsPerPage) {

@@ -511,10 +512,11 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SWindowRes
}

*newWind = true;

// not assign result buffer yet, add new result buffer
if (pWindowRes->pos.pageId == -1) {
int32_t ret = addNewWindowResultBuf(pWindowRes, pResultBuf, sid, pRuntimeEnv->numOfRowsPerPage);
if (ret != 0) {
if (ret != TSDB_CODE_SUCCESS) {
return -1;
}
}

@@ -531,7 +533,7 @@ static SWindowStatus *getTimeWindowResStatus(SWindowResInfo *pWindowResInfo, int
return &pWindowResInfo->pResult[slot].status;
}

static int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int16_t pos,
static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int16_t pos,
int16_t order, int64_t *pData) {
int32_t forwardStep = 0;

@@ -647,12 +649,8 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo
if (QUERY_IS_ASC_QUERY(pQuery)) {
if (ekey < pDataBlockInfo->window.ekey) {
num = getForwardStepsInBlock(pDataBlockInfo->rows, searchFn, ekey, startPos, order, pPrimaryColumn);
if (num == 0) { // no qualified data in current block, do not update the lastKey value
assert(ekey < pPrimaryColumn[startPos]);
} else {
if (updateLastKey) {
item->lastKey = pPrimaryColumn[startPos + (num - 1)] + step;
}
if (updateLastKey) { // update the last key
item->lastKey = pPrimaryColumn[startPos + (num - 1)] + step;
}
} else {
num = pDataBlockInfo->rows - startPos;

@@ -663,12 +661,8 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo
} else { // desc
if (ekey > pDataBlockInfo->window.skey) {
num = getForwardStepsInBlock(pDataBlockInfo->rows, searchFn, ekey, startPos, order, pPrimaryColumn);
if (num == 0) { // no qualified data in current block, do not update the lastKey value
assert(ekey > pPrimaryColumn[startPos]);
} else {
if (updateLastKey) {
item->lastKey = pPrimaryColumn[startPos - (num - 1)] + step;
}
if (updateLastKey) { // update the last key
item->lastKey = pPrimaryColumn[startPos - (num - 1)] + step;
}
} else {
num = startPos + 1;

@@ -912,13 +906,20 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
}

int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
if (QUERY_IS_INTERVAL_QUERY(pQuery) && tsCols != NULL) {
int32_t offset = GET_COL_DATA_POS(pQuery, 0, step);
TSKEY ts = tsCols[offset];
if (QUERY_IS_INTERVAL_QUERY(pQuery)/* && tsCols != NULL*/) {
TSKEY ts = TSKEY_INITIAL_VAL;

bool hasTimeWindow = false;
if (tsCols == NULL) {
ts = QUERY_IS_ASC_QUERY(pQuery)? pDataBlockInfo->window.skey:pDataBlockInfo->window.ekey;
} else {
int32_t offset = GET_COL_DATA_POS(pQuery, 0, step);
ts = tsCols[offset];
}

bool hasTimeWindow = false;
STimeWindow win = getActiveTimeWindow(pWindowResInfo, ts, pQuery);
if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) {
if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &win, masterScan, &hasTimeWindow) !=
TSDB_CODE_SUCCESS) {
tfree(sasArray);
return;
}
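The interval branch above no longer insists on the timestamp column being loaded: when tsCols is NULL it anchors the active window on the block boundary that matches the scan direction. A small standalone sketch of that selection (the types are simplified stand-ins):

    #include <stdint.h>
    #include <stdbool.h>

    typedef int64_t TSKEY;

    typedef struct {            /* stand-in for the block's time window */
      TSKEY skey;
      TSKEY ekey;
    } TimeWindow;

    /* First timestamp to anchor the active window on for this block. */
    static TSKEY firstTsOfBlock(const TSKEY *tsCols, int32_t offset,
                                TimeWindow blockWindow, bool ascQuery) {
      if (tsCols == NULL) {
        /* column not loaded: the block boundary is a safe anchor */
        return ascQuery ? blockWindow.skey : blockWindow.ekey;
      }
      return tsCols[offset];    /* offset is the first row in scan order */
    }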
@@ -927,7 +928,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
int32_t startPos = pQuery->pos;

if (hasTimeWindow) {
TSKEY ekey = reviseWindowEkey(pQuery, &win);
TSKEY ekey = reviseWindowEkey(pQuery, &win);
forwardStep = getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, pQuery->pos, ekey, searchFn, true);

SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));

@@ -946,7 +947,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *

// null data, failed to allocate more memory buffer
hasTimeWindow = false;
if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin, masterScan, &hasTimeWindow) != TSDB_CODE_SUCCESS) {
if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pDataBlockInfo->tid, &nextWin, masterScan,
&hasTimeWindow) != TSDB_CODE_SUCCESS) {
break;
}

@@ -957,7 +959,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *
TSKEY ekey = reviseWindowEkey(pQuery, &nextWin);
forwardStep = getNumOfRowsInTimeWindow(pQuery, pDataBlockInfo, tsCols, startPos, ekey, searchFn, true);

SWindowStatus* pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo));
doBlockwiseApplyFunctions(pRuntimeEnv, pStatus, &nextWin, startPos, forwardStep, tsCols, pDataBlockInfo->rows);
}

@@ -1478,7 +1480,9 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
qDebug("QInfo:%p setup runtime env", GET_QINFO_ADDR(pRuntimeEnv));
SQuery *pQuery = pRuntimeEnv->pQuery;

pRuntimeEnv->resultInfo = calloc(pQuery->numOfOutput, sizeof(SResultInfo));
size_t size = pRuntimeEnv->interBufSize + pQuery->numOfOutput * sizeof(SResultInfo);

pRuntimeEnv->resultInfo = calloc(1, size);
pRuntimeEnv->pCtx = (SQLFunctionCtx *)calloc(pQuery->numOfOutput, sizeof(SQLFunctionCtx));

if (pRuntimeEnv->resultInfo == NULL || pRuntimeEnv->pCtx == NULL) {

@@ -1549,7 +1553,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int16_t order
}
}

char* buf = calloc(1, pRuntimeEnv->interBufSize);
char* buf = (char*) pRuntimeEnv->resultInfo + sizeof(SResultInfo) * pQuery->numOfOutput;

// set the intermediate result output buffer
setWindowResultInfo(pRuntimeEnv->resultInfo, pQuery, pRuntimeEnv->stableQuery, buf);

@@ -1578,7 +1582,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
SQInfo* pQInfo = (SQInfo*) GET_QINFO_ADDR(pRuntimeEnv);

qDebug("QInfo:%p teardown runtime env", pQInfo);
cleanupTimeWindowInfo(&pRuntimeEnv->windowResInfo, pQuery->numOfOutput);
cleanupTimeWindowInfo(&pRuntimeEnv->windowResInfo);

if (pRuntimeEnv->pCtx != NULL) {
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {

@@ -1592,7 +1596,6 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
tfree(pCtx->tagInfo.pTagCtxList);
}

tfree(pRuntimeEnv->resultInfo[0].interResultBuf);
tfree(pRuntimeEnv->resultInfo);
tfree(pRuntimeEnv->pCtx);
}

@@ -1608,7 +1611,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {

#define IS_QUERY_KILLED(_q) ((_q)->code == TSDB_CODE_TSC_QUERY_CANCELLED)

static void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED; }
static void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED;}

static bool isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) {
SQuery* pQuery = pRuntimeEnv->pQuery;

@@ -1912,24 +1915,11 @@ static int32_t getInitialPageNum(SQInfo *pQInfo) {
return num;
}

#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pSelectExpr[1].base.arg->argValue.i64:1)

static FORCE_INLINE int32_t getNumOfRowsInResultPage(SQuery *pQuery, bool topBotQuery, bool isSTableQuery) {
int32_t rowSize = pQuery->rowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, topBotQuery, isSTableQuery);
return (DEFAULT_INTERN_BUF_PAGE_SIZE - sizeof(tFilePage)) / rowSize;
}

char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SWindowResult *pResult) {
assert(pResult != NULL && pRuntimeEnv != NULL);

SQuery *pQuery = pRuntimeEnv->pQuery;
tFilePage *page = GET_RES_BUF_PAGE_BY_ID(pRuntimeEnv->pResultBuf, pResult->pos.pageId);
int32_t realRowId = pResult->pos.rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery);

return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage +
pQuery->pSelectExpr[columnIndex].bytes * realRowId;
}

#define IS_PREFILTER_TYPE(_t) ((_t) != TSDB_DATA_TYPE_BINARY && (_t) != TSDB_DATA_TYPE_NCHAR)

static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDataStatis, SQLFunctionCtx *pCtx,

@@ -1997,23 +1987,80 @@ static bool needToLoadDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis *pDat
return false;
}

#define PT_IN_WINDOW(_p, _w) ((_p) > (_w).skey && (_p) < (_w).ekey)

static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) {
STimeWindow w = {0};

TSKEY sk = MIN(pQuery->window.skey, pQuery->window.ekey);
TSKEY ek = MAX(pQuery->window.skey, pQuery->window.ekey);

if (QUERY_IS_ASC_QUERY(pQuery)) {
getAlignQueryTimeWindow(pQuery, pBlockInfo->window.skey, sk, ek, &w);

if (PT_IN_WINDOW(w.ekey, pBlockInfo->window)) {
return true;
}

while(1) {
GET_NEXT_TIMEWINDOW(pQuery, &w);
if (w.skey > pBlockInfo->window.skey) {
break;
}

if (PT_IN_WINDOW(w.skey, pBlockInfo->window) || PT_IN_WINDOW(w.ekey, pBlockInfo->window)) {
return true;
}
}
} else {
getAlignQueryTimeWindow(pQuery, pBlockInfo->window.ekey, sk, ek, &w);
if (PT_IN_WINDOW(w.skey, pBlockInfo->window)) {
return true;
}

while(1) {
GET_NEXT_TIMEWINDOW(pQuery, &w);
if (w.ekey < pBlockInfo->window.skey) {
break;
}

if (PT_IN_WINDOW(w.skey, pBlockInfo->window) || PT_IN_WINDOW(w.ekey, pBlockInfo->window)) {
return true;
}
}
}

return false;
}

int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis, SArray** pDataBlock) {
SQuery *pQuery = pRuntimeEnv->pQuery;

uint32_t status = 0;
if (pQuery->numOfFilterCols > 0) {
if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf > 0) {
status = BLK_DATA_ALL_NEEDED;
} else { // check if this data block is required to load
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SSqlFuncMsg* pSqlFunc = &pQuery->pSelectExpr[i].base;

int32_t functionId = pSqlFunc->functionId;
int32_t colId = pSqlFunc->colInfo.colId;
status |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pBlockInfo->window.skey, pBlockInfo->window.ekey, colId);
// Calculate all time windows that are overlapping or contain current data block.
// If current data block is contained by all possible time window, loading current
// data block is not needed.
if (QUERY_IS_INTERVAL_QUERY(pQuery) && overlapWithTimeWindow(pQuery, pBlockInfo)) {
status = BLK_DATA_ALL_NEEDED;
}

if (pRuntimeEnv->pTSBuf > 0 || QUERY_IS_INTERVAL_QUERY(pQuery)) {
status |= BLK_DATA_ALL_NEEDED;
if (status != BLK_DATA_ALL_NEEDED) {
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
SSqlFuncMsg* pSqlFunc = &pQuery->pSelectExpr[i].base;

int32_t functionId = pSqlFunc->functionId;
int32_t colId = pSqlFunc->colInfo.colId;

status |= aAggs[functionId].dataReqFunc(&pRuntimeEnv->pCtx[i], pBlockInfo->window.skey, pBlockInfo->window.ekey, colId);
if ((status & BLK_DATA_ALL_NEEDED) == BLK_DATA_ALL_NEEDED) {
break;
}
}
}
}
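loadDataBlockOnDemand now forces a full load when filter columns or a TS buffer are present, or when an interval query's windows straddle the block; only otherwise does it poll the per-function data requirements and stop early once a full load is demanded. A condensed standalone sketch of that decision order (the flag values and the requirement callback are stand-ins for the real BLK_DATA_* constants and aAggs[].dataReqFunc):

    #include <stdint.h>
    #include <stdbool.h>

    enum { BLK_NONE = 0x0, BLK_STATIS_NEEDED = 0x1, BLK_ALL_NEEDED = 0x3 };  /* stand-in flags */

    typedef uint32_t (*DataReqFn)(int32_t functionId);   /* stand-in for dataReqFunc */

    static uint32_t blockLoadStatus(bool hasFilterCols, bool hasTsBuf, bool isIntervalQuery,
                                    bool windowsOverlapBlock, int32_t numOfOutput,
                                    const int32_t *functionIds, DataReqFn reqFn) {
      uint32_t status = BLK_NONE;

      if (hasFilterCols || hasTsBuf) {
        status = BLK_ALL_NEEDED;               /* row-level filtering needs every column */
      } else if (isIntervalQuery && windowsOverlapBlock) {
        status = BLK_ALL_NEEDED;               /* a window boundary cuts through this block */
      }

      if (status != BLK_ALL_NEEDED) {
        for (int32_t i = 0; i < numOfOutput; ++i) {
          status |= reqFn(functionIds[i]);     /* let each output function vote */
          if ((status & BLK_ALL_NEEDED) == BLK_ALL_NEEDED) {
            break;                             /* no point asking the rest */
          }
        }
      }
      return status;
    }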
@@ -2189,6 +2236,27 @@ static void ensureOutputBuffer(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pB
}
}

static void doSetInitialTimewindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pBlockInfo) {
SQuery* pQuery = pRuntimeEnv->pQuery;

if (QUERY_IS_INTERVAL_QUERY(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) {
STimeWindow w = TSWINDOW_INITIALIZER;
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;

if (QUERY_IS_ASC_QUERY(pQuery)) {
getAlignQueryTimeWindow(pQuery, pBlockInfo->window.skey, pBlockInfo->window.skey, pQuery->window.ekey, &w);
pWindowResInfo->startTime = w.skey;
pWindowResInfo->prevSKey = w.skey;
} else {
// the start position of the first time window in the endpoint that spreads beyond the queried last timestamp
getAlignQueryTimeWindow(pQuery, pBlockInfo->window.ekey, pQuery->window.ekey, pBlockInfo->window.ekey, &w);

pWindowResInfo->startTime = pQuery->window.skey;
pWindowResInfo->prevSKey = w.skey;
}
}
}

static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
STableQueryInfo* pTableQueryInfo = pQuery->current;

@@ -2216,24 +2284,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
}

tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo);

// todo extract methods
if (QUERY_IS_INTERVAL_QUERY(pQuery) && pRuntimeEnv->windowResInfo.prevSKey == TSKEY_INITIAL_VAL) {
STimeWindow w = TSWINDOW_INITIALIZER;
SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo;

if (QUERY_IS_ASC_QUERY(pQuery)) {
getAlignQueryTimeWindow(pQuery, blockInfo.window.skey, blockInfo.window.skey, pQuery->window.ekey, &w);
pWindowResInfo->startTime = w.skey;
pWindowResInfo->prevSKey = w.skey;
} else {
// the start position of the first time window in the endpoint that spreads beyond the queried last timestamp
getAlignQueryTimeWindow(pQuery, blockInfo.window.ekey, pQuery->window.ekey, blockInfo.window.ekey, &w);

pWindowResInfo->startTime = pQuery->window.skey;
pWindowResInfo->prevSKey = w.skey;
}
}
doSetInitialTimewindow(pRuntimeEnv, &blockInfo);

// in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block
ensureOutputBuffer(pRuntimeEnv, &blockInfo);

@@ -2267,7 +2318,6 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
if (QUERY_IS_INTERVAL_QUERY(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) {
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
closeAllTimeWindow(&pRuntimeEnv->windowResInfo);
// removeRedundantWindow(&pRuntimeEnv->windowResInfo, pTableQueryInfo->lastKey - step, step);
pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1; // point to the last time window
} else {
assert(Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL));

@@ -2490,11 +2540,7 @@ void UNUSED_FUNC displayInterResult(tFilePage **pdata, SQueryRuntimeEnv* pRuntim

switch (pQuery->pSelectExpr[i].type) {
case TSDB_DATA_TYPE_BINARY: {
// int32_t colIndex = pQuery->pSelectExpr[i].base.colInfo.colIndex;
int32_t type = pQuery->pSelectExpr[i].type;
// } else {
// type = pMeterObj->schema[colIndex].type;
// }
printBinaryData(pQuery->pSelectExpr[i].base.functionId, pdata[i]->data + pQuery->pSelectExpr[i].bytes * j,
type);
break;

@@ -2617,16 +2663,19 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
SIDList list = getDataBufPagesIdList(pResultBuf, pQInfo->offset + id);

int32_t total = 0;
for (int32_t i = 0; i < list.size; ++i) {
tFilePage *pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, list.pData[i]);
int32_t size = taosArrayGetSize(list);
for (int32_t i = 0; i < size; ++i) {
int32_t* pgId = taosArrayGet(list, i);
tFilePage *pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, *pgId);
total += pData->num;
}

int32_t rows = total;

int32_t offset = 0;
for (int32_t num = 0; num < list.size; ++num) {
tFilePage *pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, list.pData[num]);
for (int32_t j = 0; j < size; ++j) {
int32_t* pgId = taosArrayGet(list, j);
tFilePage *pData = GET_RES_BUF_PAGE_BY_ID(pResultBuf, *pgId);

for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes;

@@ -2692,7 +2741,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
STableQueryInfo *item = taosArrayGetP(pGroup, i);

SIDList list = getDataBufPagesIdList(pRuntimeEnv->pResultBuf, TSDB_TABLEID(item->pTable)->tid);
if (list.size > 0 && item->windowResInfo.size > 0) {
if (taosArrayGetSize(list) > 0 && item->windowResInfo.size > 0) {
pTableList[numOfTables] = item;
numOfTables += 1;
}

@@ -2960,19 +3009,18 @@ void switchCtxOrder(SQueryRuntimeEnv *pRuntimeEnv) {
}
}

int32_t createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo, size_t interBufSize) {
int32_t createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, size_t interBufSize) {
int32_t numOfCols = pQuery->numOfOutput;

pResultRow->resultInfo = calloc((size_t)numOfCols, sizeof(SResultInfo));
size_t size = numOfCols * sizeof(SResultInfo) + interBufSize;
pResultRow->resultInfo = calloc(1, size);
if (pResultRow->resultInfo == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
pResultRow->pos = *posInfo;

char* buf = calloc(1, interBufSize);
if (buf == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
pResultRow->pos = (SPosInfo) {-1, -1};

char* buf = (char*) pResultRow->resultInfo + numOfCols * sizeof(SResultInfo);

// set the intermediate result output buffer
setWindowResultInfo(pResultRow->resultInfo, pQuery, isSTableQuery, buf);
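createQueryResultInfo (and setupQueryRuntimeEnv earlier in this diff) now carve the per-column result-info array and the intermediate result buffer out of one calloc instead of two, with the buffer starting right after the array. A standalone sketch of that single-allocation layout (the element type and sizes are stand-ins):

    #include <stdlib.h>
    #include <stdint.h>

    typedef struct { void *interResultBuf; int32_t numOfRes; } ResultInfo;  /* stand-in */

    /* One allocation: [ ResultInfo x numOfCols ][ interBufSize bytes ] */
    static ResultInfo *allocResultInfo(int32_t numOfCols, size_t interBufSize, char **interBuf) {
      size_t size = (size_t)numOfCols * sizeof(ResultInfo) + interBufSize;
      ResultInfo *info = calloc(1, size);
      if (info == NULL) return NULL;

      *interBuf = (char *)info + (size_t)numOfCols * sizeof(ResultInfo);
      return info;   /* a single free(info) releases both parts */
    }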
@@ -3178,6 +3226,13 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI
SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY);

SWITCH_ORDER(pQuery->order.order);

if (QUERY_IS_ASC_QUERY(pQuery)) {
assert(pQuery->window.skey <= pQuery->window.ekey);
} else {
assert(pQuery->window.skey >= pQuery->window.ekey);
}

SET_REVERSE_SCAN_FLAG(pRuntimeEnv);

STsdbQueryCond cond = {

@@ -3217,8 +3272,7 @@ static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus

SET_MASTER_SCAN_FLAG(pRuntimeEnv);

// update the pQuery->window.skey and pQuery->window.ekey to limit the scan scope of sliding query
// during reverse scan
// update the pQuery->window.skey and pQuery->window.ekey to limit the scan scope of sliding query during reverse scan
pTableQueryInfo->lastKey = pStatus->lastKey;
pQuery->status = pStatus->status;

@@ -3244,7 +3298,12 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) {

if (pRuntimeEnv->scanFlag == MASTER_SCAN) {
qstatus.status = pQuery->status;
qstatus.curWindow.ekey = pTableQueryInfo->lastKey - step;

// do nothing if no data blocks are found qualified during scan
if (qstatus.lastKey != pTableQueryInfo->lastKey) {
qstatus.curWindow.ekey = pTableQueryInfo->lastKey - step;
}

qstatus.lastKey = pTableQueryInfo->lastKey;
}

@@ -3373,12 +3432,12 @@ static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void
return pTableQueryInfo;
}

void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols) {
void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo) {
if (pTableQueryInfo == NULL) {
return;
}

cleanupTimeWindowInfo(&pTableQueryInfo->windowResInfo, numOfCols);
cleanupTimeWindowInfo(&pTableQueryInfo->windowResInfo);
}

#define CHECK_QUERY_TIME_RANGE(_q, _tableInfo) \

@@ -3837,45 +3896,11 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int
static void queryCostStatis(SQInfo *pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQueryCostInfo *pSummary = &pRuntimeEnv->summary;
// if (pRuntimeEnv->pResultBuf == NULL) {
//// pSummary->tmpBufferInDisk = 0;
// } else {
//// pSummary->tmpBufferInDisk = getResBufSize(pRuntimeEnv->pResultBuf);
// }
//
// qDebug("QInfo:%p cost: comp blocks:%d, size:%d Bytes, elapsed time:%.2f ms", pQInfo, pSummary->readCompInfo,
// pSummary->totalCompInfoSize, pSummary->loadCompInfoUs / 1000.0);
//
// qDebug("QInfo:%p cost: field info: %d, size:%d Bytes, avg size:%.2f Bytes, elapsed time:%.2f ms", pQInfo,
// pSummary->readField, pSummary->totalFieldSize, (double)pSummary->totalFieldSize / pSummary->readField,
// pSummary->loadFieldUs / 1000.0);
//
// qDebug(
// "QInfo:%p cost: file blocks:%d, size:%d Bytes, elapsed time:%.2f ms, skipped:%d, in-memory gen null:%d Bytes",
// pQInfo, pSummary->readDiskBlocks, pSummary->totalBlockSize, pSummary->loadBlocksUs / 1000.0,
// pSummary->skippedFileBlocks, pSummary->totalGenData);

qDebug("QInfo:%p :cost summary: elapsed time:%"PRId64" us, io time:%"PRId64" us, total blocks:%d, load block statis:%d,"
" load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64,
pQInfo, pSummary->elapsedTime, pSummary->ioTime, pSummary->totalBlocks, pSummary->loadBlockStatis,
pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows);

// qDebug("QInfo:%p cost: temp file:%d Bytes", pQInfo, pSummary->tmpBufferInDisk);
//
// qDebug("QInfo:%p cost: file:%d, table:%d", pQInfo, pSummary->numOfFiles, pSummary->numOfTables);
// qDebug("QInfo:%p cost: seek ops:%d", pQInfo, pSummary->numOfSeek);
//
// double total = pSummary->fileTimeUs + pSummary->cacheTimeUs;
// double io = pSummary->loadCompInfoUs + pSummary->loadBlocksUs + pSummary->loadFieldUs;

// double computing = total - io;
//
// qDebug(
// "QInfo:%p cost: total elapsed time:%.2f ms, file:%.2f ms(%.2f%), cache:%.2f ms(%.2f%). io:%.2f ms(%.2f%),"
// "comput:%.2fms(%.2f%)",
// pQInfo, total / 1000.0, pSummary->fileTimeUs / 1000.0, pSummary->fileTimeUs * 100 / total,
// pSummary->cacheTimeUs / 1000.0, pSummary->cacheTimeUs * 100 / total, io / 1000.0, io * 100 / total,
// computing / 1000.0, computing * 100 / total);
qDebug("QInfo:%p :cost summary: elapsed time:%"PRId64" us, total blocks:%d, load block statis:%d,"
" load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64,
pQInfo, pSummary->elapsedTime, pSummary->totalBlocks, pSummary->loadBlockStatis,
pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows);
}

static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) {

@@ -4189,14 +4214,14 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo

pRuntimeEnv->numOfRowsPerPage = getNumOfRowsInResultPage(pQuery, pRuntimeEnv->topBotQuery, isSTableQuery);

if (isSTableQuery) {
if (isSTableQuery && !onlyQueryTags(pRuntimeEnv->pQuery)) {
int32_t rows = getInitialPageNum(pQInfo);
code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, rows, pQuery->rowSize, pQInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

if (pQuery->intervalTime == 0) {
if (!QUERY_IS_INTERVAL_QUERY(pQuery)) {
int16_t type = TSDB_DATA_TYPE_NULL;

if (pRuntimeEnv->groupbyNormalCol) { // group by columns not tags;

@@ -4317,7 +4342,6 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) {

SDataStatis *pStatis = NULL;
SArray *pDataBlock = NULL;

if (loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis, &pDataBlock) == BLK_DATA_DISCARD) {
pQuery->current->lastKey = QUERY_IS_ASC_QUERY(pQuery)? blockInfo.window.ekey + step:blockInfo.window.skey + step;
continue;

@@ -4452,7 +4476,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
if (pRuntimeEnv->pQueryHandle == NULL) {
longjmp(pRuntimeEnv->env, terrno);
}

initCtxOutputBuf(pRuntimeEnv);

SArray* s = tsdbGetQueriedTableList(pRuntimeEnv->pQueryHandle);

@@ -4603,7 +4627,6 @@ static void sequentialTableProcess(SQInfo *pQInfo) {

// TODO handle the limit offset problem
if (pQuery->numOfFilterCols == 0 && pQuery->limit.offset > 0) {
// skipBlocks(pRuntimeEnv);
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
pQInfo->tableIndex++;
continue;

@@ -4717,7 +4740,7 @@ static void doSaveContext(SQInfo *pQInfo) {
if (pRuntimeEnv->pSecQueryHandle == NULL) {
longjmp(pRuntimeEnv->env, terrno);
}

setQueryStatus(pQuery, QUERY_NOT_COMPLETED);
switchCtxOrder(pRuntimeEnv);
disableFuncInReverseScan(pQInfo);

@@ -4740,8 +4763,6 @@ static void doRestoreContext(SQInfo *pQInfo) {
static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) {
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;

// int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);

if (QUERY_IS_INTERVAL_QUERY(pQuery)) {
size_t numOfGroup = GET_NUM_OF_TABLEGROUP(pQInfo);
for (int32_t i = 0; i < numOfGroup; ++i) {

@@ -4751,7 +4772,6 @@ static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) {
for (int32_t j = 0; j < num; ++j) {
STableQueryInfo* item = taosArrayGetP(group, j);
closeAllTimeWindow(&item->windowResInfo);
// removeRedundantWindow(&item->windowResInfo, item->lastKey - step, step);
}
}
} else { // close results for group result

@@ -4844,7 +4864,7 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;

SQuery *pQuery = pRuntimeEnv->pQuery;
if (!isTopBottomQuery(pQuery) && pQuery->limit.offset > 0) { // no need to execute, since the output will be ignore.
if (!pRuntimeEnv->topBotQuery && pQuery->limit.offset > 0) { // no need to execute, since the output will be ignore.
return;
}

@@ -5684,6 +5704,20 @@ static int compareTableIdInfo(const void* a, const void* b) {

static void freeQInfo(SQInfo *pQInfo);

static void calResultBufSize(SQuery* pQuery) {
const int32_t RESULT_MSG_MIN_SIZE = 1024 * (1024 + 512); // bytes
const int32_t RESULT_MSG_MIN_ROWS = 8192;
const float RESULT_THRESHOLD_RATIO = 0.85;

int32_t numOfRes = RESULT_MSG_MIN_SIZE / pQuery->rowSize;
if (numOfRes < RESULT_MSG_MIN_ROWS) {
numOfRes = RESULT_MSG_MIN_ROWS;
}

pQuery->rec.capacity = numOfRes;
pQuery->rec.threshold = numOfRes * RESULT_THRESHOLD_RATIO;
}

static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols) {
int16_t numOfCols = pQueryMsg->numOfCols;
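calResultBufSize above replaces the fixed 4096/4000 output capacity with one derived from the row size: as many rows as fit in roughly 1.5 MB, but at least 8192, with the flush threshold at 85% of capacity. A standalone rendering of the same arithmetic with one worked example (the constants are copied from the hunk; the struct is a stand-in for SQuery's rec fields):

    #include <stdio.h>
    #include <stdint.h>

    typedef struct { int64_t capacity; int64_t threshold; } ResultRec;  /* stand-in */

    static void calcResultBufSize(int32_t rowSize, ResultRec *rec) {
      const int32_t MIN_SIZE_BYTES = 1024 * (1024 + 512);  /* ~1.5 MB result message */
      const int32_t MIN_ROWS       = 8192;
      const float   THRESHOLD      = 0.85f;

      int32_t numOfRes = MIN_SIZE_BYTES / rowSize;
      if (numOfRes < MIN_ROWS) {
        numOfRes = MIN_ROWS;
      }
      rec->capacity  = numOfRes;
      rec->threshold = (int64_t)(numOfRes * THRESHOLD);
    }

    int main(void) {
      ResultRec rec;
      calcResultBufSize(100, &rec);   /* 1572864 / 100 = 15728 rows */
      printf("capacity=%lld threshold=%lld\n", (long long)rec.capacity, (long long)rec.threshold);
      return 0;                       /* prints capacity=15728 threshold=13368 */
    }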
@@ -5717,8 +5751,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
pQuery->fillType = pQueryMsg->fillType;
pQuery->numOfTags = pQueryMsg->numOfTags;
pQuery->tagColList = pTagCols;

// todo do not allocate ??

pQuery->colList = calloc(numOfCols, sizeof(SSingleColumnFilterInfo));
if (pQuery->colList == NULL) {
goto _cleanup;

@@ -5748,9 +5781,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
goto _cleanup;
}

// set the output buffer capacity
pQuery->rec.capacity = 4096;
pQuery->rec.threshold = 4000;
calResultBufSize(pQuery);

for (int32_t col = 0; col < pQuery->numOfOutput; ++col) {
assert(pExprs[col].interBytes >= pExprs[col].bytes);

@@ -5799,7 +5830,6 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
if (p1 == NULL) {
goto _cleanup;
}
taosArrayPush(pQInfo->tableqinfoGroupInfo.pGroupList, &p1);

for(int32_t j = 0; j < s; ++j) {
void* pTable = taosArrayGetP(pa, j);

@@ -5822,6 +5852,8 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id->tid, sizeof(id->tid), &item, POINTER_BYTES);
index += 1;
}

taosArrayPush(pQInfo->tableqinfoGroupInfo.pGroupList, &p1);
}

pQInfo->arrTableIdInfo = taosArrayInit(tableIndex, sizeof(STableIdInfo));

@@ -5841,7 +5873,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
return pQInfo;

_cleanup_qinfo:
tsdbDestoryTableGroup(pTableGroupInfo);
tsdbDestroyTableGroup(pTableGroupInfo);

_cleanup_query:
taosArrayDestroy(pGroupbyExpr->columnInfo);

@@ -5981,9 +6013,7 @@ static void freeQInfo(SQInfo *pQInfo) {
size_t num = taosArrayGetSize(p);
for(int32_t j = 0; j < num; ++j) {
STableQueryInfo* item = taosArrayGetP(p, j);
if (item != NULL) {
destroyTableQueryInfo(item, pQuery->numOfOutput);
}
destroyTableQueryInfo(item);
}

taosArrayDestroy(p);

@@ -5993,7 +6023,7 @@ static void freeQInfo(SQInfo *pQInfo) {
tfree(pQInfo->pBuf);
taosArrayDestroy(pQInfo->tableqinfoGroupInfo.pGroupList);
taosHashCleanup(pQInfo->tableqinfoGroupInfo.map);
tsdbDestoryTableGroup(&pQInfo->tableGroupInfo);
tsdbDestroyTableGroup(&pQInfo->tableGroupInfo);
taosArrayDestroy(pQInfo->arrTableIdInfo);

if (pQuery->pGroupbyExpr != NULL) {

@@ -6013,7 +6043,6 @@ static void freeQInfo(SQInfo *pQInfo) {
}

tfree(pQuery->sdata);

tfree(pQuery);

qDebug("QInfo:%p QInfo is freed", pQInfo);

@@ -6168,9 +6197,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, vo
code = tsdbQuerySTableByTagCond(tsdb, id->uid, tagCond, pQueryMsg->tagCondLen, pQueryMsg->tagNameRelType, tbnameCond, &tableGroupInfo, pGroupColIndex,
numOfGroupByCols);
if (code != TSDB_CODE_SUCCESS) {
if (code == TSDB_CODE_QRY_EXCEED_TAGS_LIMIT) {
qError("qmsg:%p failed to QueryStable, reason: %s", pQueryMsg, tstrerror(code));
}
qError("qmsg:%p failed to QueryStable, reason: %s", pQueryMsg, tstrerror(code));
goto _over;
}
} else {

@@ -6269,7 +6296,7 @@ void qTableQuery(qinfo_t qinfo) {
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
if (onlyQueryTags(pQInfo->runtimeEnv.pQuery)) {
assert(pQInfo->runtimeEnv.pQueryHandle == NULL);
buildTagQueryResult(pQInfo); // todo support the limit/offset
buildTagQueryResult(pQInfo);
} else if (pQInfo->runtimeEnv.stableQuery) {
stableQueryImpl(pQInfo);
} else {

@@ -6330,7 +6357,6 @@ bool qHasMoreResultsToRetrieve(qinfo_t qinfo) {
}

if (ret) {
// T_REF_INC(pQInfo);
qDebug("QInfo:%p has more results waits for client retrieve", pQInfo);
}

@@ -6391,6 +6417,22 @@ int32_t qKillQuery(qinfo_t qinfo) {
return TSDB_CODE_SUCCESS;
}

static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes) {
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
if (val == NULL) {
setVardataNull(output, type);
} else {
memcpy(output, val, varDataTLen(val));
}
} else {
if (val == NULL) {
setNull(output, type, bytes);
} else { // todo here stop will cause client crash
memcpy(output, val, bytes);
}
}
}

static void buildTagQueryResult(SQInfo* pQInfo) {
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
SQuery * pQuery = pRuntimeEnv->pQuery;
|||
output += sizeof(pQInfo->vgId);
|
||||
|
||||
if (pExprInfo->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
|
||||
char *data = tsdbGetTableName(item->pTable);
|
||||
char* data = tsdbGetTableName(item->pTable);
|
||||
memcpy(output, data, varDataTLen(data));
|
||||
} else {
|
||||
char *val = tsdbGetTableTagVal(item->pTable, pExprInfo->base.colInfo.colId, type, bytes);
|
||||
|
||||
// todo refactor
|
||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
|
||||
if (val == NULL) {
|
||||
setVardataNull(output, type);
|
||||
} else {
|
||||
memcpy(output, val, varDataTLen(val));
|
||||
}
|
||||
} else {
|
||||
if (val == NULL) {
|
||||
setNull(output, type, bytes);
|
||||
} else { // todo here stop will cause client crash
|
||||
memcpy(output, val, bytes);
|
||||
}
|
||||
}
|
||||
char* data = tsdbGetTableTagVal(item->pTable, pExprInfo->base.colInfo.colId, type, bytes);
|
||||
doSetTagValueToResultBuf(output, data, type, bytes);
|
||||
}
|
||||
|
||||
count += 1;
|
||||
|
@ -6482,38 +6510,44 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
} else { // return only the tags|table name etc.
count = 0;
SSchema tbnameSchema = tGetTableNameColumnSchema();
while(pQInfo->tableIndex < num && count < pQuery->rec.capacity) {

int32_t maxNumOfTables = pQuery->rec.capacity;
if (pQuery->limit.limit >= 0 && pQuery->limit.limit < pQuery->rec.capacity) {
maxNumOfTables = pQuery->limit.limit;
}

while(pQInfo->tableIndex < num && count < maxNumOfTables) {
int32_t i = pQInfo->tableIndex++;

// discard current result due to offset
if (pQuery->limit.offset > 0) {
pQuery->limit.offset -= 1;
continue;
}

SExprInfo* pExprInfo = pQuery->pSelectExpr;
STableQueryInfo* item = taosArrayGetP(pa, i);

char *data = NULL, *dst = NULL;
int16_t type = 0, bytes = 0;
for(int32_t j = 0; j < pQuery->numOfOutput; ++j) {
if (pExprInfo[j].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
char* data = tsdbGetTableName(item->pTable);
char* dst = pQuery->sdata[j]->data + count * tbnameSchema.bytes;
memcpy(dst, data, varDataTLen(data));
} else {// todo refactor
int16_t type = pExprInfo[j].type;
int16_t bytes = pExprInfo[j].bytes;

char* data = tsdbGetTableTagVal(item->pTable, pExprInfo[j].base.colInfo.colId, type, bytes);
char* dst = pQuery->sdata[j]->data + count * pExprInfo[j].bytes;

if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
if (data == NULL) {
setVardataNull(dst, type);
} else {
memcpy(dst, data, varDataTLen(data));
}
} else {
if (data == NULL) {
setNull(dst, type, bytes);
} else {
memcpy(dst, data, pExprInfo[j].bytes);
}
}
if (pExprInfo[j].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
bytes = tbnameSchema.bytes;
type = tbnameSchema.type;

data = tsdbGetTableName(item->pTable);
dst = pQuery->sdata[j]->data + count * tbnameSchema.bytes;
} else {
type = pExprInfo[j].type;
bytes = pExprInfo[j].bytes;

data = tsdbGetTableTagVal(item->pTable, pExprInfo[j].base.colInfo.colId, type, bytes);
dst = pQuery->sdata[j]->data + count * pExprInfo[j].bytes;

}

doSetTagValueToResultBuf(dst, data, type, bytes);
}
count += 1;
}
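Note: the reworked loop above folds the query limit into the scan bound (maxNumOfTables) and consumes limit.offset before any row is emitted. A minimal stand-alone sketch of that clamp-and-skip pattern, using hypothetical names rather than the query engine's structures:

#include <stdio.h>

// Hypothetical illustration of the limit/offset handling used above: the scan
// bound is the smaller of the output capacity and the limit, and the first
// `offset` qualifying entries are skipped without being counted.
static int scan_tables(int numOfTables, int capacity, long limit, long offset) {
  int maxNumOfTables = capacity;
  if (limit >= 0 && limit < capacity) {
    maxNumOfTables = (int)limit;   // never emit more rows than the limit allows
  }

  int count = 0;
  for (int i = 0; i < numOfTables && count < maxNumOfTables; ++i) {
    if (offset > 0) {              // discard current result due to offset
      offset -= 1;
      continue;
    }
    printf("emit table %d\n", i);
    count += 1;
  }
  return count;
}

int main(void) {
  // 10 candidate tables, room for 8 rows, limit 3, skip the first 2
  printf("emitted %d rows\n", scan_tables(10, 8, 3, 2));
  return 0;
}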
@ -53,9 +53,9 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
if (pWindowResInfo->pResult == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}

for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) {
SPosInfo posInfo = {-1, -1};
int32_t code = createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, &posInfo, pRuntimeEnv->interBufSize);
int32_t code = createQueryResultInfo(pRuntimeEnv->pQuery, &pWindowResInfo->pResult[i], pRuntimeEnv->stableQuery, pRuntimeEnv->interBufSize);
if (code != TSDB_CODE_SUCCESS) {
return code;
}

@ -64,16 +64,15 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
return TSDB_CODE_SUCCESS;
}

void destroyTimeWindowRes(SWindowResult *pWindowRes, int32_t nOutputCols) {
void destroyTimeWindowRes(SWindowResult *pWindowRes) {
if (pWindowRes == NULL) {
return;
}

free(pWindowRes->resultInfo[0].interResultBuf);
free(pWindowRes->resultInfo);
}

void cleanupTimeWindowInfo(SWindowResInfo *pWindowResInfo, int32_t numOfCols) {
void cleanupTimeWindowInfo(SWindowResInfo *pWindowResInfo) {
if (pWindowResInfo == NULL) {
return;
}

@ -84,8 +83,7 @@ void cleanupTimeWindowInfo(SWindowResInfo *pWindowResInfo, int32_t numOfCols) {

if (pWindowResInfo->pResult != NULL) {
for (int32_t i = 0; i < pWindowResInfo->capacity; ++i) {
SWindowResult *pResult = &pWindowResInfo->pResult[i];
destroyTimeWindowRes(pResult, numOfCols);
destroyTimeWindowRes(&pWindowResInfo->pResult[i]);
}
}

@ -225,11 +223,6 @@ void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_
}
}

SWindowResult *getWindowResult(SWindowResInfo *pWindowResInfo, int32_t slot) {
assert(pWindowResInfo != NULL && slot >= 0 && slot < pWindowResInfo->size);
return &pWindowResInfo->pResult[slot];
}

bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot) {
return (getWindowResult(pWindowResInfo, slot)->status.closed == true);
}
@ -1064,10 +1064,9 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
if (*e == TS_PATH_DELIMITER[0]) {
cond = e + 1;
} else if (*e == ',') {
size_t len = e - cond + VARSTR_HEADER_SIZE;
char* p = exception_malloc(len);
varDataSetLen(p, len - VARSTR_HEADER_SIZE);
memcpy(varDataVal(p), cond, len);
size_t len = e - cond;
char* p = exception_malloc(len + VARSTR_HEADER_SIZE);
STR_WITH_SIZE_TO_VARSTR(p, cond, len);
cond += len;
taosArrayPush(pVal->arr, &p);
}
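Note: the change above fixes the length bookkeeping when the comma-separated table-name condition is split into var-strings: the payload copy must cover only the token bytes, with the header accounted for separately. A self-contained sketch of that tokenize-and-prefix step, using a simplified two-byte length header as a stand-in for the VARSTR macros (illustration only, not the tutil API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Simplified stand-in for the var-string layout: a 2-byte length header
// followed by exactly `len` payload bytes.
#define VAR_HEADER_SIZE sizeof(uint16_t)

static char* to_varstr(const char* src, size_t len) {
  char* p = malloc(VAR_HEADER_SIZE + len);
  *(uint16_t*)p = (uint16_t)len;            // write the length header first ...
  memcpy(p + VAR_HEADER_SIZE, src, len);    // ... then only the token bytes
  return p;
}

int main(void) {
  const char* cond = "t_1,t_2,t_3";   // hypothetical comma-separated table-name condition
  const char* start = cond;
  for (const char* e = cond; ; ++e) {
    if (*e == ',' || *e == '\0') {
      char* p = to_varstr(start, (size_t)(e - start));
      printf("token len=%u value=%.*s\n", *(uint16_t*)p, *(uint16_t*)p, p + VAR_HEADER_SIZE);
      free(p);
      if (*e == '\0') break;
      start = e + 1;
    }
  }
  return 0;
}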
@ -2,7 +2,6 @@
#include "hash.h"
#include "qextbuffer.h"
#include "taoserror.h"
#include "tsqlfunction.h"
#include "queryLog.h"

int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t size, int32_t rowSize, void* handle) {

@ -20,35 +19,31 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t si

// init id hash table
pResBuf->idsTable = taosHashInit(size, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false);
pResBuf->list = calloc(size, sizeof(SIDList));
pResBuf->numOfAllocGroupIds = size;
pResBuf->list = taosArrayInit(size, POINTER_BYTES);

char path[4096] = {0};
getTmpfilePath("tsdb_q_buf", path);
getTmpfilePath("tsdb_qbuf", path);
pResBuf->path = strdup(path);

pResBuf->fd = open(pResBuf->path, O_CREAT | O_RDWR, 0666);

memset(path, 0, tListLen(path));

if (!FD_VALID(pResBuf->fd)) {
qError("failed to create tmp file: %s on disk. %s", pResBuf->path, strerror(errno));
return TSDB_CODE_QRY_NO_DISKSPACE;
return TAOS_SYSTEM_ERROR(errno);
}

int32_t ret = ftruncate(pResBuf->fd, pResBuf->numOfPages * DEFAULT_INTERN_BUF_PAGE_SIZE);
if (ret != TSDB_CODE_SUCCESS) {
qError("failed to create tmp file: %s on disk. %s", pResBuf->path, strerror(errno));
return TSDB_CODE_QRY_NO_DISKSPACE;
return TAOS_SYSTEM_ERROR(errno);
}

pResBuf->pBuf = mmap(NULL, pResBuf->totalBufSize, PROT_READ | PROT_WRITE, MAP_SHARED, pResBuf->fd, 0);
if (pResBuf->pBuf == MAP_FAILED) {
qError("QInfo:%p failed to map temp file: %s. %s", handle, pResBuf->path, strerror(errno));
return TSDB_CODE_QRY_OUT_OF_MEMORY; // todo change error code
return TAOS_SYSTEM_ERROR(errno);
}

qDebug("QInfo:%p create tmp file for output result, %s, %" PRId64 "bytes", handle, pResBuf->path,
qDebug("QInfo:%p create tmp file for output result:%s, %" PRId64 "bytes", handle, pResBuf->path,
pResBuf->totalBufSize);

return TSDB_CODE_SUCCESS;
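Note: the buffer setup above is the usual file-backed scratch area: create a temp file, ftruncate it to the full size, mmap it, and report the raw errno (TAOS_SYSTEM_ERROR) instead of a fixed code when any step fails. A generic POSIX sketch of the same sequence, with hypothetical names:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

// Create a file-backed scratch buffer: open a temp file, size it with
// ftruncate, then map it read/write; every failure surfaces strerror(errno).
static void* create_disk_buffer(const char* path, size_t totalSize, int* fdOut) {
  int fd = open(path, O_CREAT | O_RDWR, 0666);
  if (fd < 0) {
    fprintf(stderr, "failed to create tmp file %s: %s\n", path, strerror(errno));
    return NULL;
  }
  if (ftruncate(fd, (off_t)totalSize) != 0) {
    fprintf(stderr, "failed to resize tmp file %s: %s\n", path, strerror(errno));
    close(fd);
    return NULL;
  }
  void* buf = mmap(NULL, totalSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (buf == MAP_FAILED) {
    fprintf(stderr, "failed to map tmp file %s: %s\n", path, strerror(errno));
    close(fd);
    return NULL;
  }
  *fdOut = fd;
  return buf;
}

int main(void) {
  int fd = -1;
  void* buf = create_disk_buffer("/tmp/result_buf_demo", 4096 * 16, &fd);
  if (buf != NULL) {
    memset(buf, 0, 4096);            // pages are now usable as ordinary memory
    munmap(buf, 4096 * 16);
    close(fd);
    unlink("/tmp/result_buf_demo");
  }
  return 0;
}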
@ -86,11 +81,11 @@ static int32_t extendDiskFileSize(SDiskbasedResultBuf* pResultBuf, int32_t numOf
return TSDB_CODE_SUCCESS;
}

static bool noMoreAvailablePages(SDiskbasedResultBuf* pResultBuf) {
static FORCE_INLINE bool noMoreAvailablePages(SDiskbasedResultBuf* pResultBuf) {
return (pResultBuf->allocateId == pResultBuf->numOfPages - 1);
}

static int32_t getGroupIndex(SDiskbasedResultBuf* pResultBuf, int32_t groupId) {
static FORCE_INLINE int32_t getGroupIndex(SDiskbasedResultBuf* pResultBuf, int32_t groupId) {
assert(pResultBuf != NULL);

char* p = taosHashGet(pResultBuf->idsTable, (const char*)&groupId, sizeof(int32_t));

@ -99,61 +94,30 @@ static int32_t getGroupIndex(SDiskbasedResultBuf* pResultBuf, int32_t groupId) {
}

int32_t slot = GET_INT32_VAL(p);
assert(slot >= 0 && slot < pResultBuf->numOfAllocGroupIds);
assert(slot >= 0 && slot < taosHashGetSize(pResultBuf->idsTable));

return slot;
}

static int32_t addNewGroupId(SDiskbasedResultBuf* pResultBuf, int32_t groupId) {
int32_t num = getNumOfResultBufGroupId(pResultBuf); // the num is the newest allocated group id slot

if (pResultBuf->numOfAllocGroupIds <= num) {
size_t n = pResultBuf->numOfAllocGroupIds << 1u;

SIDList* p = (SIDList*)realloc(pResultBuf->list, sizeof(SIDList) * n);
assert(p != NULL);

memset(&p[pResultBuf->numOfAllocGroupIds], 0, sizeof(SIDList) * pResultBuf->numOfAllocGroupIds);

pResultBuf->list = p;
pResultBuf->numOfAllocGroupIds = n;
}

taosHashPut(pResultBuf->idsTable, (const char*)&groupId, sizeof(int32_t), &num, sizeof(int32_t));

SArray* pa = taosArrayInit(1, sizeof(int32_t));
taosArrayPush(pResultBuf->list, &pa);

assert(taosArrayGetSize(pResultBuf->list) == taosHashGetSize(pResultBuf->idsTable));
return num;
}

static int32_t doRegisterId(SIDList* pList, int32_t id) {
if (pList->size >= pList->alloc) {
int32_t s = 0;
if (pList->alloc == 0) {
s = 4;
assert(pList->pData == NULL);
} else {
s = pList->alloc << 1u;
}

int32_t* c = realloc(pList->pData, s * sizeof(int32_t));
assert(c);

memset(&c[pList->alloc], 0, sizeof(int32_t) * pList->alloc);

pList->pData = c;
pList->alloc = s;
}

pList->pData[pList->size++] = id;
return 0;
}

static void registerPageId(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t pageId) {
int32_t slot = getGroupIndex(pResultBuf, groupId);
if (slot < 0) {
slot = addNewGroupId(pResultBuf, groupId);
}

SIDList* pList = &pResultBuf->list[slot];
doRegisterId(pList, pageId);
SIDList pList = taosArrayGetP(pResultBuf->list, slot);
taosArrayPush(pList, &pageId);
}
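Note: the rewrite above replaces the hand-rolled SIDList (pData/size/alloc grown by realloc) with a hash of group ids pointing at taosArray page lists. A self-contained sketch of the same group-id-to-page-list registry; the hash lookup is reduced to a linear scan so the example has no external dependencies:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// "group id -> list of page ids" registry. The per-group id list grows by
// doubling, the same idea the old hand-rolled SIDList used.
typedef struct {
  int32_t  groupId;
  int32_t *pageIds;
  int32_t  size;
  int32_t  alloc;
} GroupPages;

typedef struct {
  GroupPages *groups;
  int32_t     numOfGroups;
} PageRegistry;

static GroupPages* register_page(PageRegistry* r, int32_t groupId, int32_t pageId) {
  GroupPages* g = NULL;
  for (int32_t i = 0; i < r->numOfGroups; ++i) {          // find an existing slot
    if (r->groups[i].groupId == groupId) { g = &r->groups[i]; break; }
  }
  if (g == NULL) {                                         // add a new group slot
    r->groups = realloc(r->groups, sizeof(GroupPages) * (r->numOfGroups + 1));
    g = &r->groups[r->numOfGroups++];
    g->groupId = groupId; g->pageIds = NULL; g->size = 0; g->alloc = 0;
  }
  if (g->size >= g->alloc) {                               // grow the id list by doubling
    g->alloc = (g->alloc == 0) ? 4 : g->alloc * 2;
    g->pageIds = realloc(g->pageIds, sizeof(int32_t) * g->alloc);
  }
  g->pageIds[g->size++] = pageId;
  return g;
}

int main(void) {
  PageRegistry r = {0};
  for (int32_t p = 0; p < 6; ++p) register_page(&r, p % 2, p);
  for (int32_t i = 0; i < r.numOfGroups; ++i) {
    printf("group %d owns %d pages\n", r.groups[i].groupId, r.groups[i].size);
    free(r.groups[i].pageIds);
  }
  free(r.groups);
  return 0;
}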
tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t* pageId) {

@ -178,12 +142,11 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32
int32_t getNumOfRowsPerPage(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->numOfRowsPerPage; }

SIDList getDataBufPagesIdList(SDiskbasedResultBuf* pResultBuf, int32_t groupId) {
SIDList list = {0};
int32_t slot = getGroupIndex(pResultBuf, groupId);
if (slot < 0) {
return list;
return taosArrayInit(1, sizeof(int32_t));
} else {
return pResultBuf->list[slot];
return taosArrayGetP(pResultBuf->list, slot);
}
}

@ -202,22 +165,20 @@ void destroyResultBuf(SDiskbasedResultBuf* pResultBuf, void* handle) {

tfree(pResultBuf->path);

for (int32_t i = 0; i < pResultBuf->numOfAllocGroupIds; ++i) {
SIDList* pList = &pResultBuf->list[i];
tfree(pList->pData);
size_t size = taosArrayGetSize(pResultBuf->list);
for (int32_t i = 0; i < size; ++i) {
SArray* pa = taosArrayGetP(pResultBuf->list, i);
taosArrayDestroy(pa);
}

tfree(pResultBuf->list);
taosArrayDestroy(pResultBuf->list);
taosHashCleanup(pResultBuf->idsTable);

tfree(pResultBuf);
}

int32_t getLastPageId(SIDList *pList) {
if (pList == NULL || pList->size <= 0) {
return -1;
}

return pList->pData[pList->size - 1];
int32_t getLastPageId(SIDList pList) {
size_t size = taosArrayGetSize(pList);
return *(int32_t*) taosArrayGet(pList, size - 1);
}
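Note: the taosArray-based getLastPageId above reads the last element directly and no longer carries the old empty-list guard, so callers are expected to pass a non-empty list. A defensive variant that keeps the guard could look like this sketch (hypothetical IntList type, not the SIDList API):

#include <stdint.h>
#include <stdio.h>

// Hypothetical defensive variant: keep the "empty list" guard while reading
// the most recently registered page id.
typedef struct { int32_t *data; size_t size; } IntList;

static int32_t last_page_id(const IntList *pList) {
  if (pList == NULL || pList->size == 0) {
    return -1;                      // no page registered for this group yet
  }
  return pList->data[pList->size - 1];
}

int main(void) {
  int32_t ids[] = {3, 7, 11};
  IntList  list  = {ids, 3};
  IntList  empty = {NULL, 0};
  printf("last=%d, empty=%d\n", last_page_id(&list), last_page_id(&empty));
  return 0;
}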
@ -55,7 +55,7 @@ typedef struct {
|
|||
char secret[TSDB_KEY_LEN]; // secret for the link
|
||||
char ckey[TSDB_KEY_LEN]; // ciphering key
|
||||
|
||||
void (*cfp)(SRpcMsg *, SRpcIpSet *);
|
||||
void (*cfp)(SRpcMsg *, SRpcEpSet *);
|
||||
int (*afp)(char *user, char *spi, char *encrypt, char *secret, char *ckey);
|
||||
|
||||
int32_t refCount;
|
||||
|
@ -71,7 +71,7 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
SRpcInfo *pRpc; // associated SRpcInfo
|
||||
SRpcIpSet ipSet; // ip list provided by app
|
||||
SRpcEpSet epSet; // ip list provided by app
|
||||
void *ahandle; // handle provided by app
|
||||
void *signature; // for validation
|
||||
struct SRpcConn *pConn; // pConn allocated
|
||||
|
@ -80,12 +80,12 @@ typedef struct {
|
|||
int32_t contLen; // content length
|
||||
int32_t code; // error code
|
||||
int16_t numOfTry; // number of try for different servers
|
||||
int8_t oldInUse; // server IP inUse passed by app
|
||||
int8_t oldInUse; // server EP inUse passed by app
|
||||
int8_t redirect; // flag to indicate redirect
|
||||
int8_t connType; // connection type
|
||||
SRpcMsg *pRsp; // for synchronous API
|
||||
tsem_t *pSem; // for synchronous API
|
||||
SRpcIpSet *pSet; // for synchronous API
|
||||
SRpcEpSet *pSet; // for synchronous API
|
||||
char msg[0]; // RpcHead starts from here
|
||||
} SRpcReqContext;
|
||||
|
||||
|
@ -355,7 +355,7 @@ void *rpcReallocCont(void *ptr, int contLen) {
|
|||
return start + sizeof(SRpcReqContext) + sizeof(SRpcHead);
|
||||
}
|
||||
|
||||
void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, SRpcMsg *pMsg) {
|
||||
void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg) {
|
||||
SRpcInfo *pRpc = (SRpcInfo *)shandle;
|
||||
SRpcReqContext *pContext;
|
||||
|
||||
|
@ -364,11 +364,11 @@ void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, SRpcMsg *pMsg) {
|
|||
pContext->ahandle = pMsg->ahandle;
|
||||
pContext->signature = pContext;
|
||||
pContext->pRpc = (SRpcInfo *)shandle;
|
||||
pContext->ipSet = *pIpSet;
|
||||
pContext->epSet = *pEpSet;
|
||||
pContext->contLen = contLen;
|
||||
pContext->pCont = pMsg->pCont;
|
||||
pContext->msgType = pMsg->msgType;
|
||||
pContext->oldInUse = pIpSet->inUse;
|
||||
pContext->oldInUse = pEpSet->inUse;
|
||||
|
||||
pContext->connType = RPC_CONN_UDPC;
|
||||
if (contLen > tsRpcMaxUdpSize) pContext->connType = RPC_CONN_TCPC;
|
||||
|
@ -458,15 +458,15 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
|
|||
return;
|
||||
}
|
||||
|
||||
void rpcSendRedirectRsp(void *thandle, const SRpcIpSet *pIpSet) {
|
||||
void rpcSendRedirectRsp(void *thandle, const SRpcEpSet *pEpSet) {
|
||||
SRpcMsg rpcMsg;
|
||||
memset(&rpcMsg, 0, sizeof(rpcMsg));
|
||||
|
||||
rpcMsg.contLen = sizeof(SRpcIpSet);
|
||||
rpcMsg.contLen = sizeof(SRpcEpSet);
|
||||
rpcMsg.pCont = rpcMallocCont(rpcMsg.contLen);
|
||||
if (rpcMsg.pCont == NULL) return;
|
||||
|
||||
memcpy(rpcMsg.pCont, pIpSet, sizeof(SRpcIpSet));
|
||||
memcpy(rpcMsg.pCont, pEpSet, sizeof(SRpcEpSet));
|
||||
|
||||
rpcMsg.code = TSDB_CODE_RPC_REDIRECT;
|
||||
rpcMsg.handle = thandle;
|
||||
|
@ -488,7 +488,7 @@ int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
|
||||
void rpcSendRecv(void *shandle, SRpcEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
|
||||
SRpcReqContext *pContext;
|
||||
pContext = (SRpcReqContext *) (pMsg->pCont-sizeof(SRpcHead)-sizeof(SRpcReqContext));
|
||||
|
||||
|
@ -498,9 +498,9 @@ void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, SRpcMsg *pMsg, SRpcMsg *pRsp)
|
|||
tsem_init(&sem, 0, 0);
|
||||
pContext->pSem = &sem;
|
||||
pContext->pRsp = pRsp;
|
||||
pContext->pSet = pIpSet;
|
||||
pContext->pSet = pEpSet;
|
||||
|
||||
rpcSendRequest(shandle, pIpSet, pMsg);
|
||||
rpcSendRequest(shandle, pEpSet, pMsg);
|
||||
|
||||
tsem_wait(&sem);
|
||||
tsem_destroy(&sem);
|
||||
|
@ -755,11 +755,11 @@ static SRpcConn *rpcGetConnObj(SRpcInfo *pRpc, int sid, SRecvInfo *pRecv) {
|
|||
static SRpcConn *rpcSetupConnToServer(SRpcReqContext *pContext) {
|
||||
SRpcConn *pConn;
|
||||
SRpcInfo *pRpc = pContext->pRpc;
|
||||
SRpcIpSet *pIpSet = &pContext->ipSet;
|
||||
SRpcEpSet *pEpSet = &pContext->epSet;
|
||||
|
||||
pConn = rpcGetConnFromCache(pRpc->pCache, pIpSet->fqdn[pIpSet->inUse], pIpSet->port[pIpSet->inUse], pContext->connType);
|
||||
pConn = rpcGetConnFromCache(pRpc->pCache, pEpSet->fqdn[pEpSet->inUse], pEpSet->port[pEpSet->inUse], pContext->connType);
|
||||
if ( pConn == NULL || pConn->user[0] == 0) {
|
||||
pConn = rpcOpenConn(pRpc, pIpSet->fqdn[pIpSet->inUse], pIpSet->port[pIpSet->inUse], pContext->connType);
|
||||
pConn = rpcOpenConn(pRpc, pEpSet->fqdn[pEpSet->inUse], pEpSet->port[pEpSet->inUse], pContext->connType);
|
||||
}
|
||||
|
||||
if (pConn) {
|
||||
|
@ -1020,16 +1020,16 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
|
|||
pContext->pConn = NULL;
|
||||
if (pContext->pRsp) {
|
||||
// for synchronous API
|
||||
memcpy(pContext->pSet, &pContext->ipSet, sizeof(SRpcIpSet));
|
||||
memcpy(pContext->pSet, &pContext->epSet, sizeof(SRpcEpSet));
|
||||
memcpy(pContext->pRsp, pMsg, sizeof(SRpcMsg));
|
||||
tsem_post(pContext->pSem);
|
||||
} else {
|
||||
// for asynchronous API
|
||||
SRpcIpSet *pIpSet = NULL;
|
||||
if (pContext->ipSet.inUse != pContext->oldInUse || pContext->redirect)
|
||||
pIpSet = &pContext->ipSet;
|
||||
SRpcEpSet *pEpSet = NULL;
|
||||
if (pContext->epSet.inUse != pContext->oldInUse || pContext->redirect)
|
||||
pEpSet = &pContext->epSet;
|
||||
|
||||
(*pRpc->cfp)(pMsg, pIpSet);
|
||||
(*pRpc->cfp)(pMsg, pEpSet);
|
||||
}
|
||||
|
||||
// free the request message
|
||||
|
@ -1070,9 +1070,9 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
pConn->pContext = NULL;
pConn->pReqMsg = NULL;

// for UDP, port may be changed by server, the port in ipSet shall be used for cache
// for UDP, port may be changed by server, the port in epSet shall be used for cache
if (pHead->code != TSDB_CODE_RPC_TOO_SLOW) {
rpcAddConnIntoCache(pRpc->pCache, pConn, pConn->peerFqdn, pContext->ipSet.port[pContext->ipSet.inUse], pConn->connType);
rpcAddConnIntoCache(pRpc->pCache, pConn, pConn->peerFqdn, pContext->epSet.port[pContext->epSet.inUse], pConn->connType);
} else {
rpcCloseConn(pConn);
}

@ -1087,10 +1087,10 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {

if (pHead->code == TSDB_CODE_RPC_REDIRECT) {
pContext->numOfTry = 0;
memcpy(&pContext->ipSet, pHead->content, sizeof(pContext->ipSet));
tDebug("%s, redirect is received, numOfIps:%d", pConn->info, pContext->ipSet.numOfIps);
for (int i=0; i<pContext->ipSet.numOfIps; ++i)
pContext->ipSet.port[i] = htons(pContext->ipSet.port[i]);
memcpy(&pContext->epSet, pHead->content, sizeof(pContext->epSet));
tDebug("%s, redirect is received, numOfEps:%d", pConn->info, pContext->epSet.numOfEps);
for (int i=0; i<pContext->epSet.numOfEps; ++i)
pContext->epSet.port[i] = htons(pContext->epSet.port[i]);
rpcSendReqToServer(pRpc, pContext);
rpcFreeCont(rpcMsg.pCont);
} else if (pHead->code == TSDB_CODE_RPC_NOT_READY) {

@ -1269,7 +1269,7 @@ static void rpcProcessConnError(void *param, void *id) {

tDebug("%s %p, connection error happens", pRpc->label, pContext->ahandle);

if (pContext->numOfTry >= pContext->ipSet.numOfIps) {
if (pContext->numOfTry >= pContext->epSet.numOfEps) {
rpcMsg.msgType = pContext->msgType+1;
rpcMsg.ahandle = pContext->ahandle;
rpcMsg.code = pContext->code;

@ -1279,8 +1279,8 @@ static void rpcProcessConnError(void *param, void *id) {
rpcNotifyClient(pContext, &rpcMsg);
} else {
// move to next IP
pContext->ipSet.inUse++;
pContext->ipSet.inUse = pContext->ipSet.inUse % pContext->ipSet.numOfIps;
pContext->epSet.inUse++;
pContext->epSet.inUse = pContext->epSet.inUse % pContext->epSet.numOfEps;
rpcSendReqToServer(pRpc, pContext);
}
}
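Note: the failover logic above is a plain round robin over the endpoint set: on a connection error, advance inUse modulo numOfEps and retry, and give up once numOfTry reaches the endpoint count. A small stand-alone sketch of that policy with illustrative names (not the rpc module's API):

#include <stdio.h>

typedef struct {
  int inUse;
  int numOfEps;
  const char *fqdn[4];
} EpSet;

// Return the index of the next endpoint to try, or -1 once every endpoint has
// been attempted.
static int pick_next_ep(EpSet *ep, int *numOfTry) {
  if (*numOfTry >= ep->numOfEps) {
    return -1;                                  // all endpoints exhausted, report the error
  }
  ep->inUse = (ep->inUse + 1) % ep->numOfEps;   // move to the next endpoint, wrapping around
  (*numOfTry)++;
  return ep->inUse;
}

int main(void) {
  EpSet ep = {.inUse = 0, .numOfEps = 3, .fqdn = {"node1", "node2", "node3"}};
  int tries = 1;                                // the first endpoint already failed
  int idx;
  while ((idx = pick_next_ep(&ep, &tries)) >= 0) {
    printf("retrying on %s\n", ep.fqdn[idx]);
  }
  printf("no endpoint left, giving up\n");
  return 0;
}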
@ -374,7 +374,7 @@ int taosSendTcpData(uint32_t ip, uint16_t port, void *data, int len, void *chand

if (chandle == NULL) return -1;

return (int)send(pFdObj->fd, data, (size_t)len, 0);
return taosWriteMsg(pFdObj->fd, data, len);
}

static void taosReportBrokenLink(SFdObj *pFdObj) {
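Note: replacing the bare send() with taosWriteMsg matters because a TCP send may accept fewer bytes than requested. A generic write-until-done loop (plain POSIX sketch, not the tutil implementation) looks like:

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

// Write the whole buffer, retrying on partial writes and EINTR. A bare
// write()/send() on a socket can legitimately return a short count.
static ssize_t write_fully(int fd, const void *buf, size_t len) {
  const char *p = buf;
  size_t left = len;
  while (left > 0) {
    ssize_t n = write(fd, p, left);
    if (n < 0) {
      if (errno == EINTR) continue;   // interrupted by a signal, just retry
      return -1;                      // real error, let the caller inspect errno
    }
    p += n;                           // advance past the bytes already written
    left -= (size_t)n;
  }
  return (ssize_t)len;
}

int main(void) {
  const char msg[] = "hello over a fully-written descriptor\n";
  return write_fully(STDOUT_FILENO, msg, sizeof(msg) - 1) < 0 ? 1 : 0;
}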
@ -22,7 +22,7 @@
|
|||
|
||||
typedef struct {
|
||||
int index;
|
||||
SRpcIpSet ipSet;
|
||||
SRpcEpSet epSet;
|
||||
int num;
|
||||
int numOfReqs;
|
||||
int msgSize;
|
||||
|
@ -32,11 +32,11 @@ typedef struct {
|
|||
void *pRpc;
|
||||
} SInfo;
|
||||
|
||||
static void processResponse(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
|
||||
static void processResponse(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
|
||||
SInfo *pInfo = (SInfo *)pMsg->ahandle;
|
||||
tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen, pMsg->code);
|
||||
|
||||
if (pIpSet) pInfo->ipSet = *pIpSet;
|
||||
if (pEpSet) pInfo->epSet = *pEpSet;
|
||||
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
sem_post(&pInfo->rspSem);
|
||||
|
@ -57,7 +57,7 @@ static void *sendRequest(void *param) {
|
|||
rpcMsg.ahandle = pInfo;
|
||||
rpcMsg.msgType = 1;
|
||||
tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num);
|
||||
rpcSendRequest(pInfo->pRpc, &pInfo->ipSet, &rpcMsg);
|
||||
rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg);
|
||||
if ( pInfo->num % 20000 == 0 )
|
||||
tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num);
|
||||
sem_wait(&pInfo->rspSem);
|
||||
|
@ -71,7 +71,7 @@ static void *sendRequest(void *param) {
|
|||
|
||||
int main(int argc, char *argv[]) {
|
||||
SRpcInit rpcInit;
|
||||
SRpcIpSet ipSet;
|
||||
SRpcEpSet epSet;
|
||||
int msgSize = 128;
|
||||
int numOfReqs = 0;
|
||||
int appThreads = 1;
|
||||
|
@ -82,12 +82,12 @@ int main(int argc, char *argv[]) {
|
|||
pthread_attr_t thattr;
|
||||
|
||||
// server info
|
||||
ipSet.numOfIps = 1;
|
||||
ipSet.inUse = 0;
|
||||
ipSet.port[0] = 7000;
|
||||
ipSet.port[1] = 7000;
|
||||
strcpy(ipSet.fqdn[0], serverIp);
|
||||
strcpy(ipSet.fqdn[1], "192.168.0.1");
|
||||
epSet.numOfEps = 1;
|
||||
epSet.inUse = 0;
|
||||
epSet.port[0] = 7000;
|
||||
epSet.port[1] = 7000;
|
||||
strcpy(epSet.fqdn[0], serverIp);
|
||||
strcpy(epSet.fqdn[1], "192.168.0.1");
|
||||
|
||||
// client info
|
||||
memset(&rpcInit, 0, sizeof(rpcInit));
|
||||
|
@ -105,9 +105,9 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
for (int i=1; i<argc; ++i) {
|
||||
if (strcmp(argv[i], "-p")==0 && i < argc-1) {
|
||||
ipSet.port[0] = atoi(argv[++i]);
|
||||
epSet.port[0] = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-i") ==0 && i < argc-1) {
|
||||
tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn[0]));
|
||||
tstrncpy(epSet.fqdn[0], argv[++i], sizeof(epSet.fqdn[0]));
|
||||
} else if (strcmp(argv[i], "-t")==0 && i < argc-1) {
|
||||
rpcInit.numOfThreads = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-m")==0 && i < argc-1) {
|
||||
|
@ -131,7 +131,7 @@ int main(int argc, char *argv[]) {
|
|||
} else {
|
||||
printf("\nusage: %s [options] \n", argv[0]);
|
||||
printf(" [-i ip]: first server IP address, default is:%s\n", serverIp);
|
||||
printf(" [-p port]: server port number, default is:%d\n", ipSet.port[0]);
|
||||
printf(" [-p port]: server port number, default is:%d\n", epSet.port[0]);
|
||||
printf(" [-t threads]: number of rpc threads, default is:%d\n", rpcInit.numOfThreads);
|
||||
printf(" [-s sessions]: number of rpc sessions, default is:%d\n", rpcInit.sessions);
|
||||
printf(" [-m msgSize]: message body size, default is:%d\n", msgSize);
|
||||
|
@ -168,7 +168,7 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
for (int i=0; i<appThreads; ++i) {
|
||||
pInfo->index = i;
|
||||
pInfo->ipSet = ipSet;
|
||||
pInfo->epSet = epSet;
|
||||
pInfo->numOfReqs = numOfReqs;
|
||||
pInfo->msgSize = msgSize;
|
||||
sem_init(&pInfo->rspSem, 0, 0);
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
|
||||
typedef struct {
|
||||
int index;
|
||||
SRpcIpSet ipSet;
|
||||
SRpcEpSet epSet;
|
||||
int num;
|
||||
int numOfReqs;
|
||||
int msgSize;
|
||||
|
@ -51,7 +51,7 @@ static void *sendRequest(void *param) {
|
|||
rpcMsg.msgType = 1;
|
||||
tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num);
|
||||
|
||||
rpcSendRecv(pInfo->pRpc, &pInfo->ipSet, &rpcMsg, &rspMsg);
|
||||
rpcSendRecv(pInfo->pRpc, &pInfo->epSet, &rpcMsg, &rspMsg);
|
||||
|
||||
// handle response
|
||||
if (rspMsg.code != 0) terror++;
|
||||
|
@ -72,7 +72,7 @@ static void *sendRequest(void *param) {
|
|||
|
||||
int main(int argc, char *argv[]) {
|
||||
SRpcInit rpcInit;
|
||||
SRpcIpSet ipSet;
|
||||
SRpcEpSet epSet;
|
||||
int msgSize = 128;
|
||||
int numOfReqs = 0;
|
||||
int appThreads = 1;
|
||||
|
@ -83,12 +83,12 @@ int main(int argc, char *argv[]) {
|
|||
pthread_attr_t thattr;
|
||||
|
||||
// server info
|
||||
ipSet.numOfIps = 1;
|
||||
ipSet.inUse = 0;
|
||||
ipSet.port[0] = 7000;
|
||||
ipSet.port[1] = 7000;
|
||||
strcpy(ipSet.fqdn[0], serverIp);
|
||||
strcpy(ipSet.fqdn[1], "192.168.0.1");
|
||||
epSet.numOfEps = 1;
|
||||
epSet.inUse = 0;
|
||||
epSet.port[0] = 7000;
|
||||
epSet.port[1] = 7000;
|
||||
strcpy(epSet.fqdn[0], serverIp);
|
||||
strcpy(epSet.fqdn[1], "192.168.0.1");
|
||||
|
||||
// client info
|
||||
memset(&rpcInit, 0, sizeof(rpcInit));
|
||||
|
@ -106,9 +106,9 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
for (int i=1; i<argc; ++i) {
|
||||
if (strcmp(argv[i], "-p")==0 && i < argc-1) {
|
||||
ipSet.port[0] = atoi(argv[++i]);
|
||||
epSet.port[0] = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-i") ==0 && i < argc-1) {
|
||||
tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn[0]));
|
||||
tstrncpy(epSet.fqdn[0], argv[++i], sizeof(epSet.fqdn[0]));
|
||||
} else if (strcmp(argv[i], "-t")==0 && i < argc-1) {
|
||||
rpcInit.numOfThreads = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-m")==0 && i < argc-1) {
|
||||
|
@ -132,7 +132,7 @@ int main(int argc, char *argv[]) {
|
|||
} else {
|
||||
printf("\nusage: %s [options] \n", argv[0]);
|
||||
printf(" [-i ip]: first server IP address, default is:%s\n", serverIp);
|
||||
printf(" [-p port]: server port number, default is:%d\n", ipSet.port[0]);
|
||||
printf(" [-p port]: server port number, default is:%d\n", epSet.port[0]);
|
||||
printf(" [-t threads]: number of rpc threads, default is:%d\n", rpcInit.numOfThreads);
|
||||
printf(" [-s sessions]: number of rpc sessions, default is:%d\n", rpcInit.sessions);
|
||||
printf(" [-m msgSize]: message body size, default is:%d\n", msgSize);
|
||||
|
@ -168,7 +168,7 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
for (int i=0; i<appThreads; ++i) {
|
||||
pInfo->index = i;
|
||||
pInfo->ipSet = ipSet;
|
||||
pInfo->epSet = epSet;
|
||||
pInfo->numOfReqs = numOfReqs;
|
||||
pInfo->msgSize = msgSize;
|
||||
sem_init(&pInfo->rspSem, 0, 0);
|
||||
|
|
|
@ -103,7 +103,7 @@ int retrieveAuthInfo(char *meterId, char *spi, char *encrypt, char *secret, char
|
|||
return ret;
|
||||
}
|
||||
|
||||
void processRequestMsg(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
|
||||
void processRequestMsg(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
|
||||
SRpcMsg *pTemp;
|
||||
|
||||
pTemp = taosAllocateQitem(sizeof(SRpcMsg));
|
||||
|
|
|
@ -148,7 +148,7 @@ void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) {
|
|||
STsdbRepo *pRepo = (STsdbRepo *)repo;
|
||||
int vgId = REPO_ID(pRepo);
|
||||
|
||||
tsdbStopStream(repo);
|
||||
tsdbStopStream(pRepo);
|
||||
|
||||
if (toCommit) {
|
||||
tsdbAsyncCommit(pRepo);
|
||||
|
@ -1126,6 +1126,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
static void tsdbStopStream(STsdbRepo *pRepo) {
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
|
||||
|
|
|
@ -172,7 +172,7 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
|
|||
STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo);
|
||||
|
||||
if (pBufBlock != NULL && pBufBlock->remain < bytes) {
|
||||
if (listNEles(pRepo->mem->bufBlockList) >= pCfg->totalBlocks / 2) { // need to commit mem
|
||||
if (listNEles(pRepo->mem->bufBlockList) >= pCfg->totalBlocks / 3) { // need to commit mem
|
||||
if (tsdbAsyncCommit(pRepo) < 0) return NULL;
|
||||
} else {
|
||||
if (tsdbLockRepo(pRepo) < 0) return NULL;
|
||||
|
@ -204,6 +204,9 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
|
|||
pBufBlock->offset += bytes;
|
||||
pBufBlock->remain -= bytes;
|
||||
|
||||
tsdbTrace("vgId:%d allocate %d bytes from buffer block, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes,
|
||||
listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain);
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
|
@ -324,6 +327,8 @@ static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes) {
|
|||
pBufBlock->offset -= bytes;
|
||||
pBufBlock->remain += bytes;
|
||||
ASSERT(ptr == POINTER_SHIFT(pBufBlock->data, pBufBlock->offset));
|
||||
tsdbTrace("vgId:%d return %d bytes to buffer block, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes,
|
||||
listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain);
|
||||
}
|
||||
|
||||
static SMemTable* tsdbNewMemTable(STsdbCfg* pCfg) {
|
||||
|
|
|
@ -592,7 +592,7 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema,
|
|||
int tlen = tsdbGetTableEncodeSize(TSDB_UPDATE_META, pCTable);
|
||||
void *buf = tsdbAllocBytes(pRepo, tlen);
|
||||
ASSERT(buf != NULL);
|
||||
tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pTable);
|
||||
tsdbInsertTableAct(pRepo, TSDB_UPDATE_META, buf, pCTable);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -589,20 +589,25 @@ void tsdbGetDataStatis(SRWHelper *pHelper, SDataStatis *pStatis, int numOfCols)

int tsdbLoadBlockDataCols(SRWHelper *pHelper, SCompBlock *pCompBlock, SCompInfo *pCompInfo, int16_t *colIds, int numOfColIds) {
ASSERT(pCompBlock->numOfSubBlocks >= 1); // Must be super block
SCompBlock *pTCompBlock = pCompBlock;

int numOfSubBlocks = pCompBlock->numOfSubBlocks;
if (numOfSubBlocks > 1)
pCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? pHelper->pCompInfo : pCompInfo, pCompBlock->offset);
pTCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? pHelper->pCompInfo : pCompInfo, pCompBlock->offset);

tdResetDataCols(pHelper->pDataCols[0]);
if (tsdbLoadBlockDataColsImpl(pHelper, pCompBlock, pHelper->pDataCols[0], colIds, numOfColIds) < 0) goto _err;
if (tsdbLoadBlockDataColsImpl(pHelper, pTCompBlock, pHelper->pDataCols[0], colIds, numOfColIds) < 0) goto _err;
for (int i = 1; i < numOfSubBlocks; i++) {
tdResetDataCols(pHelper->pDataCols[1]);
pCompBlock++;
if (tsdbLoadBlockDataColsImpl(pHelper, pCompBlock, pHelper->pDataCols[1], colIds, numOfColIds) < 0) goto _err;
pTCompBlock++;
if (tsdbLoadBlockDataColsImpl(pHelper, pTCompBlock, pHelper->pDataCols[1], colIds, numOfColIds) < 0) goto _err;
if (tdMergeDataCols(pHelper->pDataCols[0], pHelper->pDataCols[1], pHelper->pDataCols[1]->numOfRows) < 0) goto _err;
}

ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows &&
dataColsKeyFirst(pHelper->pDataCols[0]) == pCompBlock->keyFirst &&
dataColsKeyLast(pHelper->pDataCols[0]) == pCompBlock->keyLast);

return 0;

_err:

@ -610,19 +615,25 @@ _err:
}

int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock, SCompInfo *pCompInfo) {
SCompBlock *pTCompBlock = pCompBlock;

int numOfSubBlock = pCompBlock->numOfSubBlocks;
if (numOfSubBlock > 1)
pCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? pHelper->pCompInfo : pCompInfo, pCompBlock->offset);
pTCompBlock = (SCompBlock *)POINTER_SHIFT((pCompInfo == NULL) ? pHelper->pCompInfo : pCompInfo, pCompBlock->offset);

tdResetDataCols(pHelper->pDataCols[0]);
if (tsdbLoadBlockDataImpl(pHelper, pCompBlock, pHelper->pDataCols[0]) < 0) goto _err;
if (tsdbLoadBlockDataImpl(pHelper, pTCompBlock, pHelper->pDataCols[0]) < 0) goto _err;
for (int i = 1; i < numOfSubBlock; i++) {
tdResetDataCols(pHelper->pDataCols[1]);
pCompBlock++;
if (tsdbLoadBlockDataImpl(pHelper, pCompBlock, pHelper->pDataCols[1]) < 0) goto _err;
pTCompBlock++;
if (tsdbLoadBlockDataImpl(pHelper, pTCompBlock, pHelper->pDataCols[1]) < 0) goto _err;
if (tdMergeDataCols(pHelper->pDataCols[0], pHelper->pDataCols[1], pHelper->pDataCols[1]->numOfRows) < 0) goto _err;
}

ASSERT(pHelper->pDataCols[0]->numOfRows == pCompBlock->numOfRows &&
dataColsKeyFirst(pHelper->pDataCols[0]) == pCompBlock->keyFirst &&
dataColsKeyLast(pHelper->pDataCols[0]) == pCompBlock->keyLast);

return 0;

_err:
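Note: the fix in both loaders above stops advancing the caller's pCompBlock: the super block is still needed for the final row-count assertion, so a separate cursor (pTCompBlock) walks the sub-blocks. A trimmed-down sketch of that pattern with stand-in types, not the tsdb structures:

#include <assert.h>
#include <stdio.h>

// Keep the caller's super-block pointer intact and walk sub-blocks through a
// separate cursor, mirroring the pCompBlock/pTCompBlock split above.
typedef struct {
  int numOfSubBlocks;
  int numOfRows;
  int offset;       // index of the first sub-block in `subBlocks`
} Block;

static int load_block(const Block *super, const Block *subBlocks, int *rowsOut) {
  const Block *cursor = super;                 // cursor starts at the super block itself
  if (super->numOfSubBlocks > 1) {
    cursor = &subBlocks[super->offset];        // ... or at its first sub-block
  }

  int rows = cursor->numOfRows;                // load the first (sub-)block
  for (int i = 1; i < super->numOfSubBlocks; i++) {
    cursor++;                                  // advance the cursor, not `super`
    rows += cursor->numOfRows;                 // merge the next sub-block in
  }

  assert(rows == super->numOfRows);            // `super` is still valid for this check
  *rowsOut = rows;
  return 0;
}

int main(void) {
  Block subs[] = {{1, 40, 0}, {1, 60, 0}};
  Block super  = {2, 100, 0};
  int rows = 0;
  load_block(&super, subs, &rows);
  printf("merged %d rows\n", rows);
  return 0;
}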
@ -1227,7 +1238,6 @@ static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDa
|
|||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
goto _err;
|
||||
}
|
||||
ASSERT(pCompData->numOfCols == pCompBlock->numOfCols);
|
||||
|
||||
int32_t tsize = TSDB_GET_COMPCOL_LEN(pCompBlock->numOfCols);
|
||||
if (!taosCheckChecksumWhole((uint8_t *)pCompData, tsize)) {
|
||||
|
@ -1236,6 +1246,7 @@ static int tsdbLoadBlockDataImpl(SRWHelper *pHelper, SCompBlock *pCompBlock, SDa
|
|||
terrno = TSDB_CODE_TDB_FILE_CORRUPTED;
|
||||
goto _err;
|
||||
}
|
||||
ASSERT(pCompData->numOfCols == pCompBlock->numOfCols);
|
||||
|
||||
pDataCols->numOfRows = pCompBlock->numOfRows;
|
||||
|
||||
|
|
|
@ -90,6 +90,12 @@ typedef struct SBlockOrderSupporter {
int32_t* numOfBlocksPerTable;
} SBlockOrderSupporter;

typedef struct SIOCostSummary {
int64_t blockLoadTime;
int64_t statisInfoLoadTime;
int64_t checkForNextTime;
} SIOCostSummary;

typedef struct STsdbQueryHandle {
STsdbRepo* pTsdb;
SQueryFilePos cur; // current position

@ -101,7 +107,7 @@ typedef struct STsdbQueryHandle {
bool locateStart;
int32_t outputCapacity;
int32_t realNumOfRows;
SArray* pTableCheckInfo; //SArray<STableCheckInfo>
SArray* pTableCheckInfo; // SArray<STableCheckInfo>
int32_t activeIndex;
bool checkFiles; // check file stage
void* qinfo; // query info handle, for debug purpose

@ -116,6 +122,8 @@ typedef struct STsdbQueryHandle {
SArray* defaultLoadColumn;// default load column
SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */
SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */

SIOCostSummary cost;
} STsdbQueryHandle;

static void changeQueryHandleForLastrowQuery(TsdbQueryHandleT pqHandle);
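Note: SIOCostSummary above simply accumulates elapsed microseconds per phase of the query handle; each phase takes a timestamp before the work, adds the difference to its counter, and the totals are logged when the handle is cleaned up. A minimal sketch of that instrumentation with gettimeofday:

#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

// Per-handle IO cost counters, mirroring the fields introduced above.
typedef struct {
  int64_t blockLoadTime;
  int64_t statisInfoLoadTime;
  int64_t checkForNextTime;
} IOCostSummary;

static int64_t now_us(void) {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
}

static void load_block(IOCostSummary *cost) {
  int64_t st = now_us();
  // ... do the actual block load here ...
  cost->blockLoadTime += now_us() - st;   // charge the elapsed time to this phase
}

int main(void) {
  IOCostSummary cost = {0};
  for (int i = 0; i < 3; ++i) load_block(&cost);
  printf("io-cost summary: statis-info:%lldus, datablock:%lldus, check data:%lldus\n",
         (long long)cost.statisInfoLoadTime, (long long)cost.blockLoadTime,
         (long long)cost.checkForNextTime);
  return 0;
}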
@ -183,6 +191,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
|
|||
free(pQueryHandle);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &pQueryHandle->mem, &pQueryHandle->imem);
|
||||
|
||||
size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList);
|
||||
|
@ -340,6 +349,11 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
|
|||
return true;
|
||||
}
|
||||
|
||||
static void destroyTableMemIterator(STableCheckInfo* pCheckInfo) {
|
||||
tSkipListDestroyIter(pCheckInfo->iter);
|
||||
tSkipListDestroyIter(pCheckInfo->iiter);
|
||||
}
|
||||
|
||||
SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
|
||||
SDataRow rmem = NULL, rimem = NULL;
|
||||
if (pCheckInfo->iter) {
|
||||
|
@ -384,7 +398,7 @@ SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
bool moveToNextRow(STableCheckInfo* pCheckInfo) {
|
||||
static bool moveToNextRow(STableCheckInfo* pCheckInfo) {
|
||||
bool hasNext = false;
|
||||
if (pCheckInfo->chosen == 0) {
|
||||
if (pCheckInfo->iter != NULL) {
|
||||
|
@ -507,7 +521,7 @@ static int32_t binarySearchForBlock(SCompBlock* pBlock, int32_t numOfBlocks, TSK
|
|||
return midSlot;
|
||||
}
|
||||
|
||||
static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlocks, int32_t type) {
|
||||
static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlocks) {
|
||||
SFileGroup* fileGroup = pQueryHandle->pFileGroup;
|
||||
assert(fileGroup->files[TSDB_FILE_TYPE_HEAD].fname > 0);
|
||||
|
||||
|
@ -524,52 +538,61 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
|
|||
|
||||
for (int32_t i = 0; i < numOfTables; ++i) {
|
||||
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
|
||||
pCheckInfo->numOfBlocks = 0;
|
||||
|
||||
SCompIdx* compIndex = &pQueryHandle->rhelper.pCompIdx[pCheckInfo->tableId.tid];
|
||||
if (compIndex->len == 0 || compIndex->numOfBlocks == 0 ||
|
||||
compIndex->uid != pCheckInfo->tableId.uid) { // no data block in this file, try next file
|
||||
pCheckInfo->numOfBlocks = 0;
|
||||
continue; // no data blocks in the file belongs to pCheckInfo->pTable
|
||||
} else {
|
||||
if (pCheckInfo->compSize < compIndex->len) {
|
||||
assert(compIndex->len > 0);
|
||||
|
||||
char* t = realloc(pCheckInfo->pCompInfo, compIndex->len);
|
||||
assert(t != NULL);
|
||||
|
||||
pCheckInfo->pCompInfo = (SCompInfo*) t;
|
||||
pCheckInfo->compSize = compIndex->len;
|
||||
}
|
||||
|
||||
tsdbSetHelperTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj, pQueryHandle->pTsdb);
|
||||
|
||||
tsdbLoadCompInfo(&(pQueryHandle->rhelper), (void *)(pCheckInfo->pCompInfo));
|
||||
SCompInfo* pCompInfo = pCheckInfo->pCompInfo;
|
||||
|
||||
TSKEY s = MIN(pCheckInfo->lastKey, pQueryHandle->window.ekey);
|
||||
TSKEY e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey);
|
||||
|
||||
// discard the unqualified data block based on the query time window
|
||||
int32_t start = binarySearchForBlock(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
|
||||
int32_t end = start;
|
||||
|
||||
if (s > pCompInfo->blocks[start].keyLast) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// todo speedup the procedure of located end block
|
||||
while (end < compIndex->numOfBlocks && (pCompInfo->blocks[end].keyFirst <= e)) {
|
||||
end += 1;
|
||||
}
|
||||
|
||||
pCheckInfo->numOfBlocks = (end - start);
|
||||
|
||||
if (start > 0) {
|
||||
memmove(pCompInfo->blocks, &pCompInfo->blocks[start], pCheckInfo->numOfBlocks * sizeof(SCompBlock));
|
||||
}
|
||||
|
||||
(*numOfBlocks) += pCheckInfo->numOfBlocks;
|
||||
// no data block in this file, try next file
|
||||
if (compIndex->len == 0 || compIndex->numOfBlocks == 0 || compIndex->uid != pCheckInfo->tableId.uid) {
|
||||
continue; // no data blocks in the file belongs to pCheckInfo->pTable
|
||||
}
|
||||
|
||||
if (pCheckInfo->compSize < compIndex->len) {
|
||||
assert(compIndex->len > 0);
|
||||
|
||||
char* t = realloc(pCheckInfo->pCompInfo, compIndex->len);
|
||||
assert(t != NULL);
|
||||
|
||||
pCheckInfo->pCompInfo = (SCompInfo*) t;
|
||||
pCheckInfo->compSize = compIndex->len;
|
||||
}
|
||||
|
||||
tsdbSetHelperTable(&pQueryHandle->rhelper, pCheckInfo->pTableObj, pQueryHandle->pTsdb);
|
||||
|
||||
tsdbLoadCompInfo(&(pQueryHandle->rhelper), (void *)(pCheckInfo->pCompInfo));
|
||||
SCompInfo* pCompInfo = pCheckInfo->pCompInfo;
|
||||
|
||||
TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL;
|
||||
|
||||
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
|
||||
assert(pCheckInfo->lastKey <= pQueryHandle->window.ekey && pQueryHandle->window.skey <= pQueryHandle->window.ekey);
|
||||
} else {
|
||||
assert(pCheckInfo->lastKey >= pQueryHandle->window.ekey && pQueryHandle->window.skey >= pQueryHandle->window.ekey);
|
||||
}
|
||||
|
||||
s = MIN(pCheckInfo->lastKey, pQueryHandle->window.ekey);
|
||||
e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey);
|
||||
|
||||
// discard the unqualified data block based on the query time window
|
||||
int32_t start = binarySearchForBlock(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
|
||||
int32_t end = start;
|
||||
|
||||
if (s > pCompInfo->blocks[start].keyLast) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// todo speedup the procedure of located end block
|
||||
while (end < compIndex->numOfBlocks && (pCompInfo->blocks[end].keyFirst <= e)) {
|
||||
end += 1;
|
||||
}
|
||||
|
||||
pCheckInfo->numOfBlocks = (end - start);
|
||||
|
||||
if (start > 0) {
|
||||
memmove(pCompInfo->blocks, &pCompInfo->blocks[start], pCheckInfo->numOfBlocks * sizeof(SCompBlock));
|
||||
}
|
||||
|
||||
(*numOfBlocks) += pCheckInfo->numOfBlocks;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -583,22 +606,13 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
|
|||
.uid = (_checkInfo)->tableId.uid})
|
||||
|
||||
|
||||
|
||||
static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock, STableCheckInfo* pCheckInfo) {
|
||||
STsdbRepo *pRepo = pQueryHandle->pTsdb;
|
||||
|
||||
// TODO refactor
|
||||
SCompData* data = calloc(1, sizeof(SCompData) + sizeof(SCompCol) * pBlock->numOfCols);
|
||||
|
||||
data->numOfCols = pBlock->numOfCols;
|
||||
data->uid = pCheckInfo->pTableObj->tableId.uid;
|
||||
|
||||
bool blockLoaded = false;
|
||||
int64_t st = taosGetTimestampUs();
|
||||
bool blockLoaded = false;
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
if (pCheckInfo->pDataCols == NULL) {
|
||||
STsdbMeta* pMeta = tsdbGetMeta(pRepo);
|
||||
// TODO
|
||||
pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
|
||||
}
|
||||
|
||||
|
@ -607,7 +621,9 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
|
|||
tdInitDataCols(pQueryHandle->rhelper.pDataCols[0], pSchema);
|
||||
tdInitDataCols(pQueryHandle->rhelper.pDataCols[1], pSchema);
|
||||
|
||||
if (tsdbLoadBlockData(&(pQueryHandle->rhelper), pBlock, pCheckInfo->pCompInfo) == 0) {
|
||||
int16_t* colIds = pQueryHandle->defaultLoadColumn->pData;
|
||||
int32_t ret = tsdbLoadBlockDataCols(&(pQueryHandle->rhelper), pBlock, pCheckInfo->pCompInfo, colIds, QH_GET_NUM_OF_COLS(pQueryHandle));
|
||||
if (ret == TSDB_CODE_SUCCESS) {
|
||||
SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo;
|
||||
|
||||
pBlockLoadInfo->fileGroup = pQueryHandle->pFileGroup;
|
||||
|
@ -621,10 +637,10 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
|
|||
assert(pCols->numOfRows != 0 && pCols->numOfRows <= pBlock->numOfRows);
|
||||
|
||||
pBlock->numOfRows = pCols->numOfRows;
|
||||
tfree(data);
|
||||
|
||||
int64_t et = taosGetTimestampUs() - st;
|
||||
tsdbDebug("%p load file block into buffer, elapsed time:%"PRId64 " us", pQueryHandle, et);
|
||||
int64_t elapsedTime = (taosGetTimestampUs() - st);
|
||||
pQueryHandle->cost.blockLoadTime += elapsedTime;
|
||||
tsdbDebug("%p load file block into buffer, elapsed time:%"PRId64 " us", pQueryHandle, elapsedTime);
|
||||
|
||||
return blockLoaded;
|
||||
}
|
||||
|
@ -1246,7 +1262,7 @@ static int32_t dataBlockOrderCompar(const void* pLeft, const void* pRight, void*
|
|||
pLeftBlockInfoEx->compBlock->last == pRightBlockInfoEx->compBlock->last) {
|
||||
tsdbError("error in header file, two block with same offset:%" PRId64, (int64_t)pLeftBlockInfoEx->compBlock->offset);
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
return pLeftBlockInfoEx->compBlock->offset > pRightBlockInfoEx->compBlock->offset ? 1 : -1;
|
||||
}
|
||||
|
@ -1367,7 +1383,6 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// todo opt for only one table case
|
||||
static int32_t getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle, bool* exists) {
|
||||
pQueryHandle->numOfBlocks = 0;
|
||||
SQueryFilePos* cur = &pQueryHandle->cur;
|
||||
|
@ -1378,8 +1393,7 @@ static int32_t getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle, bool* ex
|
|||
int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
|
||||
while ((pQueryHandle->pFileGroup = tsdbGetFileGroupNext(&pQueryHandle->fileIter)) != NULL) {
|
||||
int32_t type = ASCENDING_TRAVERSE(pQueryHandle->order)? QUERY_RANGE_GREATER_EQUAL:QUERY_RANGE_LESS_EQUAL;
|
||||
if ((code = getFileCompInfo(pQueryHandle, &numOfBlocks, type)) != TSDB_CODE_SUCCESS) {
|
||||
if ((code = getFileCompInfo(pQueryHandle, &numOfBlocks)) != TSDB_CODE_SUCCESS) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1487,7 +1501,10 @@ static bool doHasDataInBuffer(STsdbQueryHandle* pQueryHandle) {
|
|||
// handle data in cache situation
|
||||
bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
||||
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
|
||||
|
||||
|
||||
int64_t stime = taosGetTimestampUs();
|
||||
int64_t elapsedTime = stime;
|
||||
|
||||
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
assert(numOfTables > 0);
|
||||
|
||||
|
@ -1514,7 +1531,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
|||
pQueryHandle->cur.win = (STimeWindow){pQueryHandle->window.skey, pQueryHandle->window.skey};
|
||||
pQueryHandle->window = pQueryHandle->cur.win;
|
||||
pQueryHandle->cur.rows = 1;
|
||||
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
|
||||
pQueryHandle->type = TSDB_QUERY_TYPE_ALL;
|
||||
return true;
|
||||
} else {
|
||||
STsdbQueryHandle* pSecQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
|
||||
|
@ -1532,6 +1549,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
|||
free(pSecQueryHandle);
|
||||
return false;
|
||||
}
|
||||
|
||||
tsdbTakeMemSnapshot(pSecQueryHandle->pTsdb, &pSecQueryHandle->mem, &pSecQueryHandle->imem);
|
||||
|
||||
// allocate buffer in order to load data blocks from file
|
||||
|
@ -1539,7 +1557,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
|||
|
||||
pSecQueryHandle->statis = calloc(numOfCols, sizeof(SDataStatis));
|
||||
pSecQueryHandle->pColumns = taosArrayInit(numOfCols, sizeof(SColumnInfoData));
|
||||
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData colInfo = {{0}, 0};
|
||||
SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i);
|
||||
|
@ -1547,7 +1565,6 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
|||
colInfo.info = pCol->info;
|
||||
colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCol->info.bytes);
|
||||
taosArrayPush(pSecQueryHandle->pColumns, &colInfo);
|
||||
pSecQueryHandle->statis[i].colId = colInfo.info.colId;
|
||||
}
|
||||
|
||||
size_t si = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
|
||||
|
@ -1557,7 +1574,6 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
|||
|
||||
for (int32_t j = 0; j < si; ++j) {
|
||||
STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, j);
|
||||
|
||||
STableCheckInfo info = {
|
||||
.lastKey = pSecQueryHandle->window.skey,
|
||||
.tableId = pCheckInfo->tableId,
|
||||
|
@ -1569,12 +1585,13 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
|||
|
||||
tsdbInitDataBlockLoadInfo(&pSecQueryHandle->dataBlockLoadInfo);
|
||||
tsdbInitCompBlockLoadInfo(&pSecQueryHandle->compBlockLoadInfo);
|
||||
|
||||
pSecQueryHandle->defaultLoadColumn = taosArrayClone(pQueryHandle->defaultLoadColumn);
|
||||
|
||||
bool ret = tsdbNextDataBlock((void*) pSecQueryHandle);
|
||||
assert(ret);
|
||||
|
||||
/*SDataBlockInfo* pBlockInfo =*/ tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo);
|
||||
/*SArray *pDataBlock = */tsdbRetrieveDataBlock((void*) pSecQueryHandle, pSecQueryHandle->defaultLoadColumn);
|
||||
tsdbRetrieveDataBlockInfo((void*) pSecQueryHandle, &blockInfo);
|
||||
tsdbRetrieveDataBlock((void*) pSecQueryHandle, pSecQueryHandle->defaultLoadColumn);
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pCol = taosArrayGet(pQueryHandle->pColumns, i);
|
||||
|
@ -1587,15 +1604,26 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
|||
}
|
||||
|
||||
SColumnInfoData* pTSCol = taosArrayGet(pQueryHandle->pColumns, 0);
|
||||
|
||||
|
||||
// it is ascending order
|
||||
pQueryHandle->cur.win = (STimeWindow){((TSKEY*)pTSCol->pData)[0], ((TSKEY*)pTSCol->pData)[1]};
|
||||
pQueryHandle->window = pQueryHandle->cur.win;
|
||||
pQueryHandle->cur.rows = 2;
|
||||
|
||||
pQueryHandle->cur.mixBlock = true;
|
||||
pQueryHandle->order = TSDB_ORDER_DESC;
|
||||
|
||||
int32_t step = -1;// one step for ascending order traverse
|
||||
for (int32_t j = 0; j < si; ++j) {
|
||||
STableCheckInfo* pCheckInfo = (STableCheckInfo*) taosArrayGet(pQueryHandle->pTableCheckInfo, j);
|
||||
pCheckInfo->lastKey = pQueryHandle->cur.win.ekey + step;
|
||||
}
|
||||
|
||||
tsdbCleanupQueryHandle(pSecQueryHandle);
|
||||
}
|
||||
|
||||
|
||||
//disable it after retrieve data
|
||||
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
|
||||
pQueryHandle->checkFiles = false;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1607,6 +1635,8 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
|||
}
|
||||
|
||||
if (exists) {
|
||||
elapsedTime = taosGetTimestampUs() - stime;
|
||||
pQueryHandle->cost.checkForNextTime += elapsedTime;
|
||||
return exists;
|
||||
}
|
||||
|
||||
|
@ -1617,6 +1647,9 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
|
|||
// TODO: opt by consider the scan order
|
||||
bool ret = doHasDataInBuffer(pQueryHandle);
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
|
||||
elapsedTime = taosGetTimestampUs() - stime;
|
||||
pQueryHandle->cost.checkForNextTime += elapsedTime;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1794,41 +1827,44 @@ void tsdbRetrieveDataBlockInfo(TsdbQueryHandleT* pQueryHandle, SDataBlockInfo* p
|
|||
int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataStatis** pBlockStatis) {
|
||||
STsdbQueryHandle* pHandle = (STsdbQueryHandle*) pQueryHandle;
|
||||
|
||||
SQueryFilePos* cur = &pHandle->cur;
|
||||
if (cur->mixBlock) {
|
||||
SQueryFilePos* c = &pHandle->cur;
|
||||
if (c->mixBlock) {
|
||||
*pBlockStatis = NULL;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
assert((cur->slot >= 0 && cur->slot < pHandle->numOfBlocks) ||
|
||||
((cur->slot == pHandle->numOfBlocks) && (cur->slot == 0)));
|
||||
|
||||
STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[cur->slot];
|
||||
|
||||
// file block with subblocks has no statistics data
|
||||
STableBlockInfo* pBlockInfo = &pHandle->pDataBlockInfo[c->slot];
|
||||
assert((c->slot >= 0 && c->slot < pHandle->numOfBlocks) || ((c->slot == pHandle->numOfBlocks) && (c->slot == 0)));
|
||||
|
||||
// file block with sub-blocks has no statistics data
|
||||
if (pBlockInfo->compBlock->numOfSubBlocks > 1) {
|
||||
*pBlockStatis = NULL;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
int64_t stime = taosGetTimestampUs();
|
||||
tsdbLoadCompData(&pHandle->rhelper, pBlockInfo->compBlock, NULL);
|
||||
|
||||
// todo opt perf
|
||||
int16_t* colIds = pHandle->defaultLoadColumn->pData;
|
||||
|
||||
size_t numOfCols = QH_GET_NUM_OF_COLS(pHandle);
|
||||
memset(pHandle->statis, 0, numOfCols * sizeof(SDataStatis));
|
||||
for(int32_t i = 0; i < numOfCols; ++i) {
|
||||
SDataStatis* st = &pHandle->statis[i];
|
||||
int32_t colId = st->colId;
|
||||
|
||||
memset(st, 0, sizeof(SDataStatis));
|
||||
st->colId = colId;
|
||||
pHandle->statis[i].colId = colIds[i];
|
||||
}
|
||||
|
||||
tsdbGetDataStatis(&pHandle->rhelper, pHandle->statis, numOfCols);
|
||||
|
||||
*pBlockStatis = pHandle->statis;
|
||||
|
||||
|
||||
// always load the first primary timestamp column data
|
||||
SDataStatis* pPrimaryColStatis = &pHandle->statis[0];
|
||||
assert(pPrimaryColStatis->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX);
|
||||
|
||||
pPrimaryColStatis->numOfNull = 0;
|
||||
pPrimaryColStatis->min = pBlockInfo->compBlock->keyFirst;
|
||||
pPrimaryColStatis->max = pBlockInfo->compBlock->keyLast;
|
||||
|
||||
//update the number of NULL data rows
|
||||
for(int32_t i = 0; i < numOfCols; ++i) {
|
||||
for(int32_t i = 1; i < numOfCols; ++i) {
|
||||
if (pHandle->statis[i].numOfNull == -1) { // set the column data are all NULL
|
||||
pHandle->statis[i].numOfNull = pBlockInfo->compBlock->numOfRows;
|
||||
}
|
||||
|
@ -1840,7 +1876,11 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta
|
|||
pHandle->statis[i].max = pBlockInfo->compBlock->keyLast;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int64_t elapsed = taosGetTimestampUs() - stime;
|
||||
pHandle->cost.statisInfoLoadTime += elapsed;
|
||||
|
||||
*pBlockStatis = pHandle->statis;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@@ -1893,8 +1933,6 @@ SArray* tsdbRetrieveDataBlock(TsdbQueryHandleT* pQueryHandle, SArray* pIdList) {
    }
  }

SArray* tsdbRetrieveDataRow(TsdbQueryHandleT* pQueryHandle, SArray* pIdList, SQueryRowCond* pCond) { return NULL; }

static int32_t getAllTableList(STable* pSuperTable, SArray* list) {
  SSkipListIterator* iter = tSkipListCreateIter(pSuperTable->pIndex);
  while (tSkipListIterNext(iter)) {

@@ -1923,6 +1961,7 @@ static void destroyHelper(void* param) {
  free(param);
}

#define TAG_INVALID_COLUMN_INDEX -2
static int32_t getTagColumnIndex(STSchema* pTSchema, SSchema* pSchema) {
  // filter on table name (TBNAME)
  if (strcasecmp(pSchema->name, TSQL_TBNAME_L) == 0) {

@@ -1954,9 +1993,8 @@ void filterPrepare(void* expr, void* param) {
  tVariant* pCond = pExpr->_node.pRight->pVal;
  SSchema* pSchema = pExpr->_node.pLeft->pSchema;

  // todo: if the current super table has not changed its schema yet, this function may fail to get the correct schema; needs a test case
  int32_t index = getTagColumnIndex(pTSSchema, pSchema);
  assert((index >= 0 && i < TSDB_MAX_TAGS) || (index == TSDB_TBNAME_COLUMN_INDEX));
  assert((index >= 0 && i < TSDB_MAX_TAGS) || (index == TSDB_TBNAME_COLUMN_INDEX) || index == TAG_INVALID_COLUMN_INDEX);

  pInfo->sch = *pSchema;
  pInfo->colIndex = index;

@@ -2334,7 +2372,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
  size_t size = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
  for (int32_t i = 0; i < size; ++i) {
    STableCheckInfo* pTableCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
    tSkipListDestroyIter(pTableCheckInfo->iter);
    destroyTableMemIterator(pTableCheckInfo);

    if (pTableCheckInfo->pDataCols != NULL) {
      tfree(pTableCheckInfo->pDataCols->buf);

@@ -2362,10 +2400,15 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
    tsdbUnRefMemTable(pQueryHandle->pTsdb, pQueryHandle->imem);

  tsdbDestroyHelper(&pQueryHandle->rhelper);

  SIOCostSummary* pCost = &pQueryHandle->cost;
  tsdbDebug(":io-cost summary: statis-info:%"PRId64"us, datablock:%" PRId64"us, check data:%"PRId64"us, %p",
      pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qinfo);

  tfree(pQueryHandle);
}

void tsdbDestoryTableGroup(STableGroupInfo *pGroupList) {
void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) {
  assert(pGroupList != NULL);

  size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList);
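The filterPrepare hunk relaxes its assert so that getTagColumnIndex may legitimately return TAG_INVALID_COLUMN_INDEX (-2) in addition to a real tag index or TSDB_TBNAME_COLUMN_INDEX. A minimal sketch of how a consumer of pInfo->colIndex might branch on those three outcomes; the numeric value assumed for the TBNAME sentinel and the three handler stubs are hypothetical, not the project's definitions.

// Sketch only: the handlers are stubs so the example is self-contained; only
// TAG_INVALID_COLUMN_INDEX (-2) is taken from the diff, the other sentinel is assumed.
#include <stdio.h>
#include <stdint.h>

#define TAG_INVALID_COLUMN_INDEX_SKETCH  (-2)
#define TBNAME_COLUMN_INDEX_SKETCH       (-1)  // assumption: a negative sentinel distinct from -2

static void filterOnTagColumn(int32_t idx) { printf("filter on tag column %d\n", idx); }
static void filterOnTableName(void)        { printf("filter on the virtual TBNAME column\n"); }
static void matchNothing(void)             { printf("tag missing from this schema version\n"); }

static void applyTagFilterSketch(int32_t colIndex) {
  if (colIndex >= 0) {
    filterOnTagColumn(colIndex);
  } else if (colIndex == TBNAME_COLUMN_INDEX_SKETCH) {
    filterOnTableName();
  } else {  // TAG_INVALID_COLUMN_INDEX_SKETCH
    matchNothing();
  }
}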
@@ -39,7 +39,9 @@ static int insertData(SInsertInfo *pInfo) {
  pBlock->uid = pInfo->uid;
  pBlock->tid = pInfo->tid;
  pBlock->sversion = pInfo->sversion;
  pBlock->len = 0;
  pBlock->dataLen = 0;
  pBlock->schemaLen = 0;
  pBlock->numOfRows = 0;
  for (int i = 0; i < pInfo->rowsPerSubmit; i++) {
    // start_time += 1000;
    if (pInfo->isAscend) {

@@ -47,7 +49,7 @@ static int insertData(SInsertInfo *pInfo) {
    } else {
      start_time -= pInfo->interval;
    }
    SDataRow row = (SDataRow)(pBlock->data + pBlock->len);
    SDataRow row = (SDataRow)(pBlock->data + pBlock->dataLen);
    tdInitDataRow(row, pInfo->pSchema);

    for (int j = 0; j < schemaNCols(pInfo->pSchema); j++) {

@@ -59,13 +61,15 @@ static int insertData(SInsertInfo *pInfo) {
      tdAppendColVal(row, (void *)(&val), pTCol->type, pTCol->bytes, pTCol->offset);
      }
    }
    pBlock->len += dataRowLen(row);
    pBlock->dataLen += dataRowLen(row);
    pBlock->numOfRows++;
  }
  pMsg->length = pMsg->length + sizeof(SSubmitBlk) + pBlock->len;
  pMsg->length = sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + pBlock->dataLen;
  pMsg->numOfBlocks = 1;

  pBlock->len = htonl(pBlock->len);
  pBlock->dataLen = htonl(pBlock->dataLen);
  pBlock->numOfRows = htonl(pBlock->numOfRows);
  pBlock->schemaLen = htonl(pBlock->schemaLen);
  pBlock->uid = htobe64(pBlock->uid);
  pBlock->tid = htonl(pBlock->tid);

@@ -74,7 +78,6 @@ static int insertData(SInsertInfo *pInfo) {
  pMsg->length = htonl(pMsg->length);
  pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
  pMsg->compressed = htonl(pMsg->numOfBlocks);

  if (tsdbInsertData(pInfo->pRepo, pMsg, NULL) < 0) {
    tfree(pMsg);
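Before tsdbInsertData is called, the code above switches every multi-byte header field of the submit block and message to network byte order (htonl for the 32-bit fields, htobe64 for the 64-bit uid), after all host-order arithmetic on dataLen and numOfRows is finished. A condensed sketch of that conversion pass; the struct below is a reduced stand-in, not the real SSubmitBlk layout.

// Sketch only: a reduced stand-in for the submit-block header fields converted above.
#include <stdint.h>
#include <arpa/inet.h>  // htonl
#include <endian.h>     // htobe64 on glibc; other platforms expose the same helper elsewhere

typedef struct {
  uint64_t uid;
  int32_t  tid;
  int32_t  dataLen;
  int32_t  schemaLen;
  int32_t  numOfRows;
} SubmitBlkSketch;

// Convert each header field exactly once, as the last step before the message is handed off.
static void submitBlkHostToNetwork(SubmitBlkSketch *blk) {
  blk->dataLen   = htonl(blk->dataLen);
  blk->numOfRows = htonl(blk->numOfRows);
  blk->schemaLen = htonl(blk->schemaLen);
  blk->uid       = htobe64(blk->uid);
  blk->tid       = htonl(blk->tid);
}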
@@ -225,7 +225,7 @@ static void doCleanupDataCache(SCacheObj *pCacheObj);
 * refresh cache to remove data in both hash list and trash, if any nodes' refcount == 0, every pCacheObj->refreshTime
 * @param handle Cache object handle
 */
static void* taosCacheTimedRefresh(void *pCacheObj);
static void* taosCacheTimedRefresh(void *handle);

SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_free_fn_t fn, const char* cacheName) {
  if (refreshTimeInSeconds <= 0) {

@@ -455,51 +455,11 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
    __cache_unlock(pCacheObj);

  } else {
    uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, T_REF_VAL_GET(pNode) - 1);

    __cache_wr_lock(pCacheObj);

    // NOTE: once the refcount is decreased, pNode may be freed by another thread immediately.
    int32_t ref = T_REF_DEC(pNode);

    if (inTrashCan && (ref == 0)) {
      // Remove it if the ref count is 0.
      // The ref count does not need to be loaded and checked again after the lock is acquired, since it cannot be
      // increased while the node is in the trashcan.
      assert(pNode->pTNodeHeader->pData == pNode);
      taosRemoveFromTrashCan(pCacheObj, pNode->pTNodeHeader);
    }

    __cache_unlock(pCacheObj);
    uDebug("cache:%s, key:%p, %p is released, refcnt:%d, in trashcan:%d", pCacheObj->name, pNode->key, pNode->data, ref,
        inTrashCan);
  }

  // else {
  //   if (_remove) { // not in trash can, but need to remove it
  //     __cache_wr_lock(pCacheObj);
  //
  //     /*
  //      * If not referenced by other users. Otherwise move this node to trashcan wait for all users
  //      * releasing this resources.
  //      *
  //      * NOTE: previous ref is 0, and current ref is still 0, remove it. If previous is not 0, there is another thread
  //      * that tries to do the same thing.
  //      */
  //     if (ref == 0) {
  //       if (T_REF_VAL_GET(pNode) == 0) {
  //         taosCacheReleaseNode(pCacheObj, pNode);
  //       } else {
  //         taosCacheMoveToTrash(pCacheObj, pNode);
  //       }
  //     } else if (ref > 0) {
  //       if (!pNode->inTrashCan) {
  //         assert(pNode->pTNodeHeader == NULL);
  //         taosCacheMoveToTrash(pCacheObj, pNode);
  //       }
  //     }
  //
  //     __cache_unlock(pCacheObj);
  //   }
  // }
}

void taosCacheEmpty(SCacheObj *pCacheObj) {
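The release path that remains after the deleted block follows one pattern: log, take the cache write lock, decrement the reference count, and remove the node from the trashcan only when it is already there and the count has reached zero. A minimal sketch of that pattern using a pthread mutex and a C11 atomic in place of __cache_wr_lock/__cache_unlock and T_REF_DEC; the node and trashcan types are simplified placeholders.

// Sketch only: simplified node type; the real code also unlinks a trashcan header element.
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
  atomic_int ref;
  bool       inTrashCan;
} CacheNodeSketch;

static pthread_mutex_t cacheLockSketch = PTHREAD_MUTEX_INITIALIZER;

static void removeFromTrashSketch(CacheNodeSketch *node) {
  (void)node;  // placeholder for unlinking the trashcan entry and freeing the node
}

static void releaseNodeSketch(CacheNodeSketch *node) {
  pthread_mutex_lock(&cacheLockSketch);
  int ref = atomic_fetch_sub(&node->ref, 1) - 1;  // new value after the decrement
  if (node->inTrashCan && ref == 0) {
    // No re-check needed: the refcount cannot grow while the node sits in the trashcan.
    removeFromTrashSketch(node);
  }
  pthread_mutex_unlock(&cacheLockSketch);
}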
@@ -259,7 +259,6 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
  appH.cqH = pVnode->cq;
  appH.cqCreateFunc = cqCreate;
  appH.cqDropFunc = cqDrop;
  appH.configFunc = dnodeSendCfgTableToRecv;
  sprintf(temp, "%s/tsdb", rootDir);
  pVnode->tsdb = tsdbOpenRepo(temp, &appH);
  if (pVnode->tsdb == NULL) {

@@ -341,6 +340,13 @@ void vnodeRelease(void *pVnodeRaw) {
    tsdbCloseRepo(pVnode->tsdb, 1);
    pVnode->tsdb = NULL;

  // stop continuous query
  if (pVnode->cq) {
    void *cq = pVnode->cq;
    pVnode->cq = NULL;
    cqClose(cq);
  }

  if (pVnode->wal)
    walClose(pVnode->wal);
  pVnode->wal = NULL;

@@ -512,13 +518,6 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
    syncStop(sync);
  }

  // stop continuous query
  if (pVnode->cq) {
    void *cq = pVnode->cq;
    pVnode->cq = NULL;
    cqClose(cq);
  }

  vTrace("vgId:%d, vnode will cleanup, refCount:%d", pVnode->vgId, pVnode->refCount);

  // release local resources only after cutting off outside connections

@@ -588,7 +587,6 @@ static int vnodeResetTsdb(SVnodeObj *pVnode)
  appH.cqH = pVnode->cq;
  appH.cqCreateFunc = cqCreate;
  appH.cqDropFunc = cqDrop;
  appH.configFunc = dnodeSendCfgTableToRecv;
  pVnode->tsdb = tsdbOpenRepo(rootDir, &appH);

  pVnode->status = TAOS_VN_STATUS_READY;
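Taken together, the vnode hunks move the continuous-query shutdown from vnodeCleanUp into vnodeRelease, so resources are torn down in one place and in a fixed order: tsdb repository first, then the continuous-query handle, then the WAL. A condensed sketch of that ordering; the reduced struct and stub close functions stand in for SVnodeObj and tsdbCloseRepo/cqClose/walClose.

// Sketch only: a reduced vnode with just the three handles the hunks above touch.
#include <stddef.h>

typedef struct { void *tsdb; void *cq; void *wal; } VnodeSketch;

static void closeTsdbSketch(void *h) { (void)h; }
static void closeCqSketch(void *h)   { (void)h; }
static void closeWalSketch(void *h)  { (void)h; }

static void vnodeReleaseSketch(VnodeSketch *pVnode) {
  if (pVnode->tsdb) { closeTsdbSketch(pVnode->tsdb); pVnode->tsdb = NULL; }

  // stop continuous query: detach the handle before closing so late callbacks see NULL
  if (pVnode->cq) {
    void *cq = pVnode->cq;
    pVnode->cq = NULL;
    closeCqSketch(cq);
  }

  if (pVnode->wal) { closeWalSketch(pVnode->wal); }
  pVnode->wal = NULL;
}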
@@ -0,0 +1 @@
select * from db.devices;
@@ -13,8 +13,9 @@ typedef struct {
  char sql[256];
  char dataDir[256];
  int filesNum;
  int writeClients;
  int clients;
  int rowsPerRequest;
  int write;
} ProArgs;

typedef struct {

@@ -41,7 +42,7 @@ int main(int argc, char *argv[]) {
  statis.totalRows = 0;
  parseArg(argc, argv);

  if (arguments.writeClients > 0) {
  if (arguments.write) {
    writeData();
  } else {
    readData();

@@ -52,7 +53,7 @@ void parseArg(int argc, char *argv[]) {
  strcpy(arguments.sql, "./sqlCmd.txt");
  strcpy(arguments.dataDir, "./testdata");
  arguments.filesNum = 2;
  arguments.writeClients = 0;
  arguments.clients = 1;
  arguments.rowsPerRequest = 100;

  for (int i = 1; i < argc; ++i) {

@@ -83,12 +84,12 @@ void parseArg(int argc, char *argv[]) {
        exit(EXIT_FAILURE);
      }
    }
    else if (strcmp(argv[i], "-writeClients") == 0) {
    else if (strcmp(argv[i], "-clients") == 0) {
      if (i < argc - 1) {
        arguments.writeClients = atoi(argv[++i]);
        arguments.clients = atoi(argv[++i]);
      }
      else {
        fprintf(stderr, "'-writeClients' requires a parameter, default:%d\n", arguments.writeClients);
        fprintf(stderr, "'-clients' requires a parameter, default:%d\n", arguments.clients);
        exit(EXIT_FAILURE);
      }
    }

@@ -101,6 +102,9 @@ void parseArg(int argc, char *argv[]) {
        exit(EXIT_FAILURE);
      }
    }
    else if (strcmp(argv[i], "-w") == 0) {
      arguments.write = 1;
    }
  }
}

@@ -215,7 +219,7 @@ void writeDataImp(void *param) {

void writeData() {
  printf("write data\n");
  printf("---- writeClients: %d\n", arguments.writeClients);
  printf("---- clients: %d\n", arguments.clients);
  printf("---- dataDir: %s\n", arguments.dataDir);
  printf("---- numOfFiles: %d\n", arguments.filesNum);
  printf("---- rowsPerRequest: %d\n", arguments.rowsPerRequest);

@@ -243,12 +247,12 @@ void writeData() {

  int64_t st = getTimeStampMs();

  int a = arguments.filesNum / arguments.writeClients;
  int b = arguments.filesNum % arguments.writeClients;
  int a = arguments.filesNum / arguments.clients;
  int b = arguments.filesNum % arguments.clients;
  int last = 0;

  ThreadObj *threads = calloc((size_t)arguments.writeClients, sizeof(ThreadObj));
  for (int i = 0; i < arguments.writeClients; ++i) {
  ThreadObj *threads = calloc((size_t)arguments.clients, sizeof(ThreadObj));
  for (int i = 0; i < arguments.clients; ++i) {
    ThreadObj *pthread = threads + i;
    pthread_attr_t thattr;
    pthread->threadId = i + 1;

@@ -264,7 +268,7 @@ void writeData() {
    pthread_create(&pthread->pid, &thattr, (void *(*)(void *))writeDataImp, pthread);
  }

  for (int i = 0; i < arguments.writeClients; i++) {
  for (int i = 0; i < arguments.clients; i++) {
    pthread_join(threads[i].pid, NULL);
  }

@@ -272,17 +276,15 @@ void writeData() {
  float seconds = (float)elapsed / 1000;
  float rs = (float)statis.totalRows / seconds;

  free(threads);

  printf("---- Spent %f seconds to insert %ld records, speed: %f Rows/Second\n", seconds, statis.totalRows, rs);
}

void readData() {
  printf("read data\n");
  printf("---- sql: %s\n", arguments.sql);

  void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
  if (taos == NULL)
    taos_error(taos);

void readDataImp(void *param)
{
  ThreadObj *pThread = (ThreadObj *)param;
  printf("Thread %d\n", pThread->threadId);
  FILE *fp = fopen(arguments.sql, "r");
  if (fp == NULL) {
    printf("failed to open file %s\n", arguments.sql);

@@ -290,6 +292,10 @@ void readData() {
  }
  printf("open file %s success\n", arguments.sql);

  void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
  if (taos == NULL)
    taos_error(taos);

  char *line = NULL;
  size_t len = 0;
  while (!feof(fp)) {

@@ -325,9 +331,36 @@ void readData() {

    int64_t elapsed = getTimeStampMs() - st;
    float seconds = (float)elapsed / 1000;
    printf("---- Spent %f seconds to query: %s", seconds, line);
    printf("---- Spent %f seconds to retrieve %d records, Thread:%d query: %s\n", seconds, rows, pThread->threadId, line);
  }

  fclose(fp);
}

void readData() {
  printf("read data\n");
  printf("---- sql: %s\n", arguments.sql);
  printf("---- clients: %d\n", arguments.clients);

  void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0);
  if (taos == NULL)
    taos_error(taos);

  ThreadObj *threads = calloc((size_t)arguments.clients, sizeof(ThreadObj));

  for (int i = 0; i < arguments.clients; ++i) {
    ThreadObj *pthread = threads + i;
    pthread_attr_t thattr;
    pthread->threadId = i + 1;
    pthread_attr_init(&thattr);
    pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
    pthread_create(&pthread->pid, &thattr, (void *(*)(void *))readDataImp, pthread);
  }

  for (int i = 0; i < arguments.clients; i++) {
    pthread_join(threads[i].pid, NULL);
  }

  free(threads);
}
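writeData splits filesNum across the worker threads with a = filesNum / clients and b = filesNum % clients, but the hunk ends before showing how a, b, and last are assigned to the threads. The sketch below shows the usual even-split-with-remainder scheme those variables suggest; the per-thread fields are assumptions for illustration, not the actual ThreadObj members.

// Sketch only: the first 'b' threads take one extra file so all 'filesNum' files are covered.
typedef struct { int threadId; int from; int files; } ThreadRangeSketch;  // hypothetical fields

static void splitFilesSketch(ThreadRangeSketch *threads, int clients, int filesNum) {
  int a = filesNum / clients;  // base number of files per thread
  int b = filesNum % clients;  // remainder spread over the first 'b' threads
  int last = 0;                // index of the next unassigned file

  for (int i = 0; i < clients; ++i) {
    threads[i].threadId = i + 1;
    threads[i].from     = last;
    threads[i].files    = a + (i < b ? 1 : 0);
    last += threads[i].files;
  }
}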
@@ -0,0 +1,127 @@
#!/bin/bash

DATA_DIR=/mnt/root/testdata
NUM_LOOP=5

function printTo {
  if $verbose ; then
    echo $1
  fi
}

TDTESTQ1OUT=tdengineTestQ1.out

function runTest {
  totalG0=0
  totalG10=0
  totalG20=0
  totalG30=0
  totalG40=0
  totalG50=0
  totalG60=0
  totalG70=0
  totalG80=0
  totalG90=0
  for i in `seq 1 $NUM_LOOP`; do
    printTo "loop i:$i, $TDTEST_DIR/tdengineTest \
      -sql q1.txt"
    restartTaosd
    $TDTEST_DIR/tdengineTest \
      -sql $TDTEST_DIR/q1.txt > $TDTESTQ1OUT
    G0=`grep "devgroup=0" $TDTESTQ1OUT| awk '{print $3}'`
    totalG0=`echo "scale=4; $totalG0 + $G0" | bc`
    printTo "i: $i, G0: $G0, totalG0:$totalG0"
    G10=`grep "devgroup=10" $TDTESTQ1OUT| awk '{print $3}'`
    totalG10=`echo "scale=4; $totalG10 + $G10" | bc`
    printTo "i: $i, G10: $G10, totalG10:$totalG10"
    G20=`grep "devgroup=20" $TDTESTQ1OUT| awk '{print $3}'`
    totalG20=`echo "scale=4; $totalG20 + $G20" | bc`
    G30=`grep "devgroup=30" $TDTESTQ1OUT| awk '{print $3}'`
    totalG30=`echo "scale=4; $totalG30 + $G30" | bc`
    G40=`grep "devgroup=40" $TDTESTQ1OUT| awk '{print $3}'`
    totalG40=`echo "scale=4; $totalG40 + $G40" | bc`
    G50=`grep "devgroup=50" $TDTESTQ1OUT| awk '{print $3}'`
    totalG50=`echo "scale=4; $totalG50 + $G50" | bc`
    G60=`grep "devgroup=60" $TDTESTQ1OUT| awk '{print $3}'`
    totalG60=`echo "scale=4; $totalG60 + $G60" | bc`
    G70=`grep "devgroup=70" $TDTESTQ1OUT| awk '{print $3}'`
    totalG70=`echo "scale=4; $totalG70 + $G70" | bc`
    G80=`grep "devgroup=80" $TDTESTQ1OUT| awk '{print $3}'`
    totalG80=`echo "scale=4; $totalG80 + $G80" | bc`
    G90=`grep "devgroup=90" $TDTESTQ1OUT| awk '{print $3}'`
    totalG90=`echo "scale=4; $totalG90 + $G90" | bc`
    printTo "i: $i, G90: $G90, totalG90:$totalG90"
  done
  avgG0=`echo "scale=4; x = $totalG0 / $NUM_LOOP; if(x<1) print 0; x" | bc`
  avgG10=`echo "scale=4; x = $totalG10 / $NUM_LOOP; if(x<1) print 0; x" | bc`
  avgG20=`echo "scale=4; x = $totalG20 / $NUM_LOOP; if(x<1) print 0; x" | bc`
  avgG30=`echo "scale=4; x = $totalG30 / $NUM_LOOP; if(x<1) print 0; x" | bc`
  avgG40=`echo "scale=4; x = $totalG40 / $NUM_LOOP; if(x<1) print 0; x" | bc`
  avgG50=`echo "scale=4; x = $totalG50 / $NUM_LOOP; if(x<1) print 0; x" | bc`
  avgG60=`echo "scale=4; x = $totalG60 / $NUM_LOOP; if(x<1) print 0; x" | bc`
  avgG70=`echo "scale=4; x = $totalG70 / $NUM_LOOP; if(x<1) print 0; x" | bc`
  avgG80=`echo "scale=4; x = $totalG80 / $NUM_LOOP; if(x<1) print 0; x" | bc`
  avgG90=`echo "scale=4; x = $totalG90 / $NUM_LOOP; if(x<1) print 0; x" | bc`
  echo "Latency, G-0, G-10, G-20, G-30, G-40, G-50, G-60, G-70, G-80, G-90"
  echo "TDengine, $avgG0, $avgG10, $avgG20, $avgG30, $avgG40, $avgG50, $avgG60, $avgG70, $avgG80, $avgG90"
}

function restartTaosd {
  printTo "Stop taosd"
  systemctl stop taosd
  PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
  while [ -n "$PID" ]
  do
    pkill -TERM -x taosd
    sleep 1
    PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
  done

  printTo "Start taosd"
  $TAOSD_DIR/taosd > /dev/null 2>&1 &
  sleep 10
}

################ Main ################

master=false
develop=true
verbose=false

for arg in "$@"
do
  case $arg in
    -v)
      verbose=true
      ;;

    master)
      master=true
      develop=false
      ;;

    develop)
      master=false
      develop=true
      ;;
    *)
      ;;
  esac
done

if $master ; then
  echo "Test master branch.."
  cp /mnt/root/cfg/master/taos.cfg /etc/taos/taos.cfg
  WORK_DIR=/mnt/root/TDengine.master
else
  echo "Test develop branch.."
  cp /mnt/root/cfg/10billion/taos.cfg /etc/taos/taos.cfg
  WORK_DIR=/mnt/root/TDengine
fi

TAOSD_DIR=$WORK_DIR/debug/build/bin
TDTEST_DIR=$WORK_DIR/tests/comparisonTest/tdengine

runTest

echo "Test done!"
@ -0,0 +1,348 @@
|
|||
#!/bin/bash
|
||||
|
||||
DATA_DIR=/mnt/root/testdata
|
||||
NUM_LOOP=5
|
||||
|
||||
function printTo {
|
||||
if $verbose ; then
|
||||
echo $1
|
||||
fi
|
||||
}
|
||||
|
||||
TDTESTQ2OUT=tdengineTestQ2.out
|
||||
|
||||
function runTest {
|
||||
totalCount10=0
|
||||
totalCount20=0
|
||||
totalCount30=0
|
||||
totalCount40=0
|
||||
totalCount50=0
|
||||
totalCount60=0
|
||||
totalCount70=0
|
||||
totalCount80=0
|
||||
totalCount90=0
|
||||
totalCount100=0
|
||||
|
||||
totalAvg10=0
|
||||
totalAvg20=0
|
||||
totalAvg30=0
|
||||
totalAvg40=0
|
||||
totalAvg50=0
|
||||
totalAvg60=0
|
||||
totalAvg70=0
|
||||
totalAvg80=0
|
||||
totalAvg90=0
|
||||
totalAvg100=0
|
||||
|
||||
totalSum10=0
|
||||
totalSum20=0
|
||||
totalSum30=0
|
||||
totalSum40=0
|
||||
totalSum50=0
|
||||
totalSum60=0
|
||||
totalSum70=0
|
||||
totalSum80=0
|
||||
totalSum90=0
|
||||
totalSum100=0
|
||||
|
||||
totalMax10=0
|
||||
totalMax20=0
|
||||
totalMax30=0
|
||||
totalMax40=0
|
||||
totalMax50=0
|
||||
totalMax60=0
|
||||
totalMax70=0
|
||||
totalMax80=0
|
||||
totalMax90=0
|
||||
totalMax100=0
|
||||
|
||||
totalMin10=0
|
||||
totalMin20=0
|
||||
totalMin30=0
|
||||
totalMin40=0
|
||||
totalMin50=0
|
||||
totalMin60=0
|
||||
totalMin70=0
|
||||
totalMin80=0
|
||||
totalMin90=0
|
||||
totalMin100=0
|
||||
|
||||
totalSpread10=0
|
||||
totalSpread20=0
|
||||
totalSpread30=0
|
||||
totalSpread40=0
|
||||
totalSpread50=0
|
||||
totalSpread60=0
|
||||
totalSpread70=0
|
||||
totalSpread80=0
|
||||
totalSpread90=0
|
||||
totalSpread100=0
|
||||
|
||||
for i in `seq 1 $NUM_LOOP`; do
|
||||
printTo "loop i:$i, $TDTEST_DIR/tdengineTest \
|
||||
-sql q2.txt"
|
||||
restartTaosd
|
||||
$TDTEST_DIR/tdengineTest \
|
||||
-sql $TDTEST_DIR/q2.txt > $TDTESTQ2OUT
|
||||
|
||||
Count10=`cat $TDTESTQ2OUT | grep "count" | grep "devgroup<10;" | awk '{print $3}'`
|
||||
totalCount10=`echo "scale=4; $totalCount10 + $Count10" | bc`
|
||||
Count20=`cat $TDTESTQ2OUT | grep "count" | grep "devgroup<20;" | awk '{print $3}'`
|
||||
totalCount20=`echo "scale=4; $totalCount20 + $Count20" | bc`
|
||||
Count30=`cat $TDTESTQ2OUT | grep count | grep "devgroup<30;" | awk '{print $3}'`
|
||||
totalCount30=`echo "scale=4; $totalCount30 + $Count30" | bc`
|
||||
Count40=`cat $TDTESTQ2OUT | grep count | grep "devgroup<40;" | awk '{print $3}'`
|
||||
totalCount40=`echo "scale=4; $totalCount40 + $Count40" | bc`
|
||||
Count50=`cat $TDTESTQ2OUT | grep count | grep "devgroup<50;" | awk '{print $3}'`
|
||||
totalCount50=`echo "scale=4; $totalCount50 + $Count50" | bc`
|
||||
Count60=`cat $TDTESTQ2OUT | grep count | grep "devgroup<60;" | awk '{print $3}'`
|
||||
totalCount60=`echo "scale=4; $totalCount60 + $Count60" | bc`
|
||||
Count70=`cat $TDTESTQ2OUT | grep count | grep "devgroup<70;" | awk '{print $3}'`
|
||||
totalCount70=`echo "scale=4; $totalCount70 + $Count70" | bc`
|
||||
Count80=`cat $TDTESTQ2OUT | grep count | grep "devgroup<80;" | awk '{print $3}'`
|
||||
totalCount80=`echo "scale=4; $totalCount80 + $Count80" | bc`
|
||||
Count90=`cat $TDTESTQ2OUT | grep count | grep "devgroup<90;" | awk '{print $3}'`
|
||||
totalCount90=`echo "scale=4; $totalCount90 + $Count90" | bc`
|
||||
Count100=`cat $TDTESTQ2OUT | grep count | grep "db.devices;" | awk '{print $3}'`
|
||||
totalCount100=`echo "scale=4; $totalCount100 + $Count100" | bc`
|
||||
|
||||
Avg10=`cat $TDTESTQ2OUT | grep "avg" | grep "devgroup<10;" | awk '{print $3}'`
|
||||
totalAvg10=`echo "scale=4; $totalAvg10 + $Avg10" | bc`
|
||||
Avg20=`cat $TDTESTQ2OUT | grep "avg" | grep "devgroup<20;" | awk '{print $3}'`
|
||||
totalAvg20=`echo "scale=4; $totalAvg20 + $Avg20" | bc`
|
||||
Avg30=`cat $TDTESTQ2OUT | grep avg | grep "devgroup<30;" | awk '{print $3}'`
|
||||
totalAvg30=`echo "scale=4; $totalAvg30 + $Avg30" | bc`
|
||||
Avg40=`cat $TDTESTQ2OUT | grep avg | grep "devgroup<40;" | awk '{print $3}'`
|
||||
totalAvg40=`echo "scale=4; $totalAvg40 + $Avg40" | bc`
|
||||
Avg50=`cat $TDTESTQ2OUT | grep avg | grep "devgroup<50;" | awk '{print $3}'`
|
||||
totalAvg50=`echo "scale=4; $totalAvg50 + $Avg50" | bc`
|
||||
Avg60=`cat $TDTESTQ2OUT | grep avg | grep "devgroup<60;" | awk '{print $3}'`
|
||||
totalAvg60=`echo "scale=4; $totalAvg60 + $Avg60" | bc`
|
||||
Avg70=`cat $TDTESTQ2OUT | grep avg | grep "devgroup<70;" | awk '{print $3}'`
|
||||
totalAvg70=`echo "scale=4; $totalAvg70 + $Avg70" | bc`
|
||||
Avg80=`cat $TDTESTQ2OUT | grep avg | grep "devgroup<80;" | awk '{print $3}'`
|
||||
totalAvg80=`echo "scale=4; $totalAvg80 + $Avg80" | bc`
|
||||
Avg90=`cat $TDTESTQ2OUT | grep avg | grep "devgroup<90;" | awk '{print $3}'`
|
||||
totalAvg90=`echo "scale=4; $totalAvg90 + $Avg90" | bc`
|
||||
Avg100=`cat $TDTESTQ2OUT | grep avg | grep "db.devices;" | awk '{print $3}'`
|
||||
totalAvg100=`echo "scale=4; $totalAvg100 + $Avg100" | bc`
|
||||
|
||||
Sum10=`cat $TDTESTQ2OUT | grep "sum" | grep "devgroup<10;" | awk '{print $3}'`
|
||||
totalSum10=`echo "scale=4; $totalSum10 + $Sum10" | bc`
|
||||
Sum20=`cat $TDTESTQ2OUT | grep "sum" | grep "devgroup<20;" | awk '{print $3}'`
|
||||
totalSum20=`echo "scale=4; $totalSum20 + $Sum20" | bc`
|
||||
Sum30=`cat $TDTESTQ2OUT | grep sum | grep "devgroup<30;" | awk '{print $3}'`
|
||||
totalSum30=`echo "scale=4; $totalSum30 + $Sum30" | bc`
|
||||
Sum40=`cat $TDTESTQ2OUT | grep sum | grep "devgroup<40;" | awk '{print $3}'`
|
||||
totalSum40=`echo "scale=4; $totalSum40 + $Sum40" | bc`
|
||||
Sum50=`cat $TDTESTQ2OUT | grep sum | grep "devgroup<50;" | awk '{print $3}'`
|
||||
totalSum50=`echo "scale=4; $totalSum50 + $Sum50" | bc`
|
||||
Sum60=`cat $TDTESTQ2OUT | grep sum | grep "devgroup<60;" | awk '{print $3}'`
|
||||
totalSum60=`echo "scale=4; $totalSum60 + $Sum60" | bc`
|
||||
Sum70=`cat $TDTESTQ2OUT | grep sum | grep "devgroup<70;" | awk '{print $3}'`
|
||||
totalSum70=`echo "scale=4; $totalSum70 + $Sum70" | bc`
|
||||
Sum80=`cat $TDTESTQ2OUT | grep sum | grep "devgroup<80;" | awk '{print $3}'`
|
||||
totalSum80=`echo "scale=4; $totalSum80 + $Sum80" | bc`
|
||||
Sum90=`cat $TDTESTQ2OUT | grep sum | grep "devgroup<90;" | awk '{print $3}'`
|
||||
totalSum90=`echo "scale=4; $totalSum90 + $Sum90" | bc`
|
||||
Sum100=`cat $TDTESTQ2OUT | grep sum | grep "db.devices;" | awk '{print $3}'`
|
||||
totalSum100=`echo "scale=4; $totalSum100 + $Sum100" | bc`
|
||||
|
||||
Max10=`cat $TDTESTQ2OUT | grep "max" | grep "devgroup<10;" | awk '{print $3}'`
|
||||
totalMax10=`echo "scale=4; $totalMax10 + $Max10" | bc`
|
||||
Max20=`cat $TDTESTQ2OUT | grep "max" | grep "devgroup<20;" | awk '{print $3}'`
|
||||
totalMax20=`echo "scale=4; $totalMax20 + $Max20" | bc`
|
||||
Max30=`cat $TDTESTQ2OUT | grep max | grep "devgroup<30;" | awk '{print $3}'`
|
||||
totalMax30=`echo "scale=4; $totalMax30 + $Max30" | bc`
|
||||
Max40=`cat $TDTESTQ2OUT | grep max | grep "devgroup<40;" | awk '{print $3}'`
|
||||
totalMax40=`echo "scale=4; $totalMax40 + $Max40" | bc`
|
||||
Max50=`cat $TDTESTQ2OUT | grep max | grep "devgroup<50;" | awk '{print $3}'`
|
||||
totalMax50=`echo "scale=4; $totalMax50 + $Max50" | bc`
|
||||
Max60=`cat $TDTESTQ2OUT | grep max | grep "devgroup<60;" | awk '{print $3}'`
|
||||
totalMax60=`echo "scale=4; $totalMax60 + $Max60" | bc`
|
||||
Max70=`cat $TDTESTQ2OUT | grep max | grep "devgroup<70;" | awk '{print $3}'`
|
||||
totalMax70=`echo "scale=4; $totalMax70 + $Max70" | bc`
|
||||
Max80=`cat $TDTESTQ2OUT | grep max | grep "devgroup<80;" | awk '{print $3}'`
|
||||
totalMax80=`echo "scale=4; $totalMax80 + $Max80" | bc`
|
||||
Max90=`cat $TDTESTQ2OUT | grep max | grep "devgroup<90;" | awk '{print $3}'`
|
||||
totalMax90=`echo "scale=4; $totalMax90 + $Max90" | bc`
|
||||
Max100=`cat $TDTESTQ2OUT | grep max | grep "db.devices;" | awk '{print $3}'`
|
||||
totalMax100=`echo "scale=4; $totalMax100 + $Max100" | bc`
|
||||
|
||||
Min10=`cat $TDTESTQ2OUT | grep "min" | grep "devgroup<10;" | awk '{print $3}'`
|
||||
totalMin10=`echo "scale=4; $totalMin10 + $Min10" | bc`
|
||||
Min20=`cat $TDTESTQ2OUT | grep "min" | grep "devgroup<20;" | awk '{print $3}'`
|
||||
totalMin20=`echo "scale=4; $totalMin20 + $Min20" | bc`
|
||||
Min30=`cat $TDTESTQ2OUT | grep min | grep "devgroup<30;" | awk '{print $3}'`
|
||||
totalMin30=`echo "scale=4; $totalMin30 + $Min30" | bc`
|
||||
Min40=`cat $TDTESTQ2OUT | grep min | grep "devgroup<40;" | awk '{print $3}'`
|
||||
totalMin40=`echo "scale=4; $totalMin40 + $Min40" | bc`
|
||||
Min50=`cat $TDTESTQ2OUT | grep min | grep "devgroup<50;" | awk '{print $3}'`
|
||||
totalMin50=`echo "scale=4; $totalMin50 + $Min50" | bc`
|
||||
Min60=`cat $TDTESTQ2OUT | grep min | grep "devgroup<60;" | awk '{print $3}'`
|
||||
totalMin60=`echo "scale=4; $totalMin60 + $Min60" | bc`
|
||||
Min70=`cat $TDTESTQ2OUT | grep min | grep "devgroup<70;" | awk '{print $3}'`
|
||||
totalMin70=`echo "scale=4; $totalMin70 + $Min70" | bc`
|
||||
Min80=`cat $TDTESTQ2OUT | grep min | grep "devgroup<80;" | awk '{print $3}'`
|
||||
totalMin80=`echo "scale=4; $totalMin80 + $Min80" | bc`
|
||||
Min90=`cat $TDTESTQ2OUT | grep min | grep "devgroup<90;" | awk '{print $3}'`
|
||||
totalMin90=`echo "scale=4; $totalMin90 + $Min90" | bc`
|
||||
Min100=`cat $TDTESTQ2OUT | grep min | grep "db.devices;" | awk '{print $3}'`
|
||||
totalMin100=`echo "scale=4; $totalMin100 + $Min100" | bc`
|
||||
|
||||
Spread10=`cat $TDTESTQ2OUT | grep "spread" | grep "devgroup<10;" | awk '{print $3}'`
|
||||
totalSpread10=`echo "scale=4; $totalSpread10 + $Spread10" | bc`
|
||||
Spread20=`cat $TDTESTQ2OUT | grep "spread" | grep "devgroup<20;" | awk '{print $3}'`
|
||||
totalSpread20=`echo "scale=4; $totalSpread20 + $Spread20" | bc`
|
||||
Spread30=`cat $TDTESTQ2OUT | grep spread | grep "devgroup<30;" | awk '{print $3}'`
|
||||
totalSpread30=`echo "scale=4; $totalSpread30 + $Spread30" | bc`
|
||||
Spread40=`cat $TDTESTQ2OUT | grep spread | grep "devgroup<40;" | awk '{print $3}'`
|
||||
totalSpread40=`echo "scale=4; $totalSpread40 + $Spread40" | bc`
|
||||
Spread50=`cat $TDTESTQ2OUT | grep spread | grep "devgroup<50;" | awk '{print $3}'`
|
||||
totalSpread50=`echo "scale=4; $totalSpread50 + $Spread50" | bc`
|
||||
Spread60=`cat $TDTESTQ2OUT | grep spread | grep "devgroup<60;" | awk '{print $3}'`
|
||||
totalSpread60=`echo "scale=4; $totalSpread60 + $Spread60" | bc`
|
||||
Spread70=`cat $TDTESTQ2OUT | grep spread | grep "devgroup<70;" | awk '{print $3}'`
|
||||
totalSpread70=`echo "scale=4; $totalSpread70 + $Spread70" | bc`
|
||||
Spread80=`cat $TDTESTQ2OUT | grep spread | grep "devgroup<80;" | awk '{print $3}'`
|
||||
totalSpread80=`echo "scale=4; $totalSpread80 + $Spread80" | bc`
|
||||
Spread90=`cat $TDTESTQ2OUT | grep spread | grep "devgroup<90;" | awk '{print $3}'`
|
||||
totalSpread90=`echo "scale=4; $totalSpread90 + $Spread90" | bc`
|
||||
Spread100=`cat $TDTESTQ2OUT | grep spread | grep "db.devices;" | awk '{print $3}'`
|
||||
totalSpread100=`echo "scale=4; $totalSpread100 + $Spread100" | bc`
|
||||
|
||||
done
|
||||
avgCount10=`echo "scale=4; x = $totalCount10 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgCount20=`echo "scale=4; x = $totalCount20 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgCount30=`echo "scale=4; x = $totalCount30 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgCount40=`echo "scale=4; x = $totalCount40 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgCount50=`echo "scale=4; x = $totalCount50 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgCount60=`echo "scale=4; x = $totalCount60 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgCount70=`echo "scale=4; x = $totalCount70 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgCount80=`echo "scale=4; x = $totalCount80 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgCount90=`echo "scale=4; x = $totalCount90 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgCount100=`echo "scale=4; x = $totalCount100 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
|
||||
avgAvg10=`echo "scale=4; x = $totalAvg10 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgAvg20=`echo "scale=4; x = $totalAvg20 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgAvg30=`echo "scale=4; x = $totalAvg30 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgAvg40=`echo "scale=4; x = $totalAvg40 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgAvg50=`echo "scale=4; x = $totalAvg50 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgAvg60=`echo "scale=4; x = $totalAvg60 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgAvg70=`echo "scale=4; x = $totalAvg70 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgAvg80=`echo "scale=4; x = $totalAvg80 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgAvg90=`echo "scale=4; x = $totalAvg90 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgAvg100=`echo "scale=4; x = $totalAvg100 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
|
||||
avgSum10=`echo "scale=4; x = $totalSum10 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSum20=`echo "scale=4; x = $totalSum20 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSum30=`echo "scale=4; x = $totalSum30 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSum40=`echo "scale=4; x = $totalSum40 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSum50=`echo "scale=4; x = $totalSum50 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSum60=`echo "scale=4; x = $totalSum60 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSum70=`echo "scale=4; x = $totalSum70 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSum80=`echo "scale=4; x = $totalSum80 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSum90=`echo "scale=4; x = $totalSum90 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSum100=`echo "scale=4; x = $totalSum100 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
|
||||
avgMax10=`echo "scale=4; x = $totalMax10 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMax20=`echo "scale=4; x = $totalMax20 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMax30=`echo "scale=4; x = $totalMax30 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMax40=`echo "scale=4; x = $totalMax40 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMax50=`echo "scale=4; x = $totalMax50 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMax60=`echo "scale=4; x = $totalMax60 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMax70=`echo "scale=4; x = $totalMax70 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMax80=`echo "scale=4; x = $totalMax80 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMax90=`echo "scale=4; x = $totalMax90 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMax100=`echo "scale=4; x = $totalMax100 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
|
||||
avgMin10=`echo "scale=4; x = $totalMin10 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMin20=`echo "scale=4; x = $totalMin20 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMin30=`echo "scale=4; x = $totalMin30 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMin40=`echo "scale=4; x = $totalMin40 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMin50=`echo "scale=4; x = $totalMin50 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMin60=`echo "scale=4; x = $totalMin60 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMin70=`echo "scale=4; x = $totalMin70 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMin80=`echo "scale=4; x = $totalMin80 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMin90=`echo "scale=4; x = $totalMin90 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgMin100=`echo "scale=4; x = $totalMin100 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
|
||||
avgSpread10=`echo "scale=4; x = $totalSpread10 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSpread20=`echo "scale=4; x = $totalSpread20 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSpread30=`echo "scale=4; x = $totalSpread30 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSpread40=`echo "scale=4; x = $totalSpread40 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSpread50=`echo "scale=4; x = $totalSpread50 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSpread60=`echo "scale=4; x = $totalSpread60 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSpread70=`echo "scale=4; x = $totalSpread70 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSpread80=`echo "scale=4; x = $totalSpread80 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSpread90=`echo "scale=4; x = $totalSpread90 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgSpread100=`echo "scale=4; x = $totalSpread100 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
|
||||
echo "Latency, 10%, 20%, 30%, 40%, 50%, 60%, 70%, 80%, 90%, 100%"
|
||||
echo "Count, $avgCount10, $avgCount20, $avgCount30, $avgCount40, $avgCount50, $avgCount60, $avgCount70, $avgCount80, $avgCount90, $avgCount100"
|
||||
echo "Avg, $avgAvg10, $avgAvg20, $avgAvg30, $avgAvg40, $avgAvg50, $avgAvg60, $avgAvg70, $avgAvg80, $avgAvg90, $avgAvg100"
|
||||
echo "Sum, $avgSum10, $avgSum20, $avgSum30, $avgSum40, $avgSum50, $avgSum60, $avgSum70, $avgSum80, $avgSum90, $avgSum100"
|
||||
echo "Max, $avgMax10, $avgMax20, $avgMax30, $avgMax40, $avgMax50, $avgMax60, $avgMax70, $avgMax80, $avgMax90, $avgMax100"
|
||||
echo "Min, $avgMin10, $avgMin20, $avgMin30, $avgMin40, $avgMin50, $avgMin60, $avgMin70, $avgMin80, $avgMin90, $avgMin100"
|
||||
echo "Spread, $avgSpread10, $avgSpread20, $avgSpread30, $avgSpread40, $avgSpread50, $avgSpread60, $avgSpread70, $avgSpread80, $avgSpread90, $avgSpread100"
|
||||
}
|
||||
|
||||
function restartTaosd {
|
||||
printTo "Stop taosd"
|
||||
systemctl stop taosd
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
while [ -n "$PID" ]
|
||||
do
|
||||
pkill -TERM -x taosd
|
||||
sleep 1
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
|
||||
printTo "Start taosd"
|
||||
$TAOSD_DIR/taosd > /dev/null 2>&1 &
|
||||
sleep 10
|
||||
}
|
||||
|
||||
################ Main ################
|
||||
|
||||
master=false
|
||||
develop=true
|
||||
verbose=false
|
||||
|
||||
for arg in "$@"
|
||||
do
|
||||
case $arg in
|
||||
-v)
|
||||
verbose=true
|
||||
;;
|
||||
|
||||
master)
|
||||
master=true
|
||||
develop=false
|
||||
;;
|
||||
|
||||
develop)
|
||||
master=false
|
||||
develop=true
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if $master ; then
|
||||
echo "Test master branch.."
|
||||
cp /mnt/root/cfg/master/taos.cfg /etc/taos/taos.cfg
|
||||
WORK_DIR=/mnt/root/TDengine.master
|
||||
else
|
||||
echo "Test develop branch.."
|
||||
cp /mnt/root/cfg/10billion/taos.cfg /etc/taos/taos.cfg
|
||||
WORK_DIR=/mnt/root/TDengine
|
||||
fi
|
||||
|
||||
TAOSD_DIR=$WORK_DIR/debug/build/bin
|
||||
TDTEST_DIR=$WORK_DIR/tests/comparisonTest/tdengine
|
||||
|
||||
runTest
|
||||
|
||||
echo "Test done!"
|
|
@ -0,0 +1,124 @@
|
|||
#!/bin/bash
|
||||
|
||||
DATA_DIR=/mnt/root/testdata
|
||||
NUM_LOOP=5
|
||||
|
||||
function printTo {
|
||||
if $verbose ; then
|
||||
echo $1
|
||||
fi
|
||||
}
|
||||
|
||||
TDTESTQ3OUT=tdengineTestQ3.out
|
||||
|
||||
function runTest {
|
||||
totalG10=0
|
||||
totalG20=0
|
||||
totalG30=0
|
||||
totalG40=0
|
||||
totalG50=0
|
||||
totalG60=0
|
||||
totalG70=0
|
||||
totalG80=0
|
||||
totalG90=0
|
||||
totalG100=0
|
||||
for i in `seq 1 $NUM_LOOP`; do
|
||||
printTo "loop i:$i, $TDTEST_DIR/tdengineTest \
|
||||
-sql q3.txt"
|
||||
restartTaosd
|
||||
$TDTEST_DIR/tdengineTest \
|
||||
-sql $TDTEST_DIR/q3.txt > $TDTESTQ3OUT
|
||||
G10=`grep "devgroup<10" $TDTESTQ3OUT| awk '{print $3}'`
|
||||
totalG10=`echo "scale=4; $totalG10 + $G10" | bc`
|
||||
G20=`grep "devgroup<20" $TDTESTQ3OUT| awk '{print $3}'`
|
||||
totalG20=`echo "scale=4; $totalG20 + $G20" | bc`
|
||||
G30=`grep "devgroup<30" $TDTESTQ3OUT| awk '{print $3}'`
|
||||
totalG30=`echo "scale=4; $totalG30 + $G30" | bc`
|
||||
G40=`grep "devgroup<40" $TDTESTQ3OUT| awk '{print $3}'`
|
||||
totalG40=`echo "scale=4; $totalG40 + $G40" | bc`
|
||||
G50=`grep "devgroup<50" $TDTESTQ3OUT| awk '{print $3}'`
|
||||
totalG50=`echo "scale=4; $totalG50 + $G50" | bc`
|
||||
G60=`grep "devgroup<60" $TDTESTQ3OUT| awk '{print $3}'`
|
||||
totalG60=`echo "scale=4; $totalG60 + $G60" | bc`
|
||||
G70=`grep "devgroup<70" $TDTESTQ3OUT| awk '{print $3}'`
|
||||
totalG70=`echo "scale=4; $totalG70 + $G70" | bc`
|
||||
G80=`grep "devgroup<80" $TDTESTQ3OUT| awk '{print $3}'`
|
||||
totalG80=`echo "scale=4; $totalG80 + $G80" | bc`
|
||||
G90=`grep "devgroup<90" $TDTESTQ3OUT| awk '{print $3}'`
|
||||
totalG90=`echo "scale=4; $totalG90 + $G90" | bc`
|
||||
G100=`grep "db.devices group by devgroup;" $TDTESTQ3OUT| awk '{print $3}'`
|
||||
totalG100=`echo "scale=4; $totalG100 + $G100" | bc`
|
||||
done
|
||||
avgG10=`echo "scale=4; x = $totalG10 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG20=`echo "scale=4; x = $totalG20 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG30=`echo "scale=4; x = $totalG30 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG40=`echo "scale=4; x = $totalG40 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG50=`echo "scale=4; x = $totalG50 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG60=`echo "scale=4; x = $totalG60 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG70=`echo "scale=4; x = $totalG70 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG80=`echo "scale=4; x = $totalG80 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG90=`echo "scale=4; x = $totalG90 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG100=`echo "scale=4; x = $totalG100 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
echo "Latency, 10%, 20%, 30%, 40%, 50%, 60%, 70%, 80%, 90%, 100%"
|
||||
echo "TDengine, $avgG10, $avgG20, $avgG30, $avgG40, $avgG50, $avgG60, $avgG70, $avgG80, $avgG90, $avgG100"
|
||||
}
|
||||
|
||||
function restartTaosd {
|
||||
printTo "Stop taosd"
|
||||
systemctl stop taosd
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
while [ -n "$PID" ]
|
||||
do
|
||||
pkill -TERM -x taosd
|
||||
sleep 1
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
|
||||
printTo "Start taosd"
|
||||
$TAOSD_DIR/taosd > /dev/null 2>&1 &
|
||||
sleep 10
|
||||
}
|
||||
|
||||
################ Main ################
|
||||
|
||||
master=false
|
||||
develop=true
|
||||
verbose=false
|
||||
|
||||
for arg in "$@"
|
||||
do
|
||||
case $arg in
|
||||
-v)
|
||||
verbose=true
|
||||
;;
|
||||
|
||||
master)
|
||||
master=true
|
||||
develop=false
|
||||
;;
|
||||
|
||||
develop)
|
||||
master=false
|
||||
develop=true
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if $master ; then
|
||||
echo "Test master branch.."
|
||||
cp /mnt/root/cfg/master/taos.cfg /etc/taos/taos.cfg
|
||||
WORK_DIR=/mnt/root/TDengine.master
|
||||
else
|
||||
echo "Test develop branch.."
|
||||
cp /mnt/root/cfg/10billion/taos.cfg /etc/taos/taos.cfg
|
||||
WORK_DIR=/mnt/root/TDengine
|
||||
fi
|
||||
|
||||
TAOSD_DIR=$WORK_DIR/debug/build/bin
|
||||
TDTEST_DIR=$WORK_DIR/tests/comparisonTest/tdengine
|
||||
|
||||
runTest
|
||||
|
||||
echo "Test done!"
|
|
@ -0,0 +1,124 @@
|
|||
#!/bin/bash
|
||||
|
||||
DATA_DIR=/mnt/root/testdata
|
||||
NUM_LOOP=5
|
||||
|
||||
function printTo {
|
||||
if $verbose ; then
|
||||
echo $1
|
||||
fi
|
||||
}
|
||||
|
||||
TDTESTQ4OUT=tdengineTestQ4.out
|
||||
|
||||
function runTest {
|
||||
totalG10=0
|
||||
totalG20=0
|
||||
totalG30=0
|
||||
totalG40=0
|
||||
totalG50=0
|
||||
totalG60=0
|
||||
totalG70=0
|
||||
totalG80=0
|
||||
totalG90=0
|
||||
totalG100=0
|
||||
for i in `seq 1 $NUM_LOOP`; do
|
||||
printTo "loop i:$i, $TDTEST_DIR/tdengineTest \
|
||||
-sql q4.txt"
|
||||
restartTaosd
|
||||
$TDTEST_DIR/tdengineTest \
|
||||
-sql $TDTEST_DIR/q4.txt > $TDTESTQ4OUT
|
||||
G10=`grep "devgroup<10" $TDTESTQ4OUT| awk '{print $3}'`
|
||||
totalG10=`echo "scale=4; $totalG10 + $G10" | bc`
|
||||
G20=`grep "devgroup<20" $TDTESTQ4OUT| awk '{print $3}'`
|
||||
totalG20=`echo "scale=4; $totalG20 + $G20" | bc`
|
||||
G30=`grep "devgroup<30" $TDTESTQ4OUT| awk '{print $3}'`
|
||||
totalG30=`echo "scale=4; $totalG30 + $G30" | bc`
|
||||
G40=`grep "devgroup<40" $TDTESTQ4OUT| awk '{print $3}'`
|
||||
totalG40=`echo "scale=4; $totalG40 + $G40" | bc`
|
||||
G50=`grep "devgroup<50" $TDTESTQ4OUT| awk '{print $3}'`
|
||||
totalG50=`echo "scale=4; $totalG50 + $G50" | bc`
|
||||
G60=`grep "devgroup<60" $TDTESTQ4OUT| awk '{print $3}'`
|
||||
totalG60=`echo "scale=4; $totalG60 + $G60" | bc`
|
||||
G70=`grep "devgroup<70" $TDTESTQ4OUT| awk '{print $3}'`
|
||||
totalG70=`echo "scale=4; $totalG70 + $G70" | bc`
|
||||
G80=`grep "devgroup<80" $TDTESTQ4OUT| awk '{print $3}'`
|
||||
totalG80=`echo "scale=4; $totalG80 + $G80" | bc`
|
||||
G90=`grep "devgroup<90" $TDTESTQ4OUT| awk '{print $3}'`
|
||||
totalG90=`echo "scale=4; $totalG90 + $G90" | bc`
|
||||
G100=`grep "db.devices interval" $TDTESTQ4OUT| awk '{print $3}'`
|
||||
totalG100=`echo "scale=4; $totalG100 + $G100" | bc`
|
||||
done
|
||||
avgG10=`echo "scale=4; x = $totalG10 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG20=`echo "scale=4; x = $totalG20 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG30=`echo "scale=4; x = $totalG30 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG40=`echo "scale=4; x = $totalG40 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG50=`echo "scale=4; x = $totalG50 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG60=`echo "scale=4; x = $totalG60 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG70=`echo "scale=4; x = $totalG70 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG80=`echo "scale=4; x = $totalG80 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG90=`echo "scale=4; x = $totalG90 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
avgG100=`echo "scale=4; x = $totalG100 / $NUM_LOOP; if(x<1) print 0; x" | bc`
|
||||
echo "Latency, 10%, 20%, 30%, 40%, 50%, 60%, 70%, 80%, 90%, 100%"
|
||||
echo "TDengine, $avgG10, $avgG20, $avgG30, $avgG40, $avgG50, $avgG60, $avgG70, $avgG80, $avgG90, $avgG100"
|
||||
}
|
||||
|
||||
function restartTaosd {
|
||||
printTo "Stop taosd"
|
||||
systemctl stop taosd
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
while [ -n "$PID" ]
|
||||
do
|
||||
pkill -TERM -x taosd
|
||||
sleep 1
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
|
||||
printTo "Start taosd"
|
||||
$TAOSD_DIR/taosd > /dev/null 2>&1 &
|
||||
sleep 10
|
||||
}
|
||||
|
||||
################ Main ################
|
||||
|
||||
master=false
|
||||
develop=true
|
||||
verbose=false
|
||||
|
||||
for arg in "$@"
|
||||
do
|
||||
case $arg in
|
||||
-v)
|
||||
verbose=true
|
||||
;;
|
||||
|
||||
master)
|
||||
master=true
|
||||
develop=false
|
||||
;;
|
||||
|
||||
develop)
|
||||
master=false
|
||||
develop=true
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if $master ; then
|
||||
echo "Test master branch.."
|
||||
cp /mnt/root/cfg/master/taos.cfg /etc/taos/taos.cfg
|
||||
WORK_DIR=/mnt/root/TDengine.master
|
||||
else
|
||||
echo "Test develop branch.."
|
||||
cp /mnt/root/cfg/10billion/taos.cfg /etc/taos/taos.cfg
|
||||
WORK_DIR=/mnt/root/TDengine
|
||||
fi
|
||||
|
||||
TAOSD_DIR=$WORK_DIR/debug/build/bin
|
||||
TDTEST_DIR=$WORK_DIR/tests/comparisonTest/tdengine
|
||||
|
||||
runTest
|
||||
|
||||
echo "Test done!"
|
|
@ -0,0 +1,107 @@
|
|||
#!/bin/bash
|
||||
|
||||
DATA_DIR=/mnt/root/testdata
|
||||
NUM_LOOP=5
|
||||
|
||||
function printTo {
|
||||
if $verbose ; then
|
||||
echo $1
|
||||
fi
|
||||
}
|
||||
|
||||
TDTESTQ5OUT=tdengineTestQ5.out
|
||||
|
||||
function runTest {
|
||||
totalThroughput=0
|
||||
for i in `seq 1 $NUM_LOOP`; do
|
||||
for c in `seq 1 $clients`; do
|
||||
records[$c]=0
|
||||
spentTime[$c]=0
|
||||
throughput[$c]=0
|
||||
done
|
||||
printTo "loop i:$i, $TDTEST_DIR/tdengineTest \
|
||||
-clients $clients -sql q5.txt"
|
||||
restartTaosd
|
||||
beginMS=`date +%s%3N`
|
||||
$TDTEST_DIR/tdengineTest \
|
||||
-clients $clients -sql $TDTEST_DIR/q5.txt > $TDTESTQ5OUT
|
||||
endMS=`date +%s%3N`
|
||||
totalRecords=0
|
||||
for c in `seq 1 $clients`; do
|
||||
records[$c]=`grep Thread:$c $TDTESTQ5OUT | awk '{print $7}'`
|
||||
totalRecords=`echo "$totalRecords + ${records[$c]}"|bc`
|
||||
done
|
||||
spending=`echo "scale=4; x = ($endMS - $beginMS)/1000; if (x<1) print 0; x"|bc`
|
||||
throughput=`echo "scale=4; x= $totalRecords / $spending; if (x<1) print 0; x" | bc`
|
||||
printTo "spending: $spending sec, throughput: $throughput"
|
||||
totalThroughput=`echo "scale=4; x = $totalThroughput + $throughput; if(x<1) print 0; x"|bc`
|
||||
done
|
||||
avgThrougput=`echo "scale=4; x = $totalThroughput / $NUM_LOOP; if (x<1) print 0; x"|bc`
|
||||
echo "avg Throughput: $avgThrougput"
|
||||
}
|
||||
|
||||
function restartTaosd {
|
||||
printTo "Stop taosd"
|
||||
systemctl stop taosd
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
while [ -n "$PID" ]
|
||||
do
|
||||
pkill -TERM -x taosd
|
||||
sleep 1
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
|
||||
printTo "Start taosd"
|
||||
$TAOSD_DIR/taosd > /dev/null 2>&1 &
|
||||
sleep 10
|
||||
}
|
||||
|
||||
################ Main ################
|
||||
|
||||
master=false
|
||||
develop=true
|
||||
verbose=false
|
||||
|
||||
clients=1
|
||||
|
||||
while : ; do
|
||||
case $1 in
|
||||
-v)
|
||||
verbose=true
|
||||
shift ;;
|
||||
|
||||
master)
|
||||
master=true
|
||||
develop=false
|
||||
shift ;;
|
||||
|
||||
develop)
|
||||
master=false
|
||||
develop=true
|
||||
shift ;;
|
||||
|
||||
-c)
|
||||
clients=$2
|
||||
shift 2;;
|
||||
|
||||
*)
|
||||
break ;;
|
||||
esac
|
||||
done
|
||||
|
||||
if $master ; then
|
||||
printTo "Test master branch.."
|
||||
cp /mnt/root/cfg/master/taos.cfg /etc/taos/taos.cfg
|
||||
WORK_DIR=/mnt/root/TDengine.master
|
||||
else
|
||||
printTo "Test develop branch.."
|
||||
cp /mnt/root/cfg/10billion/taos.cfg /etc/taos/taos.cfg
|
||||
WORK_DIR=/mnt/root/TDengine
|
||||
fi
|
||||
|
||||
TAOSD_DIR=$WORK_DIR/debug/build/bin
|
||||
TDTEST_DIR=$WORK_DIR/tests/comparisonTest/tdengine
|
||||
|
||||
runTest
|
||||
|
||||
printTo "Test done!"
|
|
@ -0,0 +1,116 @@
|
|||
#!/bin/bash
|
||||
|
||||
DATA_DIR=/mnt/root/testdata
|
||||
NUM_LOOP=5
|
||||
NUM_OF_FILES=100
|
||||
|
||||
rowsPerRequest=(1 100 500 1000 2000)
|
||||
|
||||
function printTo {
|
||||
if $verbose ; then
|
||||
echo $1
|
||||
fi
|
||||
}
|
||||
|
||||
function runTest {
|
||||
printf "R/R, "
|
||||
for c in `seq 1 $clients`; do
|
||||
if [ "$c" == "1" ]; then
|
||||
printf "$c client, "
|
||||
else
|
||||
printf "$c clients, "
|
||||
fi
|
||||
done
|
||||
printf "\n"
|
||||
|
||||
for r in ${rowsPerRequest[@]}; do
|
||||
printf "$r, "
|
||||
for c in `seq 1 $clients`; do
|
||||
totalRPR=0
|
||||
for i in `seq 1 $NUM_LOOP`; do
|
||||
restartTaosd
|
||||
$TAOSD_DIR/taos -s "drop database db" > /dev/null 2>&1
|
||||
printTo "loop i:$i, $TDTEST_DIR/tdengineTest \
|
||||
-dataDir $DATA_DIR \
|
||||
-numOfFiles $NUM_OF_FILES \
|
||||
-w -clients $c \
|
||||
-rowsPerRequest $r"
|
||||
RPR=`$TDTEST_DIR/tdengineTest \
|
||||
-dataDir $DATA_DIR \
|
||||
-numOfFiles 1 \
|
||||
-w -clients $c \
|
||||
-rowsPerRequest $r \
|
||||
| grep speed | awk '{print $(NF-1)}'`
|
||||
totalRPR=`echo "scale=4; $totalRPR + $RPR" | bc`
|
||||
printTo "rows:$r, clients:$c, i:$i RPR:$RPR"
|
||||
done
|
||||
avgRPR=`echo "scale=4; $totalRPR / $NUM_LOOP" | bc`
|
||||
printf "$avgRPR, "
|
||||
done
|
||||
printf "\n"
|
||||
done
|
||||
}
|
||||
|
||||
function restartTaosd {
|
||||
printTo "Stop taosd"
|
||||
systemctl stop taosd
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
while [ -n "$PID" ]
|
||||
do
|
||||
pkill -TERM -x taosd
|
||||
sleep 1
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
|
||||
printTo "Start taosd"
|
||||
$TAOSD_DIR/taosd > /dev/null 2>&1 &
|
||||
sleep 10
|
||||
}
|
||||
|
||||
################ Main ################
|
||||
|
||||
master=false
|
||||
develop=true
|
||||
verbose=false
|
||||
clients=1
|
||||
|
||||
while : ; do
|
||||
case $1 in
|
||||
-v)
|
||||
verbose=true
|
||||
shift ;;
|
||||
|
||||
master)
|
||||
master=true
|
||||
develop=false
|
||||
shift ;;
|
||||
|
||||
develop)
|
||||
master=false
|
||||
develop=true
|
||||
shift ;;
|
||||
|
||||
-c)
|
||||
clients=$2
|
||||
shift 2;;
|
||||
*)
|
||||
break ;;
|
||||
esac
|
||||
done
|
||||
|
||||
if $master ; then
|
||||
echo "Test master branch.."
|
||||
cp /mnt/root/cfg/master/taos.cfg /etc/taos/taos.cfg
|
||||
WORK_DIR=/mnt/root/TDengine.master
|
||||
else
|
||||
echo "Test develop branch.."
|
||||
cp /mnt/root/cfg/10billion/taos.cfg /etc/taos/taos.cfg
|
||||
WORK_DIR=/mnt/root/TDengine
|
||||
fi
|
||||
|
||||
TAOSD_DIR=$WORK_DIR/debug/build/bin
|
||||
TDTEST_DIR=$WORK_DIR/tests/comparisonTest/tdengine
|
||||
|
||||
runTest
|
||||
|
||||
echo "Test done!"
|
|
@@ -100,6 +100,8 @@ class TDTestCase:
        # TSIM: sql alter table $mt add tag tgcol4 int
        tdLog.info('alter table %s add tag tgcol4 int' % (mt))
        tdSql.execute('alter table %s add tag tgcol4 int' % (mt))
        tdLog.info('select * from %s where tgcol4=6' % (mt))
        tdSql.query('select * from %s where tgcol4=6' % (mt))
        # TSIM: sql reset query cache
        tdLog.info('reset query cache')
        tdSql.execute('reset query cache')
@@ -178,7 +178,6 @@ class TDDnode:
        self.cfg("walLevel", "2")
        self.cfg("fsync", "1000")
        self.cfg("statusInterval", "1")
        self.cfg("numOfTotalVnodes", "64")
        self.cfg("numOfMnodes", "3")
        self.cfg("numOfThreadsPerCore", "2.0")
        self.cfg("monitor", "0")

@@ -178,7 +178,6 @@ class TDDnode:
        self.cfg("walLevel", "2")
        self.cfg("fsync", "1000")
        self.cfg("statusInterval", "1")
        self.cfg("numOfTotalVnodes", "64")
        self.cfg("numOfMnodes", "3")
        self.cfg("numOfThreadsPerCore", "2.0")
        self.cfg("monitor", "0")

@@ -180,7 +180,6 @@ class TDDnode:
        self.cfg("walLevel", "2")
        self.cfg("fsync", "1000")
        self.cfg("statusInterval", "1")
        self.cfg("numOfTotalVnodes", "64")
        self.cfg("numOfMnodes", "3")
        self.cfg("numOfThreadsPerCore", "2.0")
        self.cfg("monitor", "0")
@ -68,7 +68,7 @@ endi
|
|||
if $data01 != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != NULL then
|
||||
if $data02 != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
@ -80,7 +80,7 @@ endi
|
|||
if $data01 != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data02 != NULL then
|
||||
if $data02 != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
|
|
@ -99,9 +99,7 @@ $db = $dbPrefix . $i
|
|||
$tb = $tbPrefix . $i
|
||||
sql create database $db
|
||||
sql use $db
|
||||
sql create table $tb (ts timestamp, speed int) -x step6
|
||||
return -1
|
||||
step6:
|
||||
sql create table $tb (ts timestamp, speed int)
|
||||
|
||||
print =============== step7
|
||||
$i = 0
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 2
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10
|
||||
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v 10
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000
|
||||
|
||||
|
|
|
@ -20,11 +20,6 @@ system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
|
|||
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
|
||||
system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
|
||||
|
||||
print ========= start dnodes
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 3000
|
||||
|
|
|
@ -20,11 +20,6 @@ system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
|
|||
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
|
||||
system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
|
||||
|
||||
print ========= start dnodes
|
||||
sleep 2000
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
|
|
@ -2,10 +2,6 @@ system sh/stop_dnodes.sh
|
|||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10
|
||||
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 10
|
||||
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 10
|
||||
|
||||
print ========= start dnodes
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 3000
|
||||
|
@ -44,9 +40,6 @@ system sh/stop_dnodes.sh
|
|||
sleep 3000
|
||||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10
|
||||
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 10
|
||||
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 10
|
||||
print ========= start dnodes
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sql connect
|
||||
|
|
|
@ -1,8 +1,5 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10
|
||||
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 10
|
||||
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 10
|
||||
|
||||
print ========= start dnodes
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
|
|
@ -20,11 +20,6 @@ system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
|
|||
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
|
||||
system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
|
||||
|
||||
print ========= start dnodes
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sleep 3000
|
||||
|
|
|
@ -2,7 +2,6 @@ system sh/stop_dnodes.sh
|
|||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
|
||||
|
||||
print ========= start dnodes
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
|
|
@ -3,7 +3,6 @@ system sh/stop_dnodes.sh
|
|||
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c walLevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2000
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/cfg.sh -n dnode1 -c wallevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
|
||||
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
|
|
@ -8,7 +8,6 @@ system sh/deploy.sh -n dnode1 -i 1
|
|||
system sh/cfg.sh -n dnode1 -c walLevel -v 0
|
||||
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v $maxTables
|
||||
system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v $totalVnodes
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v $totalVnodes
|
||||
system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 100000
|
||||
system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 100000
|
||||
system sh/cfg.sh -n dnode1 -c maxShellConns -v 100000
|
||||
|
|
|
@ -20,7 +20,6 @@ system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 30000
|
|||
system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 30000
|
||||
system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 30000
|
||||
system sh/cfg.sh -n dnode1 -c maxShellConns -v 30000
|
||||
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 60
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sql connect
|
||||
|
|
|
@ -126,3 +126,14 @@ endi
|
|||
if $data01 != 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
#add check for out of range first/last query
|
||||
sql select first(ts),last(ts) from first_tb4 where ts>'2018-9-18 1:40:01';
|
||||
if $row != 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select first(ts),last(ts) from first_tb4 where ts<'2018-9-17 8:50:0';
|
||||
if $row != 0 then
|
||||
return -1
|
||||
endi
|
|
@@ -105,6 +105,21 @@ if $data03 != 1 then
return -1
endi

sql select tag1 from st2 limit 20 offset 1
if $rows != 0 then
return -1
endi

sql select tag1 from st2 limit 10 offset 2
if $rows != 0 then
return -1
endi

sql select tag1 from st2 limit 0 offset 0
if $rows != 0 then
return -1
endi
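# st2 appears to expose a single tag row, so a non-zero offset (or limit 0)
# on the tag queries above is expected to return an empty result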

sql create table st3 using mt2 tags (NULL, 'ABC', 103, 'FALSE')
sql select tag1, tag2, tag3, tag5 from st3
if $rows != 1 then

@@ -3,7 +3,6 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 5
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect

@@ -3,7 +3,6 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect

@@ -3,7 +3,6 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect

@@ -3,7 +3,6 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect

@@ -3,7 +3,6 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 10
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect

@@ -26,7 +26,7 @@ sleep 2000
run general/parser/fill.sim
sleep 2000
run general/parser/fill_stb.sim
sleep 2000
sleep 2000
#run general/parser/fill_us.sim #
sleep 2000
run general/parser/first_last.sim

@@ -91,13 +91,11 @@ run general/parser/select_with_tags.sim
sleep 2000
run general/parser/groupby.sim
sleep 2000
run general/parser/tags_filter.sim
sleep 2000
run general/parser/union.sim
sleep 2000
run general/parser/sliding.sim
sleep 2000
run general/parser/fill_us.sim
sleep 2000
run general/parser/tags_filter.sim

#sleep 2000
#run general/parser/repeatStream.sim

@@ -2,7 +2,6 @@ system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect

@@ -1,10 +1,10 @@
system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c debugFlag -v 135
system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135
system sh/exec.sh -n dnode1 -s start
#system sh/stop_dnodes.sh
#
#system sh/deploy.sh -n dnode1 -i 1
#system sh/cfg.sh -n dnode1 -c walLevel -v 0
#system sh/cfg.sh -n dnode1 -c debugFlag -v 135
#system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135
#system sh/exec.sh -n dnode1 -s start
sleep 1000
sql connect

@@ -24,77 +24,77 @@ $mt = $mtPrefix . $i
$j = 1

$mt1 = $mtPrefix . $j

sql drop database if exists $db -x step1
step1:
sql create database if not exists $db maxtables 4
#
#sql drop database if exists $db -x step1
#step1:
#sql create database if not exists $db maxtables 4
sql use $db
sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int)

$i = 0
$t = 1578203484000

while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )

$x = 0
while $x < $rowNum
$ms = $x * 1000
$ms = $ms * 60

$c = $x / 100
$c = $c * 100
$c = $x - $c
$binary = 'binary . $c
$binary = $binary . '
$nchar = 'nchar . $c
$nchar = $nchar . '
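# note: the three assignments above reduce $x modulo 100 ($x / 100 * 100
# subtracted back from $x, assuming integer division), and the quote
# concatenation builds the quoted literals 'binaryN' and 'ncharN' used below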

$t1 = $t + $ms
sql insert into $tb values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar )
$x = $x + 1
endw

$i = $i + 1
endw

sql create table $mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int)

$j = 0
$t = 1578203484000
$rowNum = 1000
$tbNum = 5
$i = 0

while $i < $tbNum
$tb1 = $tbPrefix1 . $j
sql create table $tb1 using $mt1 tags( $i )

$x = 0
while $x < $rowNum
$ms = $x * 1000
$ms = $ms * 60

$c = $x / 100
$c = $c * 100
$c = $x - $c
$binary = 'binary . $c
$binary = $binary . '
$nchar = 'nchar . $c
$nchar = $nchar . '

$t1 = $t + $ms
sql insert into $tb1 values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar )
$x = $x + 1
endw

$i = $i + 1
$j = $j + 1
endw

print sleep 1sec.
sleep 1000
#sql create table $mt (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int)
#
#$i = 0
#$t = 1578203484000
#
#while $i < $tbNum
# $tb = $tbPrefix . $i
# sql create table $tb using $mt tags( $i )
#
# $x = 0
# while $x < $rowNum
# $ms = $x * 1000
# $ms = $ms * 60
#
# $c = $x / 100
# $c = $c * 100
# $c = $x - $c
# $binary = 'binary . $c
# $binary = $binary . '
# $nchar = 'nchar . $c
# $nchar = $nchar . '
#
# $t1 = $t + $ms
# sql insert into $tb values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar )
# $x = $x + 1
# endw
#
# $i = $i + 1
#endw
#
#sql create table $mt1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int)
#
#$j = 0
#$t = 1578203484000
#$rowNum = 1000
#$tbNum = 5
#$i = 0
#
#while $i < $tbNum
# $tb1 = $tbPrefix1 . $j
# sql create table $tb1 using $mt1 tags( $i )
#
# $x = 0
# while $x < $rowNum
# $ms = $x * 1000
# $ms = $ms * 60
#
# $c = $x / 100
# $c = $c * 100
# $c = $x - $c
# $binary = 'binary . $c
# $binary = $binary . '
# $nchar = 'nchar . $c
# $nchar = $nchar . '
#
# $t1 = $t + $ms
# sql insert into $tb1 values ($t1 , $c , $c , $c , $c , $c , $c , $c , $binary , $nchar )
# $x = $x + 1
# endw
#
# $i = $i + 1
# $j = $j + 1
#endw
#
#print sleep 1sec.
#sleep 1000

$i = 1
$tb = $tbPrefix . $i

@@ -222,7 +222,7 @@ endi

print ===========================================tags union
# two super table tag union, limit is not active during retrieve tags query
sql select t1 from union_mt0 union all select t1 from union_mt0 limit 1
sql select t1 from union_mt0 union all select t1 from union_mt0
if $rows != 20 then
return -1
endi

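# each branch of the tag union above returns one t1 value per subtable of
# union_mt0 (apparently 10 subtables, tags 0..9), and limit is not applied to
# tag-only retrieval, so 20 rows in total are expected
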
@@ -235,6 +235,10 @@ if $data90 != 9 then
return -1
endi

#sql select t1 from union_mt0 union all select t1 from union_mt0 limit 1
#if $row != 11 then
# return -1
#endi
#========================================== two super table join subclause
print ================two super table join subclause
sql select avg(union_mt0.c1) as c from union_mt0 interval(1h) limit 10 union all select union_mt1.ts, union_mt1.c1/1.0 as c from union_mt0, union_mt1 where union_mt1.ts=union_mt0.ts and union_mt1.t1=union_mt0.t1 limit 5;
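# the statement above unions an hourly average over super table union_mt0
# (limit 10) with a projected join of union_mt0 and union_mt1 on ts and t1
# (limit 5)
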
@@ -3,7 +3,6 @@ system sh/stop_dnodes.sh

system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/exec.sh -n dnode1 -s start

@@ -8,10 +8,6 @@ system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode2 -c walLevel -v 0
system sh/cfg.sh -n dnode3 -c walLevel -v 0
system sh/cfg.sh -n dnode4 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4

Some files were not shown because too many files have changed in this diff.