From 708dfee8c7652a41bd94b4bd852ac10984ffd810 Mon Sep 17 00:00:00 2001
From: Shuduo Sang
Date: Sun, 25 Apr 2021 19:26:23 +0800
Subject: [PATCH 01/14] [TD-3944]: make default offline threshold to 10 days.
 (#5913)

Co-authored-by: Shuduo Sang
---
 packaging/cfg/taos.cfg   | 2 +-
 src/common/src/tglobal.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index 83b70ed9f8..d3bd7510a3 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -64,7 +64,7 @@
 # monitorInterval 30

 # number of seconds allowed for a dnode to be offline, for cluster only
-# offlineThreshold 8640000
+# offlineThreshold 864000

 # RPC re-try timer, millisecond
 # rpcTimer 300
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 69b01e6c08..c3c159ee45 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -139,7 +139,7 @@ int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
 int8_t tsEnableBalance = 1;
 int8_t tsAlternativeRole = 0;
 int32_t tsBalanceInterval = 300; // seconds
-int32_t tsOfflineThreshold = 86400 * 100; // seconds 100 days
+int32_t tsOfflineThreshold = 86400 * 10;  // seconds of 10 days
 int32_t tsMnodeEqualVnodeNum = 4;
 int8_t tsEnableFlowCtrl = 1;
 int8_t tsEnableSlaveQuery = 1;

From cb220d6572b6a0c6af59bf1264d270f62486cec1 Mon Sep 17 00:00:00 2001
From: 张金富
Date: Sun, 25 Apr 2021 21:59:24 +0800
Subject: [PATCH 02/14] Update importSampleData (#5915)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Update main.go

Refactor the code to follow GoLand conventions

* Update import_config.go

* Add go.mod
---
 importSampleData/README.md               |   2 +-
 importSampleData/app/main.go             | 380 +++++++++--------------
 importSampleData/data/sensor_info.csv    | 200 ++++++------
 importSampleData/go.mod                  |   8 +
 importSampleData/import/import_config.go |  22 +-
 5 files changed, 274 insertions(+), 338 deletions(-)
 create mode 100644 importSampleData/go.mod

diff --git a/importSampleData/README.md b/importSampleData/README.md
index ee3a6e073c..56c5be0da4 100644
--- a/importSampleData/README.md
+++ b/importSampleData/README.md
@@ -97,7 +97,7 @@ go build -o bin/taosimport app/main.go

   Whether to save statistics into TDengine's statistic table: 1 yes, 0 no; default 0.

-* -savetb int
+* -savetb string

   Name of the table that stores the statistics when save is 1; default statistic.

diff --git a/importSampleData/app/main.go b/importSampleData/app/main.go
index 61de6e740c..5fee49734d 100644
--- a/importSampleData/app/main.go
+++ b/importSampleData/app/main.go
@@ -7,7 +7,6 @@ import (
 	"encoding/json"
 	"flag"
 	"fmt"
-	"hash/crc32"
 	"io"
 	"log"
 	"os"
@@ -17,47 +16,55 @@ import (
 	"sync"
 	"time"

-	dataimport "github.com/taosdata/TDengine/importSampleData/import"
+	dataImport "github.com/taosdata/TDengine/importSampleData/import"

 	_ "github.com/taosdata/driver-go/taosSql"
 )

 const (
-	TIMESTAMP = "timestamp"
-	DATETIME = "datetime"
-	MILLISECOND = "millisecond"
-	DEFAULT_STARTTIME int64 = -1
-	DEFAULT_INTERVAL int64 = 1 * 1000
-	DEFAULT_DELAY int64 = -1
-	DEFAULT_STATISTIC_TABLE = "statistic"
+	// the primary key type must be timestamp
+	TIMESTAMP = "timestamp"

-	JSON_FORMAT = "json"
-	CSV_FORMAT = "csv"
-	SUPERTABLE_PREFIX = "s_"
-	SUBTABLE_PREFIX = "t_"
+	// whether the primary key time field in the sample data is in millisecond or datetime format
+	DATETIME = "datetime"
+	MILLISECOND = "millisecond"

-	DRIVER_NAME = "taosSql"
-	STARTTIME_LAYOUT = "2006-01-02 15:04:05.000"
-	INSERT_PREFIX = "insert into "
+	DefaultStartTime int64 = -1
+	DefaultInterval int64 = 1 * 1000 // interval between imported records; only takes effect when auto=1, otherwise the interval is computed from the sample data; in milliseconds, default 1000
+	DefaultDelay int64 = -1 // continuous-import delay when vnum is 0; -1 means it is derived later from the sample intervals
+
+	// name of the table that stores the statistics when save is 1; default statistic
+	DefaultStatisticTable = "statistic"
+
+	// sample data file format, either json or csv
+	JsonFormat = "json"
+	CsvFormat = "csv"
+
+	SuperTablePrefix = "s_" // super table name prefix
+	SubTablePrefix = "t_" // sub table name prefix
+
+	DriverName = "taosSql"
+	StartTimeLayout = "2006-01-02 15:04:05.000"
+	InsertPrefix = "insert into "
 )
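+// Editor's note (illustrative, not part of the original patch): with the flags
+// declared in the var block below, a typical invocation of the import tool
+// built by `go build -o bin/taosimport app/main.go` might look like:
+//
+//	./bin/taosimport -cfg config/cfg.toml -cases sensor_info \
+//		-hnum 100 -vnum 1000 -thread 10 -batch 100 \
+//		-host 127.0.0.1 -port 6030 -user root -password taosdata
+//
+// All flags shown are registered in parseArg(); the values are the documented defaults.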

 var (
-	cfg string
-	cases string
-	hnum int
-	vnum int
-	thread int
-	batch int
-	auto int
-	starttimestr string
-	interval int64
-	host string
-	port int
-	user string
-	password string
-	dropdb int
-	db string
-	dbparam string
+	cfg string // path of the import configuration file describing the sample data files and the matching TDengine settings; default config/cfg.toml
+	cases string // names of the cases to import, as listed under [usecase] in the -cfg file; multiple cases are separated by commas, e.g. sensor_info,camera_detection; default sensor_info
+	hnum int // horizontal scale-out factor for the sample data; if the sample contains one sub table t_0, hnum=2 creates two sub tables derived from the original table name; default 100
+	vnum int // number of times the sample data is replicated along the time axis; 0 means keep importing at the configured interval once the historical data reaches the current time; default 1000, i.e. replicate the sample data 1000 times
+	thread int // number of threads used to import data; default 10
+	batch int // batch size of each import, i.e. how many records one write operation carries; default 100
+	auto int // whether to generate the sample data's primary-key timestamps automatically: 1 yes, 0 no; default 0
+	startTimeStr string // start time of the imported records, in the format "yyyy-MM-dd HH:mm:ss.SSS"; if unset, the smallest timestamp in the sample data is used; if set, the sample's primary-key timestamps are ignored and the import starts from the given value; must be set when auto is 1; default empty
+	interval int64 // interval between imported records; only takes effect when auto=1, otherwise the interval is computed from the sample data; in milliseconds, default 1000
+	host string // IP of the target TDengine server; default 127.0.0.1
+	port int // port of the target TDengine server; default 6030
+	user string // TDengine user name; default root
+	password string // TDengine user password; default taosdata
+	dropdb int // whether to drop the database before importing: 1 yes, 0 no; default 0
+	db string // name of the target TDengine database; default test_yyyyMMdd
+	dbparam string // optional parameters for creating the database when it does not exist, e.g. days 10 cache 16000 ablocks 4; default empty

 	dataSourceName string
 	startTime int64
@@ -72,10 +79,10 @@ var (
 	lastStaticTime time.Time
 	lastTotalRows int64
 	timeTicker *time.Ticker
-	delay int64 // default 10 milliseconds
-	tick int64
-	save int
-	saveTable string
+	delay int64 // interval for continuous importing when vnum is 0; defaults to half of the smallest record interval across all cases, in ms
+	tick int64 // interval for printing statistics; default 2000 ms
+	save int // whether to save statistics into TDengine's statistic table: 1 yes, 0 no; default 0
+	saveTable string // name of the table that stores the statistics when save is 1; default statistic
 )

 type superTableConfig struct {
@@ -83,7 +90,7 @@ type superTableConfig struct {
 	endTime int64
 	cycleTime int64
 	avgInterval int64
-	config dataimport.CaseConfig
+	config dataImport.CaseConfig
 }

 type scaleTableInfo struct {
@@ -92,14 +99,14 @@ type scaleTableInfo struct {
 	insertRows int64
 }

-type tableRows struct {
-	tableName string // tableName
-	value string // values(...)
-}
+//type tableRows struct {
+//	tableName string // tableName
+//	value string // values(...)
+//}
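+// Editor's note: dataRows below implements sort.Interface (Len/Less/Swap, keyed
+// on the primary-key timestamp), which is what lets getSuperTableTimeConfig call
+// sort.Sort(fileRows) to order the sample rows by time before computing intervals.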
+//} type dataRows struct { rows []map[string]interface{} - config dataimport.CaseConfig + config dataImport.CaseConfig } func (rows dataRows) Len() int { @@ -107,9 +114,9 @@ func (rows dataRows) Len() int { } func (rows dataRows) Less(i, j int) bool { - itime := getPrimaryKey(rows.rows[i][rows.config.Timestamp]) - jtime := getPrimaryKey(rows.rows[j][rows.config.Timestamp]) - return itime < jtime + iTime := getPrimaryKey(rows.rows[i][rows.config.Timestamp]) + jTime := getPrimaryKey(rows.rows[j][rows.config.Timestamp]) + return iTime < jTime } func (rows dataRows) Swap(i, j int) { @@ -123,26 +130,26 @@ func getPrimaryKey(value interface{}) int64 { } func init() { - parseArg() //parse argument + parseArg() // parse argument if db == "" { - //db = "go" + // 导入的 TDengine 数据库名称,默认为 test_yyyyMMdd db = fmt.Sprintf("test_%s", time.Now().Format("20060102")) } - if auto == 1 && len(starttimestr) == 0 { + if auto == 1 && len(startTimeStr) == 0 { log.Fatalf("startTime must be set when auto is 1, the format is \"yyyy-MM-dd HH:mm:ss.SSS\" ") } - if len(starttimestr) != 0 { - t, err := time.ParseInLocation(STARTTIME_LAYOUT, strings.TrimSpace(starttimestr), time.Local) + if len(startTimeStr) != 0 { + t, err := time.ParseInLocation(StartTimeLayout, strings.TrimSpace(startTimeStr), time.Local) if err != nil { - log.Fatalf("param startTime %s error, %s\n", starttimestr, err) + log.Fatalf("param startTime %s error, %s\n", startTimeStr, err) } startTime = t.UnixNano() / 1e6 // as millisecond } else { - startTime = DEFAULT_STARTTIME + startTime = DefaultStartTime } dataSourceName = fmt.Sprintf("%s:%s@/tcp(%s:%d)/", user, password, host, port) @@ -154,9 +161,9 @@ func init() { func main() { - importConfig := dataimport.LoadConfig(cfg) + importConfig := dataImport.LoadConfig(cfg) - var caseMinumInterval int64 = -1 + var caseMinInterval int64 = -1 for _, userCase := range strings.Split(cases, ",") { caseConfig, ok := importConfig.UserCases[userCase] @@ -168,7 +175,7 @@ func main() { checkUserCaseConfig(userCase, &caseConfig) - //read file as map array + // read file as map array fileRows := readFile(caseConfig) log.Printf("case [%s] sample data file contains %d rows.\n", userCase, len(fileRows.rows)) @@ -177,31 +184,31 @@ func main() { continue } - _, exists := superTableConfigMap[caseConfig.Stname] + _, exists := superTableConfigMap[caseConfig.StName] if !exists { - superTableConfigMap[caseConfig.Stname] = &superTableConfig{config: caseConfig} + superTableConfigMap[caseConfig.StName] = &superTableConfig{config: caseConfig} } else { - log.Fatalf("the stname of case %s already exist.\n", caseConfig.Stname) + log.Fatalf("the stname of case %s already exist.\n", caseConfig.StName) } var start, cycleTime, avgInterval int64 = getSuperTableTimeConfig(fileRows) // set super table's startTime, cycleTime and avgInterval - superTableConfigMap[caseConfig.Stname].startTime = start - superTableConfigMap[caseConfig.Stname].avgInterval = avgInterval - superTableConfigMap[caseConfig.Stname].cycleTime = cycleTime + superTableConfigMap[caseConfig.StName].startTime = start + superTableConfigMap[caseConfig.StName].cycleTime = cycleTime + superTableConfigMap[caseConfig.StName].avgInterval = avgInterval - if caseMinumInterval == -1 || caseMinumInterval > avgInterval { - caseMinumInterval = avgInterval + if caseMinInterval == -1 || caseMinInterval > avgInterval { + caseMinInterval = avgInterval } - startStr := time.Unix(0, start*int64(time.Millisecond)).Format(STARTTIME_LAYOUT) + startStr := time.Unix(0, 

-	if DEFAULT_DELAY == delay {
+	if DefaultDelay == delay {
 		// default delay
-		delay = caseMinumInterval / 2
+		delay = caseMinInterval / 2
 		if delay < 1 {
 			delay = 1
 		}
@@ -218,7 +225,7 @@ func main() {
 	createSuperTable(superTableConfigMap)
 	log.Printf("create %d superTable ,used %d ms.\n", superTableNum, time.Since(start)/1e6)

-	//create sub table
+	// create sub table
 	start = time.Now()
 	createSubTable(subTableMap)
 	log.Printf("create %d times of %d subtable ,all %d tables, used %d ms.\n", hnum, len(subTableMap), len(scaleTableMap), time.Since(start)/1e6)
@@ -278,7 +285,7 @@ func staticSpeed() {
 	defer connection.Close()

 	if save == 1 {
-		connection.Exec("use " + db)
+		_, _ = connection.Exec("use " + db)
 		_, err := connection.Exec("create table if not exists " + saveTable + "(ts timestamp, speed int)")
 		if err != nil {
 			log.Fatalf("create %s Table error: %s\n", saveTable, err)
@@ -294,12 +301,12 @@ func staticSpeed() {
 		total := getTotalRows(successRows)
 		currentSuccessRows := total - lastTotalRows

-		speed := currentSuccessRows * 1e9 / int64(usedTime)
+		speed := currentSuccessRows * 1e9 / usedTime
 		log.Printf("insert %d rows, used %d ms, speed %d rows/s", currentSuccessRows, usedTime/1e6, speed)

 		if save == 1 {
 			insertSql := fmt.Sprintf("insert into %s values(%d, %d)", saveTable, currentTime.UnixNano()/1e6, speed)
-			connection.Exec(insertSql)
+			_, _ = connection.Exec(insertSql)
 		}

 		lastStaticTime = currentTime
@@ -327,12 +334,13 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i
 	} else {

 		// use the sample data primary timestamp
-		sort.Sort(fileRows) // sort the file data by the primarykey
+		sort.Sort(fileRows) // sort the file data by the primaryKey
 		minTime := getPrimaryKey(fileRows.rows[0][fileRows.config.Timestamp])
 		maxTime := getPrimaryKey(fileRows.rows[len(fileRows.rows)-1][fileRows.config.Timestamp])

 		start = minTime // default startTime use the minTime
-		if DEFAULT_STARTTIME != startTime {
+		// if a start time was configured, use it instead of the sample minimum
+		if DefaultStartTime != startTime {
 			start = startTime
 		}

@@ -350,31 +358,21 @@ func getSuperTableTimeConfig(fileRows dataRows) (start, cycleTime, avgInterval i
 	return
 }

-func createStatisticTable() {
-	connection := getConnection()
-	defer connection.Close()
-
-	_, err := connection.Exec("create table if not exist " + db + "." 
+ saveTable + "(ts timestamp, speed int)") - if err != nil { - log.Fatalf("createStatisticTable error: %s\n", err) - } -} - func createSubTable(subTableMaps map[string]*dataRows) { connection := getConnection() defer connection.Close() - connection.Exec("use " + db) + _, _ = connection.Exec("use " + db) createTablePrefix := "create table if not exists " + var buffer bytes.Buffer for subTableName := range subTableMaps { - superTableName := getSuperTableName(subTableMaps[subTableName].config.Stname) - tagValues := subTableMaps[subTableName].rows[0] // the first rows values as tags + superTableName := getSuperTableName(subTableMaps[subTableName].config.StName) + firstRowValues := subTableMaps[subTableName].rows[0] // the first rows values as tags - buffers := bytes.Buffer{} - // create table t using supertTable tags(...); + // create table t using superTable tags(...); for i := 0; i < hnum; i++ { tableName := getScaleSubTableName(subTableName, i) @@ -384,21 +382,21 @@ func createSubTable(subTableMaps map[string]*dataRows) { } scaleTableNames = append(scaleTableNames, tableName) - buffers.WriteString(createTablePrefix) - buffers.WriteString(tableName) - buffers.WriteString(" using ") - buffers.WriteString(superTableName) - buffers.WriteString(" tags(") + buffer.WriteString(createTablePrefix) + buffer.WriteString(tableName) + buffer.WriteString(" using ") + buffer.WriteString(superTableName) + buffer.WriteString(" tags(") for _, tag := range subTableMaps[subTableName].config.Tags { - tagValue := fmt.Sprintf("%v", tagValues[strings.ToLower(tag.Name)]) - buffers.WriteString("'" + tagValue + "'") - buffers.WriteString(",") + tagValue := fmt.Sprintf("%v", firstRowValues[strings.ToLower(tag.Name)]) + buffer.WriteString("'" + tagValue + "'") + buffer.WriteString(",") } - buffers.Truncate(buffers.Len() - 1) - buffers.WriteString(")") + buffer.Truncate(buffer.Len() - 1) + buffer.WriteString(")") - createTableSql := buffers.String() - buffers.Reset() + createTableSql := buffer.String() + buffer.Reset() //log.Printf("create table: %s\n", createTableSql) _, err := connection.Exec(createTableSql) @@ -420,7 +418,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) { if err != nil { log.Fatalf("drop database error: %s\n", err) } - log.Printf("dropDb: %s\n", dropDbSql) + log.Printf("dropdb: %s\n", dropDbSql) } createDbSql := "create database if not exists " + db + " " + dbparam @@ -431,7 +429,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) { } log.Printf("createDb: %s\n", createDbSql) - connection.Exec("use " + db) + _, _ = connection.Exec("use " + db) prefix := "create table if not exists " var buffer bytes.Buffer @@ -464,7 +462,7 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) { createSql := buffer.String() buffer.Reset() - //log.Printf("supertable: %s\n", createSql) + //log.Printf("superTable: %s\n", createSql) _, err = connection.Exec(createSql) if err != nil { log.Fatalf("create supertable error: %s\n", err) @@ -473,15 +471,15 @@ func createSuperTable(superTableConfigMap map[string]*superTableConfig) { } -func getScaleSubTableName(subTableName string, hnum int) string { - if hnum == 0 { +func getScaleSubTableName(subTableName string, hNum int) string { + if hNum == 0 { return subTableName } - return fmt.Sprintf("%s_%d", subTableName, hnum) + return fmt.Sprintf("%s_%d", subTableName, hNum) } -func getSuperTableName(stname string) string { - return SUPERTABLE_PREFIX + stname +func getSuperTableName(stName string) string { + 
return SuperTablePrefix + stName } /** @@ -499,7 +497,7 @@ func normalizationData(fileRows dataRows, minTime int64) int64 { row[fileRows.config.Timestamp] = getPrimaryKey(row[fileRows.config.Timestamp]) - minTime - subTableName := getSubTableName(tableValue, fileRows.config.Stname) + subTableName := getSubTableName(tableValue, fileRows.config.StName) value, ok := subTableMap[subTableName] if !ok { @@ -527,7 +525,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int continue } - subTableName := getSubTableName(tableValue, fileRows.config.Stname) + subTableName := getSubTableName(tableValue, fileRows.config.StName) value, ok := currSubTableMap[subTableName] if !ok { @@ -543,7 +541,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int } - var maxRows, tableRows int = 0, 0 + var maxRows, tableRows = 0, 0 for tableName := range currSubTableMap { tableRows = len(currSubTableMap[tableName].rows) subTableMap[tableName] = currSubTableMap[tableName] // add to global subTableMap @@ -556,7 +554,7 @@ func normalizationDataWithSameInterval(fileRows dataRows, avgInterval int64) int } func getSubTableName(subTableValue string, superTableName string) string { - return SUBTABLE_PREFIX + subTableValue + "_" + superTableName + return SubTablePrefix + subTableValue + "_" + superTableName } func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []int64) { @@ -564,25 +562,25 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i defer connection.Close() defer wg.Done() - connection.Exec("use " + db) // use db + _, _ = connection.Exec("use " + db) // use db log.Printf("thread-%d start insert into [%d, %d) subtables.\n", threadIndex, start, end) num := 0 subTables := scaleTableNames[start:end] + var buffer bytes.Buffer for { var currSuccessRows int64 var appendRows int var lastTableName string - buffers := bytes.Buffer{} - buffers.WriteString(INSERT_PREFIX) + buffer.WriteString(InsertPrefix) for _, tableName := range subTables { subTableInfo := subTableMap[scaleTableMap[tableName].subTableName] subTableRows := int64(len(subTableInfo.rows)) - superTableConf := superTableConfigMap[subTableInfo.config.Stname] + superTableConf := superTableConfigMap[subTableInfo.config.StName] tableStartTime := superTableConf.startTime var tableEndTime int64 @@ -605,40 +603,35 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i // append if lastTableName != tableName { - buffers.WriteString(tableName) - buffers.WriteString(" values") + buffer.WriteString(tableName) + buffer.WriteString(" values") } lastTableName = tableName - buffers.WriteString("(") - buffers.WriteString(fmt.Sprintf("%v", currentTime)) - buffers.WriteString(",") + buffer.WriteString("(") + buffer.WriteString(fmt.Sprintf("%v", currentTime)) + buffer.WriteString(",") - // fieldNum := len(subTableInfo.config.Fields) for _, field := range subTableInfo.config.Fields { - buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)])) - buffers.WriteString(",") - // if( i != fieldNum -1){ - - // } + buffer.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)])) + buffer.WriteString(",") } - buffers.Truncate(buffers.Len() - 1) - buffers.WriteString(") ") + buffer.Truncate(buffer.Len() - 1) + buffer.WriteString(") ") appendRows++ insertRows++ if appendRows == batch { - // executebatch - insertSql := buffers.String() - connection.Exec("use " + db) + // executeBatch + insertSql := buffer.String() affectedRows 
:= executeBatchInsert(insertSql, connection) successRows[threadIndex] += affectedRows currSuccessRows += affectedRows - buffers.Reset() - buffers.WriteString(INSERT_PREFIX) + buffer.Reset() + buffer.WriteString(InsertPrefix) lastTableName = "" appendRows = 0 } @@ -654,15 +647,14 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i // left := len(rows) if appendRows > 0 { - // executebatch - insertSql := buffers.String() - connection.Exec("use " + db) + // executeBatch + insertSql := buffer.String() affectedRows := executeBatchInsert(insertSql, connection) successRows[threadIndex] += affectedRows currSuccessRows += affectedRows - buffers.Reset() + buffer.Reset() } // log.Printf("thread-%d finished insert %d rows, used %d ms.", threadIndex, currSuccessRows, time.Since(threadStartTime)/1e6) @@ -688,65 +680,10 @@ func insertData(threadIndex, start, end int, wg *sync.WaitGroup, successRows []i } -func buildSql(rows []tableRows) string { - - var lastTableName string - - buffers := bytes.Buffer{} - - for i, row := range rows { - if i == 0 { - lastTableName = row.tableName - buffers.WriteString(INSERT_PREFIX) - buffers.WriteString(row.tableName) - buffers.WriteString(" values") - buffers.WriteString(row.value) - continue - } - - if lastTableName == row.tableName { - buffers.WriteString(row.value) - } else { - buffers.WriteString(" ") - buffers.WriteString(row.tableName) - buffers.WriteString(" values") - buffers.WriteString(row.value) - lastTableName = row.tableName - } - } - - inserSql := buffers.String() - return inserSql -} - -func buildRow(tableName string, currentTime int64, subTableInfo *dataRows, currentRow map[string]interface{}) tableRows { - - tableRows := tableRows{tableName: tableName} - - buffers := bytes.Buffer{} - - buffers.WriteString("(") - buffers.WriteString(fmt.Sprintf("%v", currentTime)) - buffers.WriteString(",") - - for _, field := range subTableInfo.config.Fields { - buffers.WriteString(getFieldValue(currentRow[strings.ToLower(field.Name)])) - buffers.WriteString(",") - } - - buffers.Truncate(buffers.Len() - 1) - buffers.WriteString(")") - - insertSql := buffers.String() - tableRows.value = insertSql - - return tableRows -} - func executeBatchInsert(insertSql string, connection *sql.DB) int64 { - result, error := connection.Exec(insertSql) - if error != nil { - log.Printf("execute insertSql %s error, %s\n", insertSql, error) + result, err := connection.Exec(insertSql) + if err != nil { + log.Printf("execute insertSql %s error, %s\n", insertSql, err) return 0 } affected, _ := result.RowsAffected() @@ -754,7 +691,6 @@ func executeBatchInsert(insertSql string, connection *sql.DB) int64 { affected = 0 } return affected - // return 0 } func getFieldValue(fieldValue interface{}) string { @@ -762,7 +698,7 @@ func getFieldValue(fieldValue interface{}) string { } func getConnection() *sql.DB { - db, err := sql.Open(DRIVER_NAME, dataSourceName) + db, err := sql.Open(DriverName, dataSourceName) if err != nil { panic(err) } @@ -773,19 +709,11 @@ func getSubTableNameValue(suffix interface{}) string { return fmt.Sprintf("%v", suffix) } -func hash(s string) int { - v := int(crc32.ChecksumIEEE([]byte(s))) - if v < 0 { - return -v - } - return v -} - -func readFile(config dataimport.CaseConfig) dataRows { +func readFile(config dataImport.CaseConfig) dataRows { fileFormat := strings.ToLower(config.Format) - if fileFormat == JSON_FORMAT { + if fileFormat == JsonFormat { return readJSONFile(config) - } else if fileFormat == CSV_FORMAT { + } else if fileFormat == 
CsvFormat { return readCSVFile(config) } @@ -793,7 +721,7 @@ func readFile(config dataimport.CaseConfig) dataRows { return dataRows{} } -func readCSVFile(config dataimport.CaseConfig) dataRows { +func readCSVFile(config dataImport.CaseConfig) dataRows { var rows dataRows f, err := os.Open(config.FilePath) if err != nil { @@ -813,7 +741,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows { line := strings.ToLower(string(lineBytes)) titles := strings.Split(line, config.Separator) if len(titles) < 3 { - // need suffix、 primarykey and at least one other field + // need suffix、 primaryKey and at least one other field log.Printf("the first line of file %s should be title row, and at least 3 field.\n", config.FilePath) return rows } @@ -848,7 +776,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows { } // if the primary key valid - primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap) + primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, dataMap) if primaryKeyValue == -1 { log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum) continue @@ -861,7 +789,7 @@ func readCSVFile(config dataimport.CaseConfig) dataRows { return rows } -func readJSONFile(config dataimport.CaseConfig) dataRows { +func readJSONFile(config dataImport.CaseConfig) dataRows { var rows dataRows f, err := os.Open(config.FilePath) @@ -899,7 +827,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows { continue } - primaryKeyValue := getPrimaryKeyMillisec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line) + primaryKeyValue := getPrimaryKeyMilliSec(config.Timestamp, config.TimestampType, config.TimestampTypeFormat, line) if primaryKeyValue == -1 { log.Printf("the Timestamp[%s] of line %d is not valid, will filtered.\n", config.Timestamp, lineNum) continue @@ -916,7 +844,7 @@ func readJSONFile(config dataimport.CaseConfig) dataRows { /** * get primary key as millisecond , otherwise return -1 */ -func getPrimaryKeyMillisec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 { +func getPrimaryKeyMilliSec(key string, valueType string, valueFormat string, line map[string]interface{}) int64 { if !existMapKeyAndNotEmpty(key, line) { return -1 } @@ -971,13 +899,13 @@ func existMapKeyAndNotEmpty(key string, maps map[string]interface{}) bool { return true } -func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) { +func checkUserCaseConfig(caseName string, caseConfig *dataImport.CaseConfig) { - if len(caseConfig.Stname) == 0 { + if len(caseConfig.StName) == 0 { log.Fatalf("the stname of case %s can't be empty\n", caseName) } - caseConfig.Stname = strings.ToLower(caseConfig.Stname) + caseConfig.StName = strings.ToLower(caseConfig.StName) if len(caseConfig.Tags) == 0 { log.Fatalf("the tags of case %s can't be empty\n", caseName) @@ -1029,24 +957,24 @@ func checkUserCaseConfig(caseName string, caseConfig *dataimport.CaseConfig) { } func parseArg() { - flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes usecase and data format.") - flag.StringVar(&cases, "cases", "sensor_info", "usecase for dataset to be imported. 
Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.") + flag.StringVar(&cfg, "cfg", "config/cfg.toml", "configuration file which describes useCase and data format.") + flag.StringVar(&cases, "cases", "sensor_info", "useCase for dataset to be imported. Multiple choices can be separated by comma, for example, -cases sensor_info,camera_detection.") flag.IntVar(&hnum, "hnum", 100, "magnification factor of the sample tables. For example, if hnum is 100 and in the sample data there are 10 tables, then 10x100=1000 tables will be created in the database.") flag.IntVar(&vnum, "vnum", 1000, "copies of the sample records in each table. If set to 0,this program will never stop simulating and importing data even if the timestamp has passed current time.") - flag.Int64Var(&delay, "delay", DEFAULT_DELAY, "the delay time interval(millisecond) to continue generating data when vnum set 0.") + flag.Int64Var(&delay, "delay", DefaultDelay, "the delay time interval(millisecond) to continue generating data when vnum set 0.") flag.Int64Var(&tick, "tick", 2000, "the tick time interval(millisecond) to print statistic info.") flag.IntVar(&save, "save", 0, "whether to save the statistical info into 'statistic' table. 0 is disabled and 1 is enabled.") - flag.StringVar(&saveTable, "savetb", DEFAULT_STATISTIC_TABLE, "the table to save 'statistic' info when save set 1.") + flag.StringVar(&saveTable, "savetb", DefaultStatisticTable, "the table to save 'statistic' info when save set 1.") flag.IntVar(&thread, "thread", 10, "number of threads to import data.") flag.IntVar(&batch, "batch", 100, "rows of records in one import batch.") - flag.IntVar(&auto, "auto", 0, "whether to use the starttime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.") - flag.StringVar(&starttimestr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the ealiest timestamp in the sample data will be set as the starttime.") - flag.Int64Var(&interval, "interval", DEFAULT_INTERVAL, "time inteval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.") + flag.IntVar(&auto, "auto", 0, "whether to use the startTime and interval specified by users when simulating the data. 0 is disabled and 1 is enabled.") + flag.StringVar(&startTimeStr, "start", "", "the starting timestamp of simulated data, in the format of yyyy-MM-dd HH:mm:ss.SSS. If not specified, the earliest timestamp in the sample data will be set as the startTime.") + flag.Int64Var(&interval, "interval", DefaultInterval, "time interval between two consecutive records, in the unit of millisecond. Only valid when auto is 1.") flag.StringVar(&host, "host", "127.0.0.1", "tdengine server ip.") flag.IntVar(&port, "port", 6030, "tdengine server port.") flag.StringVar(&user, "user", "root", "user name to login into the database.") flag.StringVar(&password, "password", "taosdata", "the import tdengine user password") - flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing datbase. 1 is yes and 0 otherwise.") + flag.IntVar(&dropdb, "dropdb", 0, "whether to drop the existing database. 
1 is yes and 0 otherwise.") flag.StringVar(&db, "db", "", "name of the database to store data.") flag.StringVar(&dbparam, "dbparam", "", "database configurations when it is created.") @@ -1066,7 +994,7 @@ func printArg() { fmt.Println("-thread:", thread) fmt.Println("-batch:", batch) fmt.Println("-auto:", auto) - fmt.Println("-start:", starttimestr) + fmt.Println("-start:", startTimeStr) fmt.Println("-interval:", interval) fmt.Println("-host:", host) fmt.Println("-port", port) diff --git a/importSampleData/data/sensor_info.csv b/importSampleData/data/sensor_info.csv index d049c8b004..c5ff898118 100644 --- a/importSampleData/data/sensor_info.csv +++ b/importSampleData/data/sensor_info.csv @@ -899,103 +899,103 @@ devid,location,color,devgroup,ts,temperature,humidity 8, haerbing, yellow, 2, 1575129697000, 31, 16.321497 8, haerbing, yellow, 2, 1575129698000, 25, 15.864515 8, haerbing, yellow, 2, 1575129699000, 25, 16.492443 -9, sijiazhuang, blue, 0, 1575129600000, 23, 16.002889 -9, sijiazhuang, blue, 0, 1575129601000, 26, 17.034610 -9, sijiazhuang, blue, 0, 1575129602000, 29, 12.892319 -9, sijiazhuang, blue, 0, 1575129603000, 34, 15.321807 -9, sijiazhuang, blue, 0, 1575129604000, 29, 12.562642 -9, sijiazhuang, blue, 0, 1575129605000, 32, 17.190246 -9, sijiazhuang, blue, 0, 1575129606000, 19, 15.361774 -9, sijiazhuang, blue, 0, 1575129607000, 26, 15.022364 -9, sijiazhuang, blue, 0, 1575129608000, 31, 14.837084 -9, sijiazhuang, blue, 0, 1575129609000, 25, 11.554289 -9, sijiazhuang, blue, 0, 1575129610000, 21, 15.313973 -9, sijiazhuang, blue, 0, 1575129611000, 27, 18.621783 -9, sijiazhuang, blue, 0, 1575129612000, 31, 18.018101 -9, sijiazhuang, blue, 0, 1575129613000, 23, 14.421450 -9, sijiazhuang, blue, 0, 1575129614000, 28, 10.833142 -9, sijiazhuang, blue, 0, 1575129615000, 33, 18.169837 -9, sijiazhuang, blue, 0, 1575129616000, 21, 18.772730 -9, sijiazhuang, blue, 0, 1575129617000, 24, 18.893146 -9, sijiazhuang, blue, 0, 1575129618000, 24, 10.290187 -9, sijiazhuang, blue, 0, 1575129619000, 23, 17.393345 -9, sijiazhuang, blue, 0, 1575129620000, 30, 12.949215 -9, sijiazhuang, blue, 0, 1575129621000, 19, 19.267621 -9, sijiazhuang, blue, 0, 1575129622000, 33, 14.831735 -9, sijiazhuang, blue, 0, 1575129623000, 21, 14.711125 -9, sijiazhuang, blue, 0, 1575129624000, 16, 17.168485 -9, sijiazhuang, blue, 0, 1575129625000, 17, 16.426433 -9, sijiazhuang, blue, 0, 1575129626000, 19, 13.879050 -9, sijiazhuang, blue, 0, 1575129627000, 21, 18.308168 -9, sijiazhuang, blue, 0, 1575129628000, 17, 10.845681 -9, sijiazhuang, blue, 0, 1575129629000, 20, 10.238272 -9, sijiazhuang, blue, 0, 1575129630000, 19, 19.424976 -9, sijiazhuang, blue, 0, 1575129631000, 31, 13.885909 -9, sijiazhuang, blue, 0, 1575129632000, 15, 19.264740 -9, sijiazhuang, blue, 0, 1575129633000, 30, 12.460645 -9, sijiazhuang, blue, 0, 1575129634000, 27, 17.608036 -9, sijiazhuang, blue, 0, 1575129635000, 25, 13.493812 -9, sijiazhuang, blue, 0, 1575129636000, 19, 10.955939 -9, sijiazhuang, blue, 0, 1575129637000, 24, 11.956587 -9, sijiazhuang, blue, 0, 1575129638000, 15, 19.141381 -9, sijiazhuang, blue, 0, 1575129639000, 24, 14.801530 -9, sijiazhuang, blue, 0, 1575129640000, 17, 14.347318 -9, sijiazhuang, blue, 0, 1575129641000, 29, 14.803237 -9, sijiazhuang, blue, 0, 1575129642000, 28, 10.342297 -9, sijiazhuang, blue, 0, 1575129643000, 29, 19.368282 -9, sijiazhuang, blue, 0, 1575129644000, 31, 17.491654 -9, sijiazhuang, blue, 0, 1575129645000, 18, 13.161736 -9, sijiazhuang, blue, 0, 1575129646000, 17, 16.067354 -9, sijiazhuang, blue, 0, 
1575129647000, 18, 13.736465 -9, sijiazhuang, blue, 0, 1575129648000, 23, 19.103276 -9, sijiazhuang, blue, 0, 1575129649000, 29, 16.075892 -9, sijiazhuang, blue, 0, 1575129650000, 21, 10.728566 -9, sijiazhuang, blue, 0, 1575129651000, 15, 18.921849 -9, sijiazhuang, blue, 0, 1575129652000, 24, 16.914709 -9, sijiazhuang, blue, 0, 1575129653000, 19, 13.501651 -9, sijiazhuang, blue, 0, 1575129654000, 19, 13.538347 -9, sijiazhuang, blue, 0, 1575129655000, 16, 13.261095 -9, sijiazhuang, blue, 0, 1575129656000, 32, 16.315746 -9, sijiazhuang, blue, 0, 1575129657000, 27, 16.400939 -9, sijiazhuang, blue, 0, 1575129658000, 24, 13.321819 -9, sijiazhuang, blue, 0, 1575129659000, 27, 19.070181 -9, sijiazhuang, blue, 0, 1575129660000, 27, 13.040922 -9, sijiazhuang, blue, 0, 1575129661000, 32, 10.872530 -9, sijiazhuang, blue, 0, 1575129662000, 28, 16.428657 -9, sijiazhuang, blue, 0, 1575129663000, 32, 13.883854 -9, sijiazhuang, blue, 0, 1575129664000, 33, 14.299554 -9, sijiazhuang, blue, 0, 1575129665000, 30, 16.445130 -9, sijiazhuang, blue, 0, 1575129666000, 15, 18.059404 -9, sijiazhuang, blue, 0, 1575129667000, 21, 12.348847 -9, sijiazhuang, blue, 0, 1575129668000, 32, 13.315378 -9, sijiazhuang, blue, 0, 1575129669000, 17, 15.689507 -9, sijiazhuang, blue, 0, 1575129670000, 22, 15.591808 -9, sijiazhuang, blue, 0, 1575129671000, 27, 16.386065 -9, sijiazhuang, blue, 0, 1575129672000, 25, 10.564803 -9, sijiazhuang, blue, 0, 1575129673000, 20, 12.276544 -9, sijiazhuang, blue, 0, 1575129674000, 26, 15.828786 -9, sijiazhuang, blue, 0, 1575129675000, 18, 12.236420 -9, sijiazhuang, blue, 0, 1575129676000, 15, 19.439522 -9, sijiazhuang, blue, 0, 1575129677000, 19, 19.831531 -9, sijiazhuang, blue, 0, 1575129678000, 22, 17.115744 -9, sijiazhuang, blue, 0, 1575129679000, 29, 19.879456 -9, sijiazhuang, blue, 0, 1575129680000, 34, 10.207136 -9, sijiazhuang, blue, 0, 1575129681000, 16, 17.633523 -9, sijiazhuang, blue, 0, 1575129682000, 15, 14.227873 -9, sijiazhuang, blue, 0, 1575129683000, 34, 12.027768 -9, sijiazhuang, blue, 0, 1575129684000, 22, 11.376610 -9, sijiazhuang, blue, 0, 1575129685000, 21, 11.711299 -9, sijiazhuang, blue, 0, 1575129686000, 33, 14.281126 -9, sijiazhuang, blue, 0, 1575129687000, 31, 10.895302 -9, sijiazhuang, blue, 0, 1575129688000, 31, 13.971350 -9, sijiazhuang, blue, 0, 1575129689000, 15, 15.262790 -9, sijiazhuang, blue, 0, 1575129690000, 23, 12.440568 -9, sijiazhuang, blue, 0, 1575129691000, 32, 19.731267 -9, sijiazhuang, blue, 0, 1575129692000, 22, 10.518092 -9, sijiazhuang, blue, 0, 1575129693000, 34, 17.863021 -9, sijiazhuang, blue, 0, 1575129694000, 28, 11.478909 -9, sijiazhuang, blue, 0, 1575129695000, 16, 15.075524 -9, sijiazhuang, blue, 0, 1575129696000, 16, 10.292127 -9, sijiazhuang, blue, 0, 1575129697000, 22, 13.716012 -9, sijiazhuang, blue, 0, 1575129698000, 32, 10.906551 -9, sijiazhuang, blue, 0, 1575129699000, 19, 18.386868 \ No newline at end of file +9, shijiazhuang, blue, 0, 1575129600000, 23, 16.002889 +9, shijiazhuang, blue, 0, 1575129601000, 26, 17.034610 +9, shijiazhuang, blue, 0, 1575129602000, 29, 12.892319 +9, shijiazhuang, blue, 0, 1575129603000, 34, 15.321807 +9, shijiazhuang, blue, 0, 1575129604000, 29, 12.562642 +9, shijiazhuang, blue, 0, 1575129605000, 32, 17.190246 +9, shijiazhuang, blue, 0, 1575129606000, 19, 15.361774 +9, shijiazhuang, blue, 0, 1575129607000, 26, 15.022364 +9, shijiazhuang, blue, 0, 1575129608000, 31, 14.837084 +9, shijiazhuang, blue, 0, 1575129609000, 25, 11.554289 +9, shijiazhuang, blue, 0, 1575129610000, 21, 15.313973 +9, shijiazhuang, 
blue, 0, 1575129611000, 27, 18.621783 +9, shijiazhuang, blue, 0, 1575129612000, 31, 18.018101 +9, shijiazhuang, blue, 0, 1575129613000, 23, 14.421450 +9, shijiazhuang, blue, 0, 1575129614000, 28, 10.833142 +9, shijiazhuang, blue, 0, 1575129615000, 33, 18.169837 +9, shijiazhuang, blue, 0, 1575129616000, 21, 18.772730 +9, shijiazhuang, blue, 0, 1575129617000, 24, 18.893146 +9, shijiazhuang, blue, 0, 1575129618000, 24, 10.290187 +9, shijiazhuang, blue, 0, 1575129619000, 23, 17.393345 +9, shijiazhuang, blue, 0, 1575129620000, 30, 12.949215 +9, shijiazhuang, blue, 0, 1575129621000, 19, 19.267621 +9, shijiazhuang, blue, 0, 1575129622000, 33, 14.831735 +9, shijiazhuang, blue, 0, 1575129623000, 21, 14.711125 +9, shijiazhuang, blue, 0, 1575129624000, 16, 17.168485 +9, shijiazhuang, blue, 0, 1575129625000, 17, 16.426433 +9, shijiazhuang, blue, 0, 1575129626000, 19, 13.879050 +9, shijiazhuang, blue, 0, 1575129627000, 21, 18.308168 +9, shijiazhuang, blue, 0, 1575129628000, 17, 10.845681 +9, shijiazhuang, blue, 0, 1575129629000, 20, 10.238272 +9, shijiazhuang, blue, 0, 1575129630000, 19, 19.424976 +9, shijiazhuang, blue, 0, 1575129631000, 31, 13.885909 +9, shijiazhuang, blue, 0, 1575129632000, 15, 19.264740 +9, shijiazhuang, blue, 0, 1575129633000, 30, 12.460645 +9, shijiazhuang, blue, 0, 1575129634000, 27, 17.608036 +9, shijiazhuang, blue, 0, 1575129635000, 25, 13.493812 +9, shijiazhuang, blue, 0, 1575129636000, 19, 10.955939 +9, shijiazhuang, blue, 0, 1575129637000, 24, 11.956587 +9, shijiazhuang, blue, 0, 1575129638000, 15, 19.141381 +9, shijiazhuang, blue, 0, 1575129639000, 24, 14.801530 +9, shijiazhuang, blue, 0, 1575129640000, 17, 14.347318 +9, shijiazhuang, blue, 0, 1575129641000, 29, 14.803237 +9, shijiazhuang, blue, 0, 1575129642000, 28, 10.342297 +9, shijiazhuang, blue, 0, 1575129643000, 29, 19.368282 +9, shijiazhuang, blue, 0, 1575129644000, 31, 17.491654 +9, shijiazhuang, blue, 0, 1575129645000, 18, 13.161736 +9, shijiazhuang, blue, 0, 1575129646000, 17, 16.067354 +9, shijiazhuang, blue, 0, 1575129647000, 18, 13.736465 +9, shijiazhuang, blue, 0, 1575129648000, 23, 19.103276 +9, shijiazhuang, blue, 0, 1575129649000, 29, 16.075892 +9, shijiazhuang, blue, 0, 1575129650000, 21, 10.728566 +9, shijiazhuang, blue, 0, 1575129651000, 15, 18.921849 +9, shijiazhuang, blue, 0, 1575129652000, 24, 16.914709 +9, shijiazhuang, blue, 0, 1575129653000, 19, 13.501651 +9, shijiazhuang, blue, 0, 1575129654000, 19, 13.538347 +9, shijiazhuang, blue, 0, 1575129655000, 16, 13.261095 +9, shijiazhuang, blue, 0, 1575129656000, 32, 16.315746 +9, shijiazhuang, blue, 0, 1575129657000, 27, 16.400939 +9, shijiazhuang, blue, 0, 1575129658000, 24, 13.321819 +9, shijiazhuang, blue, 0, 1575129659000, 27, 19.070181 +9, shijiazhuang, blue, 0, 1575129660000, 27, 13.040922 +9, shijiazhuang, blue, 0, 1575129661000, 32, 10.872530 +9, shijiazhuang, blue, 0, 1575129662000, 28, 16.428657 +9, shijiazhuang, blue, 0, 1575129663000, 32, 13.883854 +9, shijiazhuang, blue, 0, 1575129664000, 33, 14.299554 +9, shijiazhuang, blue, 0, 1575129665000, 30, 16.445130 +9, shijiazhuang, blue, 0, 1575129666000, 15, 18.059404 +9, shijiazhuang, blue, 0, 1575129667000, 21, 12.348847 +9, shijiazhuang, blue, 0, 1575129668000, 32, 13.315378 +9, shijiazhuang, blue, 0, 1575129669000, 17, 15.689507 +9, shijiazhuang, blue, 0, 1575129670000, 22, 15.591808 +9, shijiazhuang, blue, 0, 1575129671000, 27, 16.386065 +9, shijiazhuang, blue, 0, 1575129672000, 25, 10.564803 +9, shijiazhuang, blue, 0, 1575129673000, 20, 12.276544 +9, shijiazhuang, blue, 0, 1575129674000, 
26, 15.828786 +9, shijiazhuang, blue, 0, 1575129675000, 18, 12.236420 +9, shijiazhuang, blue, 0, 1575129676000, 15, 19.439522 +9, shijiazhuang, blue, 0, 1575129677000, 19, 19.831531 +9, shijiazhuang, blue, 0, 1575129678000, 22, 17.115744 +9, shijiazhuang, blue, 0, 1575129679000, 29, 19.879456 +9, shijiazhuang, blue, 0, 1575129680000, 34, 10.207136 +9, shijiazhuang, blue, 0, 1575129681000, 16, 17.633523 +9, shijiazhuang, blue, 0, 1575129682000, 15, 14.227873 +9, shijiazhuang, blue, 0, 1575129683000, 34, 12.027768 +9, shijiazhuang, blue, 0, 1575129684000, 22, 11.376610 +9, shijiazhuang, blue, 0, 1575129685000, 21, 11.711299 +9, shijiazhuang, blue, 0, 1575129686000, 33, 14.281126 +9, shijiazhuang, blue, 0, 1575129687000, 31, 10.895302 +9, shijiazhuang, blue, 0, 1575129688000, 31, 13.971350 +9, shijiazhuang, blue, 0, 1575129689000, 15, 15.262790 +9, shijiazhuang, blue, 0, 1575129690000, 23, 12.440568 +9, shijiazhuang, blue, 0, 1575129691000, 32, 19.731267 +9, shijiazhuang, blue, 0, 1575129692000, 22, 10.518092 +9, shijiazhuang, blue, 0, 1575129693000, 34, 17.863021 +9, shijiazhuang, blue, 0, 1575129694000, 28, 11.478909 +9, shijiazhuang, blue, 0, 1575129695000, 16, 15.075524 +9, shijiazhuang, blue, 0, 1575129696000, 16, 10.292127 +9, shijiazhuang, blue, 0, 1575129697000, 22, 13.716012 +9, shijiazhuang, blue, 0, 1575129698000, 32, 10.906551 +9, shijiazhuang, blue, 0, 1575129699000, 19, 18.386868 \ No newline at end of file diff --git a/importSampleData/go.mod b/importSampleData/go.mod new file mode 100644 index 0000000000..fa1d978e59 --- /dev/null +++ b/importSampleData/go.mod @@ -0,0 +1,8 @@ +module github.com/taosdata/TDengine/importSampleData + +go 1.13 + +require ( + github.com/pelletier/go-toml v1.9.0 // indirect + github.com/taosdata/driver-go v0.0.0-20210415143420-d99751356e28 // indirect +) diff --git a/importSampleData/import/import_config.go b/importSampleData/import/import_config.go index e7942cc505..fdaeeab7da 100644 --- a/importSampleData/import/import_config.go +++ b/importSampleData/import/import_config.go @@ -14,23 +14,23 @@ var ( once sync.Once ) -// Config inclue all scene import config +// Config include all scene import config type Config struct { UserCases map[string]CaseConfig } // CaseConfig include the sample data config and tdengine config type CaseConfig struct { - Format string - FilePath string - Separator string - Stname string - SubTableName string - Timestamp string - TimestampType string - TimestampTypeFormat string - Tags []FieldInfo - Fields []FieldInfo + Format string + FilePath string + Separator string + StName string + SubTableName string + Timestamp string + TimestampType string + TimestampTypeFormat string + Tags []FieldInfo + Fields []FieldInfo } // FieldInfo is field or tag info From 2aa0f15ea23058dbaf82eb52abe07ec48136589c Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 26 Apr 2021 13:36:23 +0800 Subject: [PATCH 03/14] Feature/sangshuduo/td 3317 taosdemo interlace (#5922) * [TD-3316] : add testcase for taosdemo limit and offset. check offset 0. * [TD-3316] : add testcase for taosdemo limit and offset. fix sample file import bug. * [TD-3316] : add test case for limit and offset. fix sample data issue. * [TD-3327] : fix taosdemo segfault when import data from sample data file. * [TD-3317] : make taosdemo support interlace mode. json parameter rows_per_tbl support. * [TD-3317] : support interlace mode. refactor * [TD-3317] : support interlace mode. refactor * [TD-3317] : support interlace mode insertion. refactor. 
* [TD-3317] : support interlace mode insertion. change json file. * [TD-3317] : support interlace mode insertion. fix multithread create table regression. * [TD-3317] : support interlace mode insertion. working but not perfect. * [TD-3317] : support interlace mode insertion. rename lowaTest with taosdemoTestWithJson * [TD-3317] : support interlace mode insertion. perfect * [TD-3317] : support interlace mode insertion. cleanup. * [TD-3317] : support interlace mode insertion. adjust algorithm of loop times. * [TD-3317] : support interlace mode insertion. fix delay time bug. * [TD-3317] : support interlace mode insertion. fix progressive timestamp bug. * [TD-3317] : support interlace mode insertion. add an option for performance print. * [TD-3317] : support interlace mode insertion. change json test case with less table for acceleration. * [TD-3317] : support interlace mode insertion. change progressive mode timestamp step and testcase. * [TD-3197] : fix taosdemo coverity scan issues. * [TD-3197] : fix taosdemo coverity scan issue. fix subscribeTest pids uninitialized. * [TD-3317] : support interlace mode insertion. add time shift for no sleep time. * [TD-3317] : support interlace insert. rework timestamp. * [TD-3317] : support interlace mode insertion. change rows_per_tbl to interlace_rows. * [TD-3317] : taosdemo suppoert interlace mode. remove trailing spaces. * [TD-3317] : taosdemo support interlace insertion. prompt if interlace > num_of_records_per_req * fill insert-into early to buffer. * fix buffer overflow issue. * change rows_per_tbl to interlace_rows to align with taosdemo. * adjust remainder rows logic. * [TD-3317]: taosdemo support interlace mode. fix global and stable interlace rows logic. * [TD-3317]: taosdemo support interlace mode. fix 'interlaceRows' is used uninitialized Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 1744 ++++++++++++++++++----------------- 1 file changed, 885 insertions(+), 859 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 3631fbec1c..68292692fa 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -252,7 +252,7 @@ typedef struct SSuperTable_S { int maxSqlLen; // int insertInterval; // insert interval, will override global insert interval - int64_t insertRows; // 0: no limit + int64_t insertRows; int timeStampStep; char startTimestamp[MAX_TB_NAME_SIZE]; char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json @@ -530,50 +530,50 @@ char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", "max(col0)", "min(col0)", "first(col0)", "last(col0)"}; SArguments g_args = { - NULL, // metaFile - 0, // test_mode - "127.0.0.1", // host - 6030, // port - "root", // user -#ifdef _TD_POWER_ - "powerdb", // password -#else - "taosdata", // password -#endif - "test", // database - 1, // replica - "t", // tb_prefix - NULL, // sqlFile - true, // use_metric - true, // drop_database - true, // insert_only - false, // debug_print - false, // verbose_print - false, // performance statistic print - false, // answer_yes; - "./output.txt", // output_file - 0, // mode : sync or async - { - "INT", // datatype - "INT", // datatype - "INT", // datatype - "INT", // datatype - }, - 16, // len_of_binary - 4, // num_of_CPR - 10, // num_of_connections/thread - 0, // insert_interval - 1, // query_times - 0, // interlace_rows; - 30000, // num_of_RPR - 1024000, // max_sql_len - 10000, // num_of_tables - 10000, // num_of_DPT - 0, // abort - 0, // disorderRatio - 1000, // disorderRange - 1, // method_of_delete - 
NULL // arg_list + NULL, // metaFile + 0, // test_mode + "127.0.0.1", // host + 6030, // port + "root", // user + #ifdef _TD_POWER_ + "powerdb", // password + #else + "taosdata", // password + #endif + "test", // database + 1, // replica + "t", // tb_prefix + NULL, // sqlFile + true, // use_metric + true, // drop_database + true, // insert_only + false, // debug_print + false, // verbose_print + false, // performance statistic print + false, // answer_yes; + "./output.txt", // output_file + 0, // mode : sync or async + { + "INT", // datatype + "INT", // datatype + "INT", // datatype + "INT", // datatype + }, + 16, // len_of_binary + 4, // num_of_CPR + 10, // num_of_connections/thread + 0, // insert_interval + 1, // query_times + 0, // interlace_rows; + 30000, // num_of_RPR + 1024000, // max_sql_len + 10000, // num_of_tables + 10000, // num_of_DPT + 0, // abort + 0, // disorderRatio + 1000, // disorderRange + 1, // method_of_delete + NULL // arg_list }; @@ -733,7 +733,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->host = argv[++i]; } else if (strcmp(argv[i], "-p") == 0) { if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-p need a number following!\n"); exit(EXIT_FAILURE); @@ -793,7 +793,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->insert_interval = atoi(argv[++i]); } else if (strcmp(argv[i], "-qt") == 0) { if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-qt need a number following!\n"); exit(EXIT_FAILURE); @@ -801,7 +801,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->query_times = atoi(argv[++i]); } else if (strcmp(argv[i], "-B") == 0) { if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-B need a number following!\n"); exit(EXIT_FAILURE); @@ -852,14 +852,14 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { if (strstr(argv[i], ",") == NULL) { // only one col if (strcasecmp(argv[i], "INT") - && strcasecmp(argv[i], "FLOAT") - && strcasecmp(argv[i], "TINYINT") - && strcasecmp(argv[i], "BOOL") - && strcasecmp(argv[i], "SMALLINT") - && strcasecmp(argv[i], "BIGINT") - && strcasecmp(argv[i], "DOUBLE") - && strcasecmp(argv[i], "BINARY") - && strcasecmp(argv[i], "NCHAR")) { + && strcasecmp(argv[i], "FLOAT") + && strcasecmp(argv[i], "TINYINT") + && strcasecmp(argv[i], "BOOL") + && strcasecmp(argv[i], "SMALLINT") + && strcasecmp(argv[i], "BIGINT") + && strcasecmp(argv[i], "DOUBLE") + && strcasecmp(argv[i], "BINARY") + && strcasecmp(argv[i], "NCHAR")) { printHelp(); errorPrint("%s", "-b: Invalid data_type!\n"); exit(EXIT_FAILURE); @@ -873,14 +873,14 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { char *token = strsep(&running, ","); while(token != NULL) { if (strcasecmp(token, "INT") - && strcasecmp(token, "FLOAT") - && strcasecmp(token, "TINYINT") - && strcasecmp(token, "BOOL") - && strcasecmp(token, "SMALLINT") - && strcasecmp(token, "BIGINT") - && strcasecmp(token, "DOUBLE") - && strcasecmp(token, "BINARY") - && strcasecmp(token, "NCHAR")) { + && strcasecmp(token, "FLOAT") + && strcasecmp(token, "TINYINT") + && strcasecmp(token, "BOOL") + && strcasecmp(token, "SMALLINT") + && strcasecmp(token, "BIGINT") + && strcasecmp(token, "DOUBLE") + && strcasecmp(token, "BINARY") + && strcasecmp(token, "NCHAR")) { 
printHelp(); free(dupstr); errorPrint("%s", "-b: Invalid data_type!\n"); @@ -895,7 +895,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } } else if (strcmp(argv[i], "-w") == 0) { if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-w need a number following!\n"); exit(EXIT_FAILURE); @@ -903,7 +903,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->len_of_binary = atoi(argv[++i]); } else if (strcmp(argv[i], "-m") == 0) { if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + (!isStringNumber(argv[i+1]))) { printHelp(); errorPrint("%s", "\n\t-m need a number following!\n"); exit(EXIT_FAILURE); @@ -983,23 +983,23 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } if (((arguments->debug_print) && (arguments->metaFile == NULL)) - || arguments->verbose_print) { + || arguments->verbose_print) { printf("###################################################################\n"); printf("# meta file: %s\n", arguments->metaFile); printf("# Server IP: %s:%hu\n", - arguments->host == NULL ? "localhost" : arguments->host, - arguments->port ); + arguments->host == NULL ? "localhost" : arguments->host, + arguments->port ); printf("# User: %s\n", arguments->user); printf("# Password: %s\n", arguments->password); printf("# Use metric: %s\n", arguments->use_metric ? "true" : "false"); if (*(arguments->datatype)) { - printf("# Specified data type: "); - for (int i = 0; i < MAX_NUM_DATATYPE; i++) - if (arguments->datatype[i]) - printf("%s,", arguments->datatype[i]); - else - break; - printf("\n"); + printf("# Specified data type: "); + for (int i = 0; i < MAX_NUM_DATATYPE; i++) + if (arguments->datatype[i]) + printf("%s,", arguments->datatype[i]); + else + break; + printf("\n"); } printf("# Insertion interval: %d\n", arguments->insert_interval); printf("# Number of records per req: %d\n", arguments->num_of_RPR); @@ -1132,7 +1132,7 @@ static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) TAOS_RES *res = taos_query(taos, command); if (res == NULL || taos_errno(res) != 0) { errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", - __func__, __LINE__, command, taos_errstr(res)); + __func__, __LINE__, command, taos_errstr(res)); taos_free_result(res); return; } @@ -1235,7 +1235,7 @@ static void init_rand_data() { g_args.metaFile); } while(0) static int printfInsertMeta() { - SHOW_PARSE_RESULT_START(); + SHOW_PARSE_RESULT_START(); printf("host: \033[33m%s:%u\033[0m\n", g_Dbs.host, g_Dbs.port); printf("user: \033[33m%s\033[0m\n", g_Dbs.user); @@ -1297,23 +1297,23 @@ static int printfInsertMeta() { } if (g_Dbs.db[i].dbCfg.precision[0] != 0) { if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { printf(" precision: \033[33m%s\033[0m\n", - g_Dbs.db[i].dbCfg.precision); + g_Dbs.db[i].dbCfg.precision); } else { printf("\033[1m\033[40;31m precision error: %s\033[0m\n", - g_Dbs.db[i].dbCfg.precision); + g_Dbs.db[i].dbCfg.precision); return -1; } } printf(" super table count: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTblCount); + g_Dbs.db[i].superTblCount); for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { printf(" super table[\033[33m%d\033[0m]:\n", j); printf(" stbName: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sTblName); + g_Dbs.db[i].superTbls[j].sTblName); if (PRE_CREATE_SUBTBL == 
g_Dbs.db[i].superTbls[j].autoCreateTable) { printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); @@ -1332,23 +1332,23 @@ static int printfInsertMeta() { } printf(" childTblCount: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblCount); + g_Dbs.db[i].superTbls[j].childTblCount); printf(" childTblPrefix: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblPrefix); + g_Dbs.db[i].superTbls[j].childTblPrefix); printf(" dataSource: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].dataSource); + g_Dbs.db[i].superTbls[j].dataSource); printf(" insertMode: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].insertMode); + g_Dbs.db[i].superTbls[j].insertMode); if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) { printf(" childTblLimit: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblLimit); + g_Dbs.db[i].superTbls[j].childTblLimit); } if (g_Dbs.db[i].superTbls[j].childTblOffset >= 0) { printf(" childTblOffset: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblOffset); + g_Dbs.db[i].superTbls[j].childTblOffset); } printf(" insertRows: \033[33m%"PRId64"\033[0m\n", - g_Dbs.db[i].superTbls[j].insertRows); + g_Dbs.db[i].superTbls[j].insertRows); if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); @@ -1356,61 +1356,61 @@ static int printfInsertMeta() { printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); } printf(" interlaceRows: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].interlaceRows); + g_Dbs.db[i].superTbls[j].interlaceRows); if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { printf(" stable insert interval: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].insertInterval); + g_Dbs.db[i].superTbls[j].insertInterval); } printf(" disorderRange: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].disorderRange); + g_Dbs.db[i].superTbls[j].disorderRange); printf(" disorderRatio: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].disorderRatio); + g_Dbs.db[i].superTbls[j].disorderRatio); printf(" maxSqlLen: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].maxSqlLen); + g_Dbs.db[i].superTbls[j].maxSqlLen); printf(" timeStampStep: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].timeStampStep); + g_Dbs.db[i].superTbls[j].timeStampStep); printf(" startTimestamp: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].startTimestamp); + g_Dbs.db[i].superTbls[j].startTimestamp); printf(" sampleFormat: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sampleFormat); + g_Dbs.db[i].superTbls[j].sampleFormat); printf(" sampleFile: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sampleFile); + g_Dbs.db[i].superTbls[j].sampleFile); printf(" tagsFile: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].tagsFile); + g_Dbs.db[i].superTbls[j].tagsFile); printf(" columnCount: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].columnCount); + g_Dbs.db[i].superTbls[j].columnCount); for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, - "binary", 6)) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, - "nchar", 5))) { + "binary", 6)) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, + "nchar", 5))) { printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType, - g_Dbs.db[i].superTbls[j].columns[k].dataLen); + g_Dbs.db[i].superTbls[j].columns[k].dataType, + 
g_Dbs.db[i].superTbls[j].columns[k].dataLen); } else { printf("column[%d]:\033[33m%s\033[0m ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType); + g_Dbs.db[i].superTbls[j].columns[k].dataType); } } printf("\n"); printf(" tagCount: \033[33m%d\033[0m\n ", - g_Dbs.db[i].superTbls[j].tagCount); + g_Dbs.db[i].superTbls[j].tagCount); for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "binary", strlen("binary"))) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "nchar", strlen("nchar")))) { + "binary", strlen("binary"))) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "nchar", strlen("nchar")))) { printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType, - g_Dbs.db[i].superTbls[j].tags[k].dataLen); + g_Dbs.db[i].superTbls[j].tags[k].dataType, + g_Dbs.db[i].superTbls[j].tags[k].dataLen); } else { printf("tag[%d]:\033[33m%s\033[0m ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType); + g_Dbs.db[i].superTbls[j].tags[k].dataType); } } printf("\n"); @@ -1484,7 +1484,7 @@ static void printfInsertMetaToFile(FILE* fp) { } if (g_Dbs.db[i].dbCfg.precision[0] != 0) { if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { fprintf(fp, " precision: %s\n", g_Dbs.db[i].dbCfg.precision); } else { fprintf(fp, " precision error: %s\n", g_Dbs.db[i].dbCfg.precision); @@ -1551,10 +1551,10 @@ static void printfInsertMetaToFile(FILE* fp) { for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); if ((0 == strncasecmp( - g_Dbs.db[i].superTbls[j].columns[k].dataType, - "binary", strlen("binary"))) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, - "nchar", strlen("nchar")))) { + g_Dbs.db[i].superTbls[j].columns[k].dataType, + "binary", strlen("binary"))) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, + "nchar", strlen("nchar")))) { fprintf(fp, "column[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); @@ -1569,9 +1569,9 @@ static void printfInsertMetaToFile(FILE* fp) { for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "binary", strlen("binary"))) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "nchar", strlen("nchar")))) { + "binary", strlen("binary"))) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "nchar", strlen("nchar")))) { fprintf(fp, "tag[%d]:%s(%d) ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); } else { @@ -1591,67 +1591,67 @@ static void printfQueryMeta() { SHOW_PARSE_RESULT_START(); printf("host: \033[33m%s:%u\033[0m\n", - g_queryInfo.host, g_queryInfo.port); + g_queryInfo.host, g_queryInfo.port); printf("user: \033[33m%s\033[0m\n", g_queryInfo.user); printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); printf("\n"); printf("specified table query info: \n"); printf("query interval: 
\033[33m%d ms\033[0m\n", - g_queryInfo.specifiedQueryInfo.queryInterval); + g_queryInfo.specifiedQueryInfo.queryInterval); printf("top query times:\033[33m%d\033[0m\n", g_args.query_times); printf("concurrent: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.concurrent); + g_queryInfo.specifiedQueryInfo.concurrent); printf("sqlCount: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.sqlCount); + g_queryInfo.specifiedQueryInfo.sqlCount); printf("specified tbl query times:\n"); printf(" \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.queryTimes); + g_queryInfo.specifiedQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { printf("mod: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.mode); + g_queryInfo.specifiedQueryInfo.mode); printf("interval: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeInterval); + g_queryInfo.specifiedQueryInfo.subscribeInterval); printf("restart: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeRestart); + g_queryInfo.specifiedQueryInfo.subscribeRestart); printf("keepProgress: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); } for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { printf(" sql[%d]: \033[33m%s\033[0m\n", - i, g_queryInfo.specifiedQueryInfo.sql[i]); + i, g_queryInfo.specifiedQueryInfo.sql[i]); } printf("\n"); printf("super table query info:\n"); printf("query interval: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.queryInterval); + g_queryInfo.superQueryInfo.queryInterval); printf("threadCnt: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.threadCnt); + g_queryInfo.superQueryInfo.threadCnt); printf("childTblCount: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.childTblCount); + g_queryInfo.superQueryInfo.childTblCount); printf("stable name: \033[33m%s\033[0m\n", - g_queryInfo.superQueryInfo.sTblName); + g_queryInfo.superQueryInfo.sTblName); printf("stb query times:\033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.queryTimes); + g_queryInfo.superQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { printf("mod: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.mode); + g_queryInfo.superQueryInfo.mode); printf("interval: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeInterval); + g_queryInfo.superQueryInfo.subscribeInterval); printf("restart: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeRestart); + g_queryInfo.superQueryInfo.subscribeRestart); printf("keepProgress: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeKeepProgress); + g_queryInfo.superQueryInfo.subscribeKeepProgress); } printf("sqlCount: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.sqlCount); + g_queryInfo.superQueryInfo.sqlCount); for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { printf(" sql[%d]: \033[33m%s\033[0m\n", - i, g_queryInfo.superQueryInfo.sql[i]); + i, g_queryInfo.superQueryInfo.sql[i]); } printf("\n"); @@ -1691,7 +1691,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { } static void xDumpFieldToFile(FILE* fp, const char* val, - TAOS_FIELD* field, int32_t length, int precision) { + TAOS_FIELD* field, int32_t length, int precision) { if (val == NULL) { fprintf(fp, "%s", TSDB_DATA_NULL_STR); @@ -1798,7 +1798,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { while((row = taos_fetch_row(res)) != NULL) { // sys database name : 'log' if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", - 
fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { continue; } @@ -1809,10 +1809,10 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { } tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes); + fields[TSDB_SHOW_DB_NAME_INDEX].bytes); formatTimestamp(dbInfos[count]->create_time, - *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], - TSDB_TIME_PRECISION_MILLI); + *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], + TSDB_TIME_PRECISION_MILLI); dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); @@ -1820,7 +1820,7 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], - fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); @@ -1829,19 +1829,19 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); dbInfos[count]->cachelast = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); + (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); tstrncpy(dbInfos[count]->precision, - (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); + (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], + fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], - fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); + fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); count++; if (count > MAX_DATABASE_COUNT) { errorPrint("%s() LN%d, The database count overflow than %d\n", - __func__, __LINE__, MAX_DATABASE_COUNT); + __func__, __LINE__, MAX_DATABASE_COUNT); break; } } @@ -1850,10 +1850,10 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { } static void printfDbInfoForQueryToFile( - char* filename, SDbInfo* dbInfos, int index) { + char* filename, SDbInfo* dbInfos, int index) { if (filename[0] == 0) - return; + return; FILE *fp = fopen(filename, "at"); if (fp == NULL) { @@ -1896,8 +1896,8 @@ static void printfQuerySystemInfo(TAOS * taos) { time(&t); lt = localtime(&t); snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", - lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, - lt->tm_sec); + lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, + lt->tm_sec); // show variables res = taos_query(taos, "show variables;"); @@ -1918,8 +1918,8 @@ static void printfQuerySystemInfo(TAOS * taos) { } int dbCount = getDbFromServer(taos, dbInfos); if (dbCount <= 0) { - free(dbInfos); - return; + free(dbInfos); + return; } for (int i = 0; i < dbCount; i++) { @@ -1944,184 +1944,184 @@ static void printfQuerySystemInfo(TAOS * taos) { static int postProceSql(char* host, uint16_t port, char* sqlstr) { - char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: 
%d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s"; + char *req_fmt = "POST %s HTTP/1.1\r\nHost: %s:%d\r\nAccept: */*\r\nAuthorization: Basic %s\r\nContent-Length: %d\r\nContent-Type: application/x-www-form-urlencoded\r\n\r\n%s"; - char *url = "/rest/sql"; + char *url = "/rest/sql"; - struct hostent *server; - struct sockaddr_in serv_addr; - int bytes, sent, received, req_str_len, resp_len; - char *request_buf; - char response_buf[RESP_BUF_LEN]; - uint16_t rest_port = port + TSDB_PORT_HTTP; + struct hostent *server; + struct sockaddr_in serv_addr; + int bytes, sent, received, req_str_len, resp_len; + char *request_buf; + char response_buf[RESP_BUF_LEN]; + uint16_t rest_port = port + TSDB_PORT_HTTP; - int req_buf_len = strlen(sqlstr) + REQ_EXTRA_BUF_LEN; + int req_buf_len = strlen(sqlstr) + REQ_EXTRA_BUF_LEN; - request_buf = malloc(req_buf_len); - if (NULL == request_buf) { - errorPrint("%s", "ERROR, cannot allocate memory.\n"); - exit(EXIT_FAILURE); - } + request_buf = malloc(req_buf_len); + if (NULL == request_buf) { + errorPrint("%s", "ERROR, cannot allocate memory.\n"); + exit(EXIT_FAILURE); + } - char userpass_buf[INPUT_BUF_LEN]; - int mod_table[] = {0, 2, 1}; + char userpass_buf[INPUT_BUF_LEN]; + int mod_table[] = {0, 2, 1}; - static char base64[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', - 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', - 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', - 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', - 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', - 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', - 'w', 'x', 'y', 'z', '0', '1', '2', '3', - '4', '5', '6', '7', '8', '9', '+', '/'}; + static char base64[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', + 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', + 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', + 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', + 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', + 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', '0', '1', '2', '3', + '4', '5', '6', '7', '8', '9', '+', '/'}; - snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s", - g_Dbs.user, g_Dbs.password); - size_t userpass_buf_len = strlen(userpass_buf); - size_t encoded_len = 4 * ((userpass_buf_len +2) / 3); + snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s", + g_Dbs.user, g_Dbs.password); + size_t userpass_buf_len = strlen(userpass_buf); + size_t encoded_len = 4 * ((userpass_buf_len +2) / 3); - char base64_buf[INPUT_BUF_LEN]; + char base64_buf[INPUT_BUF_LEN]; #ifdef WINDOWS - WSADATA wsaData; + WSADATA wsaData; WSAStartup(MAKEWORD(2, 2), &wsaData); SOCKET sockfd; #else - int sockfd; + int sockfd; #endif - sockfd = socket(AF_INET, SOCK_STREAM, 0); - if (sockfd < 0) { + sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (sockfd < 0) { #ifdef WINDOWS - errorPrint( "Could not create socket : %d" , WSAGetLastError()); + errorPrint( "Could not create socket : %d" , WSAGetLastError()); #endif - debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd); - free(request_buf); - ERROR_EXIT("ERROR opening socket"); - } - - server = gethostbyname(host); - if (server == NULL) { - free(request_buf); - ERROR_EXIT("ERROR, no such host"); - } - - debugPrint("h_name: %s\nh_addretype: %s\nh_length: %d\n", - server->h_name, - (server->h_addrtype == AF_INET)?"ipv4":"ipv6", - server->h_length); - - memset(&serv_addr, 0, sizeof(serv_addr)); - serv_addr.sin_family = AF_INET; - serv_addr.sin_port = htons(rest_port); -#ifdef WINDOWS - serv_addr.sin_addr.s_addr = inet_addr(host); -#else - memcpy(&serv_addr.sin_addr.s_addr,server->h_addr,server->h_length); -#endif - - int retConn = 
connect(sockfd,(struct sockaddr *)&serv_addr,sizeof(serv_addr)); - debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn); - if (retConn < 0) { - free(request_buf); - ERROR_EXIT("ERROR connecting"); - } - - memset(base64_buf, 0, INPUT_BUF_LEN); - - for (int n = 0, m = 0; n < userpass_buf_len;) { - uint32_t oct_a = n < userpass_buf_len ? - (unsigned char) userpass_buf[n++]:0; - uint32_t oct_b = n < userpass_buf_len ? - (unsigned char) userpass_buf[n++]:0; - uint32_t oct_c = n < userpass_buf_len ? - (unsigned char) userpass_buf[n++]:0; - uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c; - - base64_buf[m++] = base64[(triple >> 3* 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 2* 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 1* 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 0* 6) & 0x3f]; - } - - for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++) - base64_buf[encoded_len - 1 - l] = '='; - - debugPrint("%s() LN%d: auth string base64 encoded: %s\n", - __func__, __LINE__, base64_buf); - char *auth = base64_buf; - - int r = snprintf(request_buf, - req_buf_len, - req_fmt, url, host, rest_port, - auth, strlen(sqlstr), sqlstr); - if (r >= req_buf_len) { - free(request_buf); - ERROR_EXIT("ERROR too long request"); - } - verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf); - - req_str_len = strlen(request_buf); - sent = 0; - do { -#ifdef WINDOWS - bytes = send(sockfd, request_buf + sent, req_str_len - sent, 0); -#else - bytes = write(sockfd, request_buf + sent, req_str_len - sent); -#endif - if (bytes < 0) - ERROR_EXIT("ERROR writing message to socket"); - if (bytes == 0) - break; - sent+=bytes; - } while(sent < req_str_len); - - memset(response_buf, 0, RESP_BUF_LEN); - resp_len = sizeof(response_buf) - 1; - received = 0; - do { -#ifdef WINDOWS - bytes = recv(sockfd, response_buf + received, resp_len - received, 0); -#else - bytes = read(sockfd, response_buf + received, resp_len - received); -#endif - if (bytes < 0) { - free(request_buf); - ERROR_EXIT("ERROR reading response from socket"); + debugPrint("%s() LN%d, sockfd=%d\n", __func__, __LINE__, sockfd); + free(request_buf); + ERROR_EXIT("ERROR opening socket"); } - if (bytes == 0) - break; - received += bytes; - } while(received < resp_len); - if (received == resp_len) { - free(request_buf); - ERROR_EXIT("ERROR storing complete response from socket"); - } + server = gethostbyname(host); + if (server == NULL) { + free(request_buf); + ERROR_EXIT("ERROR, no such host"); + } - response_buf[RESP_BUF_LEN - 1] = '\0'; - printf("Response:\n%s\n", response_buf); + debugPrint("h_name: %s\nh_addretype: %s\nh_length: %d\n", + server->h_name, + (server->h_addrtype == AF_INET)?"ipv4":"ipv6", + server->h_length); - free(request_buf); + memset(&serv_addr, 0, sizeof(serv_addr)); + serv_addr.sin_family = AF_INET; + serv_addr.sin_port = htons(rest_port); #ifdef WINDOWS - closesocket(sockfd); + serv_addr.sin_addr.s_addr = inet_addr(host); +#else + memcpy(&serv_addr.sin_addr.s_addr,server->h_addr,server->h_length); +#endif + + int retConn = connect(sockfd,(struct sockaddr *)&serv_addr,sizeof(serv_addr)); + debugPrint("%s() LN%d connect() return %d\n", __func__, __LINE__, retConn); + if (retConn < 0) { + free(request_buf); + ERROR_EXIT("ERROR connecting"); + } + + memset(base64_buf, 0, INPUT_BUF_LEN); + + for (int n = 0, m = 0; n < userpass_buf_len;) { + uint32_t oct_a = n < userpass_buf_len ? + (unsigned char) userpass_buf[n++]:0; + uint32_t oct_b = n < userpass_buf_len ? 
+ (unsigned char) userpass_buf[n++]:0; + uint32_t oct_c = n < userpass_buf_len ? + (unsigned char) userpass_buf[n++]:0; + uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c; + + base64_buf[m++] = base64[(triple >> 3* 6) & 0x3f]; + base64_buf[m++] = base64[(triple >> 2* 6) & 0x3f]; + base64_buf[m++] = base64[(triple >> 1* 6) & 0x3f]; + base64_buf[m++] = base64[(triple >> 0* 6) & 0x3f]; + } + + for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++) + base64_buf[encoded_len - 1 - l] = '='; + + debugPrint("%s() LN%d: auth string base64 encoded: %s\n", + __func__, __LINE__, base64_buf); + char *auth = base64_buf; + + int r = snprintf(request_buf, + req_buf_len, + req_fmt, url, host, rest_port, + auth, strlen(sqlstr), sqlstr); + if (r >= req_buf_len) { + free(request_buf); + ERROR_EXIT("ERROR too long request"); + } + verbosePrint("%s() LN%d: Request:\n%s\n", __func__, __LINE__, request_buf); + + req_str_len = strlen(request_buf); + sent = 0; + do { +#ifdef WINDOWS + bytes = send(sockfd, request_buf + sent, req_str_len - sent, 0); +#else + bytes = write(sockfd, request_buf + sent, req_str_len - sent); +#endif + if (bytes < 0) + ERROR_EXIT("ERROR writing message to socket"); + if (bytes == 0) + break; + sent+=bytes; + } while(sent < req_str_len); + + memset(response_buf, 0, RESP_BUF_LEN); + resp_len = sizeof(response_buf) - 1; + received = 0; + do { +#ifdef WINDOWS + bytes = recv(sockfd, response_buf + received, resp_len - received, 0); +#else + bytes = read(sockfd, response_buf + received, resp_len - received); +#endif + if (bytes < 0) { + free(request_buf); + ERROR_EXIT("ERROR reading response from socket"); + } + if (bytes == 0) + break; + received += bytes; + } while(received < resp_len); + + if (received == resp_len) { + free(request_buf); + ERROR_EXIT("ERROR storing complete response from socket"); + } + + response_buf[RESP_BUF_LEN - 1] = '\0'; + printf("Response:\n%s\n", response_buf); + + free(request_buf); +#ifdef WINDOWS + closesocket(sockfd); WSACleanup(); #else - close(sockfd); + close(sockfd); #endif - return 0; + return 0; } static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); if (NULL == dataBuf) { errorPrint("%s() LN%d, calloc failed! 
size:%d\n", - __func__, __LINE__, TSDB_MAX_SQL_LEN+1); + __func__, __LINE__, TSDB_MAX_SQL_LEN+1); return NULL; } int dataLen = 0; dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos); + "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos); return dataBuf; } @@ -2137,10 +2137,10 @@ static char* generateTagVaulesForStb(SSuperTable* stbInfo, int32_t tableSeq) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "("); for (int i = 0; i < stbInfo->tagCount; i++) { if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", strlen("binary"))) - || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", strlen("nchar")))) { + || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", strlen("nchar")))) { if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { printf("binary or nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); + (uint32_t)TSDB_MAX_BINARY_LEN); tmfree(dataBuf); return NULL; } @@ -2160,40 +2160,40 @@ static char* generateTagVaulesForStb(SSuperTable* stbInfo, int32_t tableSeq) { } //rand_string(buf, stbInfo->tags[i].dataLen); dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "\'%s\', ", buf); + "\'%s\', ", buf); tmfree(buf); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "int", strlen("int"))) { + "int", strlen("int"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", tableSeq); + "%d, ", tableSeq); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "bigint", strlen("bigint"))) { + "bigint", strlen("bigint"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64", ", rand_bigint()); + "%"PRId64", ", rand_bigint()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "float", strlen("float"))) { + "float", strlen("float"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%f, ", rand_float()); + "%f, ", rand_float()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "double", strlen("double"))) { + "double", strlen("double"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%f, ", rand_double()); + "%f, ", rand_double()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "smallint", strlen("smallint"))) { + "smallint", strlen("smallint"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", rand_smallint()); + "%d, ", rand_smallint()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "tinyint", strlen("tinyint"))) { + "tinyint", strlen("tinyint"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", rand_tinyint()); + "%d, ", rand_tinyint()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "bool", strlen("bool"))) { + "bool", strlen("bool"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", rand_bool()); + "%d, ", rand_bool()); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "timestamp", strlen("timestamp"))) { + "timestamp", strlen("timestamp"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64", ", rand_bigint()); + "%"PRId64", ", rand_bigint()); } else { printf("No support data type: %s\n", stbInfo->tags[i].dataType); tmfree(dataBuf); @@ -2277,8 +2277,8 @@ static int calcRowLen(SSuperTable* superTbls) { static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, - char* dbName, char* sTblName, char** childTblNameOfSuperTbl, - int* 
childTblCountOfSuperTbl, int limit, int offset) { + char* dbName, char* sTblName, char** childTblNameOfSuperTbl, + int* childTblCountOfSuperTbl, int limit, int offset) { char command[BUFFER_SIZE] = "\0"; char limitBuf[100] = "\0"; @@ -2294,7 +2294,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, //get all child table name use cmd: select tbname from superTblName; snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s", - dbName, sTblName, limitBuf); + dbName, sTblName, limitBuf); res = taos_query(taos, command); int32_t code = taos_errno(res); @@ -2302,7 +2302,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, taos_free_result(res); taos_close(taos); errorPrint("%s() LN%d, failed to run command %s\n", - __func__, __LINE__, command); + __func__, __LINE__, command); exit(-1); } @@ -2311,10 +2311,10 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, if (childTblName == NULL) { childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); if (NULL == childTblName) { - taos_free_result(res); - taos_close(taos); - errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); - exit(-1); + taos_free_result(res); + taos_close(taos); + errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); + exit(-1); } } @@ -2326,16 +2326,16 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, count++; if (count >= childTblCount - 1) { char *tmp = realloc(childTblName, - (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1); + (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1); if (tmp != NULL) { childTblName = tmp; childTblCount = (int)(childTblCount*1.5); memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, - (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); + (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); } else { // exit, if allocate more memory failed errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", - __func__, __LINE__, dbName, sTblName); + __func__, __LINE__, dbName, sTblName); tmfree(childTblName); taos_free_result(res); taos_close(taos); @@ -2353,16 +2353,16 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, } static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, - char* sTblName, char** childTblNameOfSuperTbl, - int* childTblCountOfSuperTbl) { + char* sTblName, char** childTblNameOfSuperTbl, + int* childTblCountOfSuperTbl) { - return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName, - childTblNameOfSuperTbl, childTblCountOfSuperTbl, - -1, -1); + return getChildNameOfSuperTableWithLimitAndOffset(taos, dbName, sTblName, + childTblNameOfSuperTbl, childTblCountOfSuperTbl, + -1, -1); } static int getSuperTableFromServer(TAOS * taos, char* dbName, - SSuperTable* superTbls) { + SSuperTable* superTbls) { char command[BUFFER_SIZE] = "\0"; TAOS_RES * res; @@ -2390,29 +2390,29 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) { tstrncpy(superTbls->tags[tagIndex].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); tstrncpy(superTbls->tags[tagIndex].dataType, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); 
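A note on the hunk being reindented here: getSuperTableFromServer() walks the result of `describe <db>.<stable>`, where each row carries a field name, a data type, a length, and a note column; rows whose note is the literal "TAG" are routed into the tag metadata and all other rows become regular columns. A minimal sketch of that classification step, assuming the four values have already been fetched from the row (FieldMeta and the counter names are illustrative, not taosdemo's own):

    #include <stdio.h>
    #include <string.h>

    typedef struct {
        char field[64];   /* field name from the describe row           */
        char type[16];    /* data type, e.g. "INT", "BINARY"            */
        int  len;         /* declared length, relevant for BINARY/NCHAR */
    } FieldMeta;

    static void classifyDescribeRow(const char *fieldName, const char *dataType,
                                    int dataLen, const char *note,
                                    FieldMeta *tags, int *tagCount,
                                    FieldMeta *cols, int *colCount) {
        /* A note column equal to "TAG" marks a tag; anything else is a column. */
        FieldMeta *dst = (strcmp(note, "TAG") == 0)
                             ? &tags[(*tagCount)++]
                             : &cols[(*colCount)++];
        snprintf(dst->field, sizeof(dst->field), "%s", fieldName);
        snprintf(dst->type,  sizeof(dst->type),  "%s", dataType);
        dst->len = dataLen;
    }

The real code additionally bounds the copies with tstrncpy() using the fields[...].bytes lengths reported by the driver, which guards against column buffers that are not NUL-terminated.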
superTbls->tags[tagIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); tstrncpy(superTbls->tags[tagIndex].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); tagIndex++; } else { tstrncpy(superTbls->columns[columnIndex].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); tstrncpy(superTbls->columns[columnIndex].dataType, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); + (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); superTbls->columns[columnIndex].dataLen = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); tstrncpy(superTbls->columns[columnIndex].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); + (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); columnIndex++; } count++; @@ -2443,8 +2443,8 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, } static int createSuperTable( - TAOS * taos, char* dbName, - SSuperTable* superTbl) { + TAOS * taos, char* dbName, + SSuperTable* superTbl) { char command[BUFFER_SIZE] = "\0"; @@ -2456,7 +2456,7 @@ static int createSuperTable( if (superTbl->columnCount == 0) { errorPrint("%s() LN%d, super table column count is %d\n", - __func__, __LINE__, superTbl->columnCount); + __func__, __LINE__, superTbl->columnCount); return -1; } @@ -2465,13 +2465,13 @@ static int createSuperTable( if (strcasecmp(dataType, "BINARY") == 0) { len += snprintf(cols + len, STRING_LEN - len, - ", col%d %s(%d)", colIndex, "BINARY", - superTbl->columns[colIndex].dataLen); + ", col%d %s(%d)", colIndex, "BINARY", + superTbl->columns[colIndex].dataLen); lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; } else if (strcasecmp(dataType, "NCHAR") == 0) { len += snprintf(cols + len, STRING_LEN - len, - ", col%d %s(%d)", colIndex, "NCHAR", - superTbl->columns[colIndex].dataLen); + ", col%d %s(%d)", colIndex, "NCHAR", + superTbl->columns[colIndex].dataLen); lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; } else if (strcasecmp(dataType, "INT") == 0) { len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT"); @@ -2500,7 +2500,7 @@ static int createSuperTable( } else { taos_close(taos); errorPrint("%s() LN%d, config error data type : %s\n", - __func__, __LINE__, dataType); + __func__, __LINE__, dataType); exit(-1); } } @@ -2512,18 +2512,18 @@ static int createSuperTable( superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1); if (NULL == superTbl->colsOfCreateChildTable) { errorPrint("%s() LN%d, Failed when calloc, size:%d", - __func__, __LINE__, len+1); + __func__, __LINE__, len+1); taos_close(taos); exit(-1); } snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); verbosePrint("%s() LN%d: %s\n", - __func__, __LINE__, superTbl->colsOfCreateChildTable); + __func__, __LINE__, superTbl->colsOfCreateChildTable); if (superTbl->tagCount == 0) { errorPrint("%s() LN%d, super table tag count is %d\n", - __func__, __LINE__, superTbl->tagCount); + __func__, __LINE__, superTbl->tagCount); return -1; } @@ -2538,44 +2538,44 @@ static int createSuperTable( if (strcasecmp(dataType, "BINARY") == 0) { len += snprintf(tags + len, 
STRING_LEN - len, "t%d %s(%d), ", tagIndex, - "BINARY", superTbl->tags[tagIndex].dataLen); + "BINARY", superTbl->tags[tagIndex].dataLen); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; } else if (strcasecmp(dataType, "NCHAR") == 0) { len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, - "NCHAR", superTbl->tags[tagIndex].dataLen); + "NCHAR", superTbl->tags[tagIndex].dataLen); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; } else if (strcasecmp(dataType, "INT") == 0) { len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "INT"); + "INT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11; } else if (strcasecmp(dataType, "BIGINT") == 0) { len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "BIGINT"); + "BIGINT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21; } else if (strcasecmp(dataType, "SMALLINT") == 0) { len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "SMALLINT"); + "SMALLINT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; } else if (strcasecmp(dataType, "TINYINT") == 0) { len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "TINYINT"); + "TINYINT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4; } else if (strcasecmp(dataType, "BOOL") == 0) { len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "BOOL"); + "BOOL"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; } else if (strcasecmp(dataType, "FLOAT") == 0) { len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "FLOAT"); + "FLOAT"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22; } else if (strcasecmp(dataType, "DOUBLE") == 0) { len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "DOUBLE"); + "DOUBLE"); lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42; } else { taos_close(taos); errorPrint("%s() LN%d, config error tag type : %s\n", - __func__, __LINE__, dataType); + __func__, __LINE__, dataType); exit(-1); } } @@ -2586,14 +2586,14 @@ static int createSuperTable( superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; snprintf(command, BUFFER_SIZE, - "create table if not exists %s.%s (ts timestamp%s) tags %s", - dbName, superTbl->sTblName, cols, tags); + "create table if not exists %s.%s (ts timestamp%s) tags %s", + dbName, superTbl->sTblName, cols, tags); verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, command); if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - errorPrint( "create supertable %s failed!\n\n", - superTbl->sTblName); - return -1; + errorPrint( "create supertable %s failed!\n\n", + superTbl->sTblName); + return -1; } debugPrint("create supertable %s success!\n\n", superTbl->sTblName); return 0; @@ -2620,35 +2620,35 @@ static int createDatabasesAndStables() { int dataLen = 0; dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName); + BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName); if (g_Dbs.db[i].dbCfg.blocks > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks); + BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks); } if (g_Dbs.db[i].dbCfg.cache > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache); + BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache); } if (g_Dbs.db[i].dbCfg.days > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " 
days %d", g_Dbs.db[i].dbCfg.days); + BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days); } if (g_Dbs.db[i].dbCfg.keep > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep); + BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep); } if (g_Dbs.db[i].dbCfg.quorum > 1) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum); + BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum); } if (g_Dbs.db[i].dbCfg.replica > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica); + BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica); } if (g_Dbs.db[i].dbCfg.update > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update); + BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update); } //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { // dataLen += snprintf(command + dataLen, @@ -2656,33 +2656,33 @@ static int createDatabasesAndStables() { //} if (g_Dbs.db[i].dbCfg.minRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows); + BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows); } if (g_Dbs.db[i].dbCfg.maxRows > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows); + BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows); } if (g_Dbs.db[i].dbCfg.comp > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp); + BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp); } if (g_Dbs.db[i].dbCfg.walLevel > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel); + BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel); } if (g_Dbs.db[i].dbCfg.cacheLast > 0) { dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast); + BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast); } if (g_Dbs.db[i].dbCfg.fsync > 0) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " fsync %d", g_Dbs.db[i].dbCfg.fsync); + " fsync %d", g_Dbs.db[i].dbCfg.fsync); } if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms"))) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "us", strlen("us")))) { + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, + "us", strlen("us")))) { dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); + " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); } debugPrint("%s() %d command: %s\n", __func__, __LINE__, command); @@ -2695,7 +2695,7 @@ static int createDatabasesAndStables() { } debugPrint("%s() %d supertbl count:%d\n", - __func__, __LINE__, g_Dbs.db[i].superTblCount); + __func__, __LINE__, g_Dbs.db[i].superTblCount); int validStbCount = 0; @@ -2708,7 +2708,7 @@ static int createDatabasesAndStables() { if ((ret != 0) || (g_Dbs.db[i].drop)) { ret = createSuperTable(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j]); + &g_Dbs.db[i].superTbls[j]); if (0 != ret) { errorPrint("create super table %d failed!\n\n", j); @@ -2717,10 +2717,10 @@ static int createDatabasesAndStables() { } ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j]); + &g_Dbs.db[i].superTbls[j]); if (0 != ret) { errorPrint("\nget super table %s.%s info 
failed!\n\n", - g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); + g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); continue; } @@ -2754,20 +2754,20 @@ static void* createTable(void *sarg) int batchNum = 0; verbosePrint("%s() LN%d: Creating table from %d to %d\n", - __func__, __LINE__, - pThreadInfo->start_table_from, pThreadInfo->end_table_to); + __func__, __LINE__, + pThreadInfo->start_table_from, pThreadInfo->end_table_to); for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { if (0 == g_Dbs.use_metric) { snprintf(buffer, buff_len, - "create table if not exists %s.%s%d %s;", - pThreadInfo->db_name, - g_args.tb_prefix, i, - pThreadInfo->cols); + "create table if not exists %s.%s%d %s;", + pThreadInfo->db_name, + g_args.tb_prefix, i, + pThreadInfo->cols); } else { if (superTblInfo == NULL) { errorPrint("%s() LN%d, use metric, but super table info is NULL\n", - __func__, __LINE__); + __func__, __LINE__); free(buffer); exit(-1); } else { @@ -2775,31 +2775,31 @@ static void* createTable(void *sarg) batchNum = 0; memset(buffer, 0, buff_len); len += snprintf(buffer + len, - buff_len - len, "create table "); + buff_len - len, "create table "); } char* tagsValBuf = NULL; if (0 == superTblInfo->tagSource) { tagsValBuf = generateTagVaulesForStb(superTblInfo, i); } else { tagsValBuf = getTagValueFromTagSample( - superTblInfo, - i % superTblInfo->tagSampleCount); + superTblInfo, + i % superTblInfo->tagSampleCount); } if (NULL == tagsValBuf) { free(buffer); return NULL; } len += snprintf(buffer + len, - buff_len - len, - "if not exists %s.%s%d using %s.%s tags %s ", - pThreadInfo->db_name, superTblInfo->childTblPrefix, - i, pThreadInfo->db_name, - superTblInfo->sTblName, tagsValBuf); + buff_len - len, + "if not exists %s.%s%d using %s.%s tags %s ", + pThreadInfo->db_name, superTblInfo->childTblPrefix, + i, pThreadInfo->db_name, + superTblInfo->sTblName, tagsValBuf); free(tagsValBuf); batchNum++; if ((batchNum < superTblInfo->batchCreateTableNum) - && ((buff_len - len) - >= (superTblInfo->lenOfTagOfOneRow + 256))) { + && ((buff_len - len) + >= (superTblInfo->lenOfTagOfOneRow + 256))) { continue; } } @@ -2816,7 +2816,7 @@ static void* createTable(void *sarg) int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] already create %d - %d tables\n", - pThreadInfo->threadID, pThreadInfo->start_table_from, i); + pThreadInfo->threadID, pThreadInfo->start_table_from, i); lastPrintTime = currentPrintTime; } } @@ -2833,8 +2833,8 @@ static void* createTable(void *sarg) } static int startMultiThreadCreateChildTable( - char* cols, int threads, int startFrom, int ntables, - char* db_name, SSuperTable* superTblInfo) { + char* cols, int threads, int startFrom, int ntables, + char* db_name, SSuperTable* superTblInfo) { pthread_t *pids = malloc(threads * sizeof(pthread_t)); threadInfo *infos = malloc(threads * sizeof(threadInfo)); @@ -2864,14 +2864,14 @@ static int startMultiThreadCreateChildTable( t_info->superTblInfo = superTblInfo; verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); t_info->taos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - db_name, - g_Dbs.port); + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + db_name, + g_Dbs.port); if (t_info->taos == NULL) { errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", - __func__, __LINE__, taos_errstr(NULL)); + __func__, __LINE__, taos_errstr(NULL)); free(pids); free(infos); return -1; @@ -2903,62 +2903,62 @@ static 
int startMultiThreadCreateChildTable( } static void createChildTables() { - char tblColsBuf[MAX_SQL_SIZE]; - int len; + char tblColsBuf[MAX_SQL_SIZE]; + int len; for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.use_metric) { if (g_Dbs.db[i].superTblCount > 0) { - // with super table + // with super table for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) - || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { + || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { continue; } verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); int startFrom = 0; g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; verbosePrint("%s() LN%d: create %d child tables from %d\n", - __func__, __LINE__, g_totalChildTables, startFrom); + __func__, __LINE__, g_totalChildTables, startFrom); startMultiThreadCreateChildTable( - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, - g_Dbs.threadCountByCreateTbl, - startFrom, - g_Dbs.db[i].superTbls[j].childTblCount, - g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, + g_Dbs.threadCountByCreateTbl, + startFrom, + g_Dbs.db[i].superTbls[j].childTblCount, + g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); } } } else { // normal table len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); for (int j = 0; j < g_args.num_of_CPR; j++) { - if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) - || (strncasecmp(g_args.datatype[j], - "NCHAR", strlen("NCHAR")) == 0)) { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary); - } else { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s", j, g_args.datatype[j]); - } - len = strlen(tblColsBuf); + if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) + || (strncasecmp(g_args.datatype[j], + "NCHAR", strlen("NCHAR")) == 0)) { + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + ", COL%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary); + } else { + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + ", COL%d %s", j, g_args.datatype[j]); + } + len = strlen(tblColsBuf); } snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); verbosePrint("%s() LN%d: dbName: %s num of tb: %d schema: %s\n", - __func__, __LINE__, - g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); + __func__, __LINE__, + g_Dbs.db[i].dbName, g_args.num_of_tables, tblColsBuf); startMultiThreadCreateChildTable( - tblColsBuf, - g_Dbs.threadCountByCreateTbl, - 0, - g_args.num_of_tables, - g_Dbs.db[i].dbName, - NULL); + tblColsBuf, + g_Dbs.threadCountByCreateTbl, + 0, + g_args.num_of_tables, + g_Dbs.db[i].dbName, + NULL); } } } @@ -2974,7 +2974,7 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { FILE *fp = fopen(superTblInfo->tagsFile, "r"); if (fp == NULL) { printf("Failed to open tags file: %s, reason:%s\n", - superTblInfo->tagsFile, strerror(errno)); + superTblInfo->tagsFile, strerror(errno)); return -1; } @@ -3006,12 +3006,12 @@ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { if (count >= tagCount - 1) { char *tmp = realloc(tagDataBuf, - (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow); + (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow); if (tmp != NULL) { tagDataBuf = tmp; tagCount = (int)(tagCount*1.5); memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, 
- 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow)); + 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow)); } else { // exit, if allocate more memory failed printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile); @@ -3040,7 +3040,7 @@ int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) { Read 10000 lines at most. If more than 10000 lines, continue to read after using */ static int readSampleFromCsvFileToMem( - SSuperTable* superTblInfo) { + SSuperTable* superTblInfo) { size_t n = 0; ssize_t readLen = 0; char * line = NULL; @@ -3048,20 +3048,20 @@ static int readSampleFromCsvFileToMem( FILE* fp = fopen(superTblInfo->sampleFile, "r"); if (fp == NULL) { - errorPrint( "Failed to open sample file: %s, reason:%s\n", - superTblInfo->sampleFile, strerror(errno)); - return -1; + errorPrint( "Failed to open sample file: %s, reason:%s\n", + superTblInfo->sampleFile, strerror(errno)); + return -1; } assert(superTblInfo->sampleDataBuf); memset(superTblInfo->sampleDataBuf, 0, - MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow); + MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow); while(1) { readLen = tgetline(&line, &n, fp); if (-1 == readLen) { if(0 != fseek(fp, 0, SEEK_SET)) { errorPrint( "Failed to fseek file: %s, reason:%s\n", - superTblInfo->sampleFile, strerror(errno)); + superTblInfo->sampleFile, strerror(errno)); fclose(fp); return -1; } @@ -3078,12 +3078,12 @@ static int readSampleFromCsvFileToMem( if (readLen > superTblInfo->lenOfOneRow) { printf("sample row len[%d] overflow define schema len[%d], so discard this row\n", - (int32_t)readLen, superTblInfo->lenOfOneRow); + (int32_t)readLen, superTblInfo->lenOfOneRow); continue; } memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow, - line, readLen); + line, readLen); getRows++; if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) { @@ -3097,7 +3097,7 @@ static int readSampleFromCsvFileToMem( } static bool getColumnAndTagTypeFromInsertJsonFile( - cJSON* stbInfo, SSuperTable* superTbls) { + cJSON* stbInfo, SSuperTable* superTbls) { bool ret = false; // columns @@ -3114,7 +3114,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( int columnSize = cJSON_GetArraySize(columns); if ((columnSize + 1/* ts */) > MAX_COLUMN_COUNT) { errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n", - __func__, __LINE__, MAX_COLUMN_COUNT); + __func__, __LINE__, MAX_COLUMN_COUNT); goto PARSE_OVER; } @@ -3133,7 +3133,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( count = countObj->valueint; } else if (countObj && countObj->type != cJSON_Number) { errorPrint("%s() LN%d, failed to read json, column count not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } else { count = 1; @@ -3145,7 +3145,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { errorPrint("%s() LN%d: failed to read json, column type not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE); @@ -3156,7 +3156,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( columnCase.dataLen = dataLen->valueint; } else if (dataLen && dataLen->type != cJSON_Number) { debugPrint("%s() LN%d: failed to read json, column len not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } else { columnCase.dataLen = 8; @@ -3164,7 +3164,7 @@ static bool 
getColumnAndTagTypeFromInsertJsonFile( for (int n = 0; n < count; ++n) { tstrncpy(superTbls->columns[index].dataType, - columnCase.dataType, MAX_TB_NAME_SIZE); + columnCase.dataType, MAX_TB_NAME_SIZE); superTbls->columns[index].dataLen = columnCase.dataLen; index++; } @@ -3172,7 +3172,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if ((index + 1 /* ts */) > MAX_COLUMN_COUNT) { errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n", - __func__, __LINE__, MAX_COLUMN_COUNT); + __func__, __LINE__, MAX_COLUMN_COUNT); goto PARSE_OVER; } @@ -3184,14 +3184,14 @@ static bool getColumnAndTagTypeFromInsertJsonFile( cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); if (!tags || tags->type != cJSON_Array) { errorPrint("%s() LN%d, failed to read json, tags not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } int tagSize = cJSON_GetArraySize(tags); if (tagSize > MAX_TAG_COUNT) { errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", - __func__, __LINE__, MAX_TAG_COUNT); + __func__, __LINE__, MAX_TAG_COUNT); goto PARSE_OVER; } @@ -3217,7 +3217,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { errorPrint("%s() LN%d, failed to read json, tag type not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); @@ -3227,7 +3227,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( columnCase.dataLen = dataLen->valueint; } else if (dataLen && dataLen->type != cJSON_Number) { errorPrint("%s() LN%d, failed to read json, column len not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } else { columnCase.dataLen = 0; @@ -3235,7 +3235,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( for (int n = 0; n < count; ++n) { tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, - MAX_TB_NAME_SIZE); + MAX_TB_NAME_SIZE); superTbls->tags[index].dataLen = columnCase.dataLen; index++; } @@ -3243,7 +3243,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if (index > MAX_TAG_COUNT) { errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n", - __func__, __LINE__, MAX_TAG_COUNT); + __func__, __LINE__, MAX_TAG_COUNT); goto PARSE_OVER; } @@ -3251,12 +3251,12 @@ static bool getColumnAndTagTypeFromInsertJsonFile( if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > MAX_COLUMN_COUNT) { errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n", - __func__, __LINE__, MAX_COLUMN_COUNT); + __func__, __LINE__, MAX_COLUMN_COUNT); goto PARSE_OVER; } ret = true; - PARSE_OVER: +PARSE_OVER: return ret; } @@ -3323,7 +3323,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; } else { errorPrint("%s() LN%d, failed to read json, threads2 not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } @@ -3334,7 +3334,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_args.insert_interval = 0; } else { errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } @@ -3345,9 +3345,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { // rows per table need be less than insert batch if (g_args.interlace_rows > g_args.num_of_RPR) { printf("NOTICE: interlace rows value %d > 
num_of_records_per_req %d\n\n", - g_args.interlace_rows, g_args.num_of_RPR); + g_args.interlace_rows, g_args.num_of_RPR); printf(" interlace rows value will be set to num_of_records_per_req %d\n\n", - g_args.num_of_RPR); + g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); g_args.interlace_rows = g_args.num_of_RPR; @@ -3356,7 +3356,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req } else { errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } @@ -3367,7 +3367,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_args.max_sql_len = 1024000; } else { errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } @@ -3378,14 +3378,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_args.num_of_RPR = INT32_MAX; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, if (answerPrompt - && answerPrompt->type == cJSON_String - && answerPrompt->valuestring != NULL) { + && answerPrompt->type == cJSON_String + && answerPrompt->valuestring != NULL) { if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { g_args.answer_yes = false; } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { @@ -3409,8 +3409,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { int dbSize = cJSON_GetArraySize(dbs); if (dbSize > MAX_DB_COUNT) { errorPrint( - "ERROR: failed to read json, databases size overflow, max database is %d\n", - MAX_DB_COUNT); + "ERROR: failed to read json, databases size overflow, max database is %d\n", + MAX_DB_COUNT); goto PARSE_OVER; } @@ -3444,15 +3444,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].drop = g_args.drop_database; } else { errorPrint("%s() LN%d, failed to read json, drop input mistake\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision"); if (precision && precision->type == cJSON_String - && precision->valuestring != NULL) { + && precision->valuestring != NULL) { tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, - MAX_DB_NAME_SIZE); + MAX_DB_NAME_SIZE); } else if (!precision) { //tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE); @@ -3487,8 +3487,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!keep) { g_Dbs.db[i].dbCfg.keep = -1; } else { - printf("ERROR: failed to read json, keep not found\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, keep not found\n"); + goto PARSE_OVER; } cJSON* days = cJSON_GetObjectItem(dbinfo, "days"); @@ -3497,8 +3497,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!days) { g_Dbs.db[i].dbCfg.days = -1; } else { - printf("ERROR: failed to read json, days not found\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, days not found\n"); + goto PARSE_OVER; } cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache"); @@ -3507,8 +3507,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!cache) { g_Dbs.db[i].dbCfg.cache = -1; } else { - 
printf("ERROR: failed to read json, cache not found\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, cache not found\n"); + goto PARSE_OVER; } cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks"); @@ -3517,8 +3517,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!blocks) { g_Dbs.db[i].dbCfg.blocks = -1; } else { - printf("ERROR: failed to read json, block not found\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, block not found\n"); + goto PARSE_OVER; } //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode"); @@ -3537,8 +3537,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!minRows) { g_Dbs.db[i].dbCfg.minRows = -1; } else { - printf("ERROR: failed to read json, minRows not found\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, minRows not found\n"); + goto PARSE_OVER; } cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows"); @@ -3547,8 +3547,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!maxRows) { g_Dbs.db[i].dbCfg.maxRows = -1; } else { - printf("ERROR: failed to read json, maxRows not found\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, maxRows not found\n"); + goto PARSE_OVER; } cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp"); @@ -3557,8 +3557,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!comp) { g_Dbs.db[i].dbCfg.comp = -1; } else { - printf("ERROR: failed to read json, comp not found\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, comp not found\n"); + goto PARSE_OVER; } cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel"); @@ -3567,8 +3567,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!walLevel) { g_Dbs.db[i].dbCfg.walLevel = -1; } else { - printf("ERROR: failed to read json, walLevel not found\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, walLevel not found\n"); + goto PARSE_OVER; } cJSON* cacheLast= cJSON_GetObjectItem(dbinfo, "cachelast"); @@ -3577,8 +3577,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!cacheLast) { g_Dbs.db[i].dbCfg.cacheLast = -1; } else { - printf("ERROR: failed to read json, cacheLast not found\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, cacheLast not found\n"); + goto PARSE_OVER; } cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum"); @@ -3587,8 +3587,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!quorum) { g_Dbs.db[i].dbCfg.quorum = 1; } else { - printf("failed to read json, quorum input mistake"); - goto PARSE_OVER; + printf("failed to read json, quorum input mistake"); + goto PARSE_OVER; } cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync"); @@ -3598,7 +3598,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].dbCfg.fsync = -1; } else { errorPrint("%s() LN%d, failed to read json, fsync input mistake\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } @@ -3606,15 +3606,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); if (!stables || stables->type != cJSON_Array) { errorPrint("%s() LN%d, failed to read json, super_tables not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } int stbSize = cJSON_GetArraySize(stables); if (stbSize > MAX_SUPER_TABLE_COUNT) { errorPrint( - "%s() LN%d, failed to read json, supertable size overflow, max supertable is %d\n", - __func__, __LINE__, MAX_SUPER_TABLE_COUNT); + "%s() LN%d, failed to read json, supertable size 
overflow, max supertable is %d\n", + __func__, __LINE__, MAX_SUPER_TABLE_COUNT); goto PARSE_OVER; } @@ -3627,7 +3627,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name"); if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) { errorPrint("%s() LN%d, failed to read json, stb name not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE); @@ -3641,8 +3641,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); // yes, no, null if (autoCreateTbl - && autoCreateTbl->type == cJSON_String - && autoCreateTbl->valuestring != NULL) { + && autoCreateTbl->type == cJSON_String + && autoCreateTbl->valuestring != NULL) { if (0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) { g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL; } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) { @@ -3671,13 +3671,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no if (childTblExists - && childTblExists->type == cJSON_String - && childTblExists->valuestring != NULL) { + && childTblExists->type == cJSON_String + && childTblExists->valuestring != NULL) { if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3)) && (g_Dbs.db[i].drop == false)) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS; } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2) - || (g_Dbs.db[i].drop == true))) { + || (g_Dbs.db[i].drop == true))) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; } else { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; @@ -3686,36 +3686,36 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; } else { errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); if (!count || count->type != cJSON_Number || 0 >= count->valueint) { errorPrint("%s() LN%d, failed to read json, childtable_count not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); if (dataSource && dataSource->type == cJSON_String - && dataSource->valuestring != NULL) { + && dataSource->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, - dataSource->valuestring, MAX_DB_NAME_SIZE); + dataSource->valuestring, MAX_DB_NAME_SIZE); } else if (!dataSource) { tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE); } else { errorPrint("%s() LN%d, failed to read json, data_source not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } cJSON *insertMode = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , restful if (insertMode && insertMode->type == cJSON_String - && insertMode->valuestring != NULL) { + && insertMode->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].insertMode, - insertMode->valuestring, MAX_DB_NAME_SIZE); + insertMode->valuestring, MAX_DB_NAME_SIZE); } else if (!insertMode) { tstrncpy(g_Dbs.db[i].superTbls[j].insertMode, "taosc", MAX_DB_NAME_SIZE); } else { @@ -3727,8 +3727,8 @@ static bool 
getMetaFromInsertJsonFile(cJSON* root) { if ((childTbl_limit) && (g_Dbs.db[i].drop != true) && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if (childTbl_limit->type != cJSON_Number) { - printf("ERROR: failed to read json, childtable_limit\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, childtable_limit\n"); + goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint; } else { @@ -3739,8 +3739,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if ((childTbl_offset) && (g_Dbs.db[i].drop != true) && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { if (childTbl_offset->type != cJSON_Number || 0 > childTbl_offset->valueint) { - printf("ERROR: failed to read json, childtable_offset\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, childtable_offset\n"); + goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint; } else { @@ -3750,10 +3750,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - ts->valuestring, MAX_DB_NAME_SIZE); + ts->valuestring, MAX_DB_NAME_SIZE); } else if (!ts) { tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - "now", MAX_DB_NAME_SIZE); + "now", MAX_DB_NAME_SIZE); } else { printf("ERROR: failed to read json, start_timestamp not found\n"); goto PARSE_OVER; @@ -3771,9 +3771,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); if (sampleFormat && sampleFormat->type - == cJSON_String && sampleFormat->valuestring != NULL) { + == cJSON_String && sampleFormat->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, - sampleFormat->valuestring, MAX_DB_NAME_SIZE); + sampleFormat->valuestring, MAX_DB_NAME_SIZE); } else if (!sampleFormat) { tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE); } else { @@ -3785,7 +3785,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, - sampleFile->valuestring, MAX_FILE_NAME_LEN); + sampleFile->valuestring, MAX_FILE_NAME_LEN); } else if (!sampleFile) { memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN); } else { @@ -3796,7 +3796,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); if (tagsFile && tagsFile->type == cJSON_String && tagsFile->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile, - tagsFile->valuestring, MAX_FILE_NAME_LEN); + tagsFile->valuestring, MAX_FILE_NAME_LEN); if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) { g_Dbs.db[i].superTbls[j].tagSource = 0; } else { @@ -3823,7 +3823,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; } else { errorPrint("%s() LN%d, failed to read json, maxSqlLen input mistake\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } /* @@ -3850,9 +3850,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { // rows per table need be less than insert batch if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) { printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %d > num_of_records_per_req %d\n\n", - i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR); + i, 
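/*
 * The start_timestamp option above stores either the literal "now" or a
 * "2006-01-02 15:04:05.000"-style datetime string; resolution to an epoch
 * value happens later through the internal taosGetTimestamp()/taosParseTime()
 * pair. A plain-libc stand-in, as a sketch only: it assumes millisecond
 * precision and ignores the sub-second part.
 */
#define _XOPEN_SOURCE 700   /* for strptime() */
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <time.h>

static int64_t resolveStartTimeMs(const char *startTimestamp) {
    if (strncasecmp(startTimestamp, "now", 3) == 0) {
        return (int64_t)time(NULL) * 1000;     /* current time, in ms */
    }
    struct tm tm;
    memset(&tm, 0, sizeof(tm));
    if (strptime(startTimestamp, "%Y-%m-%d %H:%M:%S", &tm) == NULL) {
        return -1;                             /* parse failure: caller aborts */
    }
    return (int64_t)mktime(&tm) * 1000;
}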
j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR); printf(" interlace rows value will be set to num_of_records_per_req %d\n\n", - g_args.num_of_RPR); + g_args.num_of_RPR); printf(" press Enter key to continue or Ctrl-C to stop."); (void)getchar(); g_Dbs.db[i].superTbls[j].interlaceRows = g_args.num_of_RPR; @@ -3861,8 +3861,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req } else { errorPrint( - "%s() LN%d, failed to read json, interlace rows input mistake\n", - __func__, __LINE__); + "%s() LN%d, failed to read json, interlace rows input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3899,7 +3899,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; } else { errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } @@ -3908,16 +3908,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint; } else if (!insertInterval) { verbosePrint("%s() LN%d: stable insert interval be overrided by global %d.\n", - __func__, __LINE__, g_args.insert_interval); + __func__, __LINE__, g_args.insert_interval); g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; } else { errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } int retVal = getColumnAndTagTypeFromInsertJsonFile( - stbInfo, &g_Dbs.db[i].superTbls[j]); + stbInfo, &g_Dbs.db[i].superTbls[j]); if (false == retVal) { goto PARSE_OVER; } @@ -3926,7 +3926,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { ret = true; - PARSE_OVER: +PARSE_OVER: return ret; } @@ -3971,7 +3971,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, if (answerPrompt && answerPrompt->type == cJSON_String - && answerPrompt->valuestring != NULL) { + && answerPrompt->valuestring != NULL) { if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { g_args.answer_yes = false; } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { @@ -3993,7 +3993,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_args.query_times = 1; } else { errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } @@ -4032,14 +4032,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, - "query_times"); + "query_times"); if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; } else if (!specifiedQueryTimes) { g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; } else { errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } @@ -4048,8 +4048,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { errorPrint("%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", - __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, - 
g_queryInfo.specifiedQueryInfo.concurrent); + __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, + g_queryInfo.specifiedQueryInfo.concurrent); goto PARSE_OVER; } } else if (!concurrent) { @@ -4065,7 +4065,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.mode = ASYNC_QUERY_MODE; } else { errorPrint("%s() LN%d, failed to read json, query mode input error\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } } else { @@ -4097,8 +4097,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress"); if (keepProgress - && keepProgress->type == cJSON_String - && keepProgress->valuestring != NULL) { + && keepProgress->type == cJSON_String + && keepProgress->valuestring != NULL) { if (0 == strcmp("yes", keepProgress->valuestring)) { g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; } else if (0 == strcmp("no", keepProgress->valuestring)) { @@ -4117,13 +4117,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.specifiedQueryInfo.sqlCount = 0; } else if (superSqls->type != cJSON_Array) { errorPrint("%s() LN%d, failed to read json, super sqls not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(superSqls); if (superSqlSize > MAX_QUERY_SQL_COUNT) { errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", - __func__, __LINE__, MAX_QUERY_SQL_COUNT); + __func__, __LINE__, MAX_QUERY_SQL_COUNT); goto PARSE_OVER; } @@ -4176,7 +4176,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; } else { errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } @@ -4198,10 +4198,10 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, - MAX_TB_NAME_SIZE); + MAX_TB_NAME_SIZE); } else { errorPrint("%s() LN%d, failed to read json, super table name input error\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } @@ -4214,7 +4214,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.superQueryInfo.mode = ASYNC_QUERY_MODE; } else { errorPrint("%s() LN%d, failed to read json, query mode input error\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } } else { @@ -4247,8 +4247,8 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* subkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); if (subkeepProgress && - subkeepProgress->type == cJSON_String - && subkeepProgress->valuestring != NULL) { + subkeepProgress->type == cJSON_String + && subkeepProgress->valuestring != NULL) { if (0 == strcmp("yes", subkeepProgress->valuestring)) { g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; } else if (0 == strcmp("no", subkeepProgress->valuestring)) { @@ -4267,13 +4267,13 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.superQueryInfo.sqlCount = 0; } else if (subsqls->type != cJSON_Array) { errorPrint("%s() LN%d: failed to read json, super sqls not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } else { int superSqlSize = cJSON_GetArraySize(subsqls); if (superSqlSize > MAX_QUERY_SQL_COUNT) { errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", - __func__, 
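/*
 * Sketch of the yes/no option parsing repeated above and below
 * (confirm_parameter_prompt, keepProgress, ...): a string item is compared
 * against "yes"/"no" and anything else falls back to a default. Hypothetical
 * helper, not part of the patch; some call sites use exact strcmp instead of
 * the case-insensitive match shown here.
 */
#include <strings.h>
#include "cJSON.h"

static int parseYesNoOption(const cJSON *item, int defaultVal) {
    if (item == NULL || item->type != cJSON_String
            || item->valuestring == NULL) {
        return defaultVal;                 /* absent or wrong type */
    }
    if (strncasecmp(item->valuestring, "yes", 3) == 0) return 1;
    if (strncasecmp(item->valuestring, "no", 2) == 0) return 0;
    return defaultVal;                     /* unrecognized value */
}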
__LINE__, MAX_QUERY_SQL_COUNT); + __func__, __LINE__, MAX_QUERY_SQL_COUNT); goto PARSE_OVER; } @@ -4286,22 +4286,22 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { errorPrint("%s() LN%d, failed to read json, sql not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, - MAX_QUERY_SQL_LENGTH); + MAX_QUERY_SQL_LENGTH); cJSON *result = cJSON_GetObjectItem(sql, "result"); if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){ tstrncpy(g_queryInfo.superQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); + result->valuestring, MAX_FILE_NAME_LEN); } else if (NULL == result) { memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); } else { errorPrint("%s() LN%d, failed to read json, sub query result file not found\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } } @@ -4310,12 +4310,12 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { ret = true; - PARSE_OVER: +PARSE_OVER: return ret; } static bool getInfoFromJsonFile(char* file) { - debugPrint("%s %d %s\n", __func__, __LINE__, file); + debugPrint("%s %d %s\n", __func__, __LINE__, file); FILE *fp = fopen(file, "r"); if (!fp) { @@ -4363,15 +4363,15 @@ static bool getInfoFromJsonFile(char* file) { if (INSERT_TEST == g_args.test_mode) { ret = getMetaFromInsertJsonFile(root); } else if ((QUERY_TEST == g_args.test_mode) - || (SUBSCRIBE_TEST == g_args.test_mode)) { + || (SUBSCRIBE_TEST == g_args.test_mode)) { ret = getMetaFromQueryJsonFile(root); } else { errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n", - __func__, __LINE__); + __func__, __LINE__); goto PARSE_OVER; } - PARSE_OVER: +PARSE_OVER: free(content); cJSON_Delete(root); fclose(fp); @@ -4413,7 +4413,7 @@ static void postFreeResource() { } static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, - SSuperTable* superTblInfo, int* sampleUsePos) { + SSuperTable* superTblInfo, int* sampleUsePos) { if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { /* int ret = readSampleFromCsvFileToMem(superTblInfo); if (0 != ret) { @@ -4428,9 +4428,9 @@ static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, int dataLen = 0; dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, - "(%" PRId64 ", ", timestamp); + "(%" PRId64 ", ", timestamp); dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, - "%s", superTblInfo->sampleDataBuf + superTblInfo->lenOfOneRow * (*sampleUsePos)); + "%s", superTblInfo->sampleDataBuf + superTblInfo->lenOfOneRow * (*sampleUsePos)); dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); (*sampleUsePos)++; @@ -4447,10 +4447,10 @@ static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo for (int i = 0; i < stbInfo->columnCount; i++) { if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6)) - || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) { + || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); + (uint32_t)TSDB_MAX_BINARY_LEN); return -1; } @@ -4463,37 +4463,37 @@ static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\', ", 
buf); tmfree(buf); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "int", 3)) { + "int", 3)) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d, ", rand_int()); + "%d, ", rand_int()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "bigint", 6)) { + "bigint", 6)) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%"PRId64", ", rand_bigint()); + "%"PRId64", ", rand_bigint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "float", 5)) { + "float", 5)) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%f, ", rand_float()); + "%f, ", rand_float()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "double", 6)) { + "double", 6)) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%f, ", rand_double()); + "%f, ", rand_double()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "smallint", 8)) { + "smallint", 8)) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d, ", rand_smallint()); + "%d, ", rand_smallint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "tinyint", strlen("tinyint"))) { + "tinyint", strlen("tinyint"))) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d, ", rand_tinyint()); + "%d, ", rand_tinyint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "bool", strlen("bool"))) { + "bool", strlen("bool"))) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d, ", rand_bool()); + "%d, ", rand_bool()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "timestamp", strlen("timestamp"))) { + "timestamp", strlen("timestamp"))) { dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%"PRId64", ", rand_bigint()); + "%"PRId64", ", rand_bigint()); } else { errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType); return -1; @@ -4509,7 +4509,7 @@ static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo } static int32_t generateData(char *recBuf, char **data_type, - int num_of_cols, int64_t timestamp, int lenOfBinary) { + int num_of_cols, int64_t timestamp, int lenOfBinary) { memset(recBuf, 0, MAX_DATA_SIZE); char *pstr = recBuf; pstr += sprintf(pstr, "(%" PRId64, timestamp); @@ -4572,24 +4572,24 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { char* sampleDataBuf = NULL; sampleDataBuf = calloc( - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); + superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { - errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n", - __func__, __LINE__, - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, - strerror(errno)); - return -1; + errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n", + __func__, __LINE__, + superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + strerror(errno)); + return -1; } superTblInfo->sampleDataBuf = sampleDataBuf; int ret = readSampleFromCsvFileToMem(superTblInfo); if (0 != ret) { - errorPrint("%s() LN%d, read sample from csv file failed.\n", - __func__, __LINE__); - tmfree(sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; - return -1; + errorPrint("%s() LN%d, read sample from csv file failed.\n", + __func__, __LINE__); + tmfree(sampleDataBuf); + superTblInfo->sampleDataBuf = NULL; + return -1; } return 0; @@ -4601,7 +4601,7 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k) SSuperTable* superTblInfo = pThreadInfo->superTblInfo; verbosePrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, - __func__, __LINE__, buffer); + 
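/*
 * generateRowData above assembles one "(ts, v1, v2, ...)" tuple by repeated
 * bounded appends, snprintf(pstr + dataLen, maxLen - dataLen, ...), with the
 * trailing ", " dropped before the closing parenthesis. A minimal sketch of
 * that pattern for a row of double columns; assumes nvals >= 1.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int buildRowTuple(char *buf, int maxLen, int64_t ts,
                         const double *vals, int nvals) {
    int len = snprintf(buf, maxLen, "(%" PRId64 ", ", ts);
    if (len >= maxLen) {
        return -1;                        /* no room even for the timestamp */
    }
    for (int i = 0; i < nvals; i++) {
        len += snprintf(buf + len, maxLen - len, "%f, ", vals[i]);
        if (len >= maxLen) {
            return -1;                    /* tuple would overflow the buffer */
        }
    }
    len -= 2;                             /* drop the trailing ", " */
    len += snprintf(buf + len, maxLen - len, ")");
    return len;                           /* bytes written, like generateRowData */
}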
__func__, __LINE__, buffer); if (superTblInfo) { if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) { affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false); @@ -4609,7 +4609,7 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k) if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) { affectedRows = -1; printf("========restful return fail, threadID[%d]\n", - pThreadInfo->threadID); + pThreadInfo->threadID); } else { affectedRows = k; } @@ -4626,29 +4626,29 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) SSuperTable* superTblInfo = pThreadInfo->superTblInfo; if (superTblInfo) { if ((superTblInfo->childTblOffset >= 0) - && (superTblInfo->childTblLimit > 0)) { - snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + - (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); + && (superTblInfo->childTblLimit > 0)) { + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", + superTblInfo->childTblName + + (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); } else { - verbosePrint("[%d] %s() LN%d: from=%d count=%d seq=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, - pThreadInfo->ntables, tableSeq); - snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); + verbosePrint("[%d] %s() LN%d: from=%d count=%d seq=%d\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, tableSeq); + snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", + superTblInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN); } } else { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s%d", - g_args.tb_prefix, tableSeq); + g_args.tb_prefix, tableSeq); } } static int generateDataTail( - SSuperTable* superTblInfo, - int batch, char* buffer, int remainderBufLen, int64_t insertRows, - int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) { + SSuperTable* superTblInfo, + int batch, char* buffer, int remainderBufLen, int64_t insertRows, + int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) { int len = 0; int ncols_per_record = 1; // count first col ts @@ -4657,8 +4657,8 @@ static int generateDataTail( if (superTblInfo == NULL) { int datatypeSeq = 0; while(g_args.datatype[datatypeSeq]) { - datatypeSeq ++; - ncols_per_record ++; + datatypeSeq ++; + ncols_per_record ++; } } @@ -4673,15 +4673,15 @@ static int generateDataTail( if (superTblInfo) { if (0 == strncasecmp(superTblInfo->dataSource, - "sample", strlen("sample"))) { - retLen = getRowDataFromSample( - data, - remainderBufLen, - startTime + superTblInfo->timeStampStep * k, - superTblInfo, - pSamplePos); + "sample", strlen("sample"))) { + retLen = getRowDataFromSample( + data, + remainderBufLen, + startTime + superTblInfo->timeStampStep * k, + superTblInfo, + pSamplePos); } else if (0 == strncasecmp(superTblInfo->dataSource, - "rand", strlen("rand"))) { + "rand", strlen("rand"))) { int randTail = superTblInfo->timeStampStep * k; if (superTblInfo->disorderRatio > 0) { @@ -4693,11 +4693,11 @@ static int generateDataTail( } uint64_t d = startTime - + randTail; + + randTail; retLen = generateRowData( - data, - d, - superTblInfo); + data, + d, + superTblInfo); } if (retLen > remainderBufLen) { @@ -4716,18 +4716,18 @@ static int generateDataTail( int randTail; if ((g_args.disorderRatio != 0) - && (rand_num < g_args.disorderRatio)) { + && (rand_num < g_args.disorderRatio)) { randTail = 
(DEFAULT_TIMESTAMP_STEP * k - + (taosRandom() % g_args.disorderRange + 1)) * (-1); + + (taosRandom() % g_args.disorderRange + 1)) * (-1); debugPrint("rand data generated, back %d\n", randTail); } else { randTail = DEFAULT_TIMESTAMP_STEP * k; } retLen = generateData(data, data_type, - ncols_per_record, - startTime + randTail, - lenOfBinary); + ncols_per_record, + startTime + randTail, + lenOfBinary); if (len > remainderBufLen) break; @@ -4739,7 +4739,7 @@ static int generateDataTail( } verbosePrint("%s() LN%d len=%d k=%d \nbuffer=%s\n", - __func__, __LINE__, len, k, buffer); + __func__, __LINE__, len, k, buffer); startFrom ++; @@ -4753,8 +4753,8 @@ static int generateDataTail( } static int generateSQLHead(char *tableName, int32_t tableSeq, - threadInfo* pThreadInfo, SSuperTable* superTblInfo, - char *buffer, int remainderBufLen) + threadInfo* pThreadInfo, SSuperTable* superTblInfo, + char *buffer, int remainderBufLen) { int len; @@ -4765,50 +4765,50 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { char* tagsValBuf = NULL; if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq); + tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq); } else { - tagsValBuf = getTagValueFromTagSample( - superTblInfo, - tableSeq % superTblInfo->tagSampleCount); + tagsValBuf = getTagValueFromTagSample( + superTblInfo, + tableSeq % superTblInfo->tagSampleCount); } if (NULL == tagsValBuf) { errorPrint("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); + __func__, __LINE__); return -1; } len = snprintf( headBuf, - HEAD_BUFF_LEN, - "%s.%s using %s.%s tags %s values", - pThreadInfo->db_name, - tableName, - pThreadInfo->db_name, - superTblInfo->sTblName, - tagsValBuf); + HEAD_BUFF_LEN, + "%s.%s using %s.%s tags %s values", + pThreadInfo->db_name, + tableName, + pThreadInfo->db_name, + superTblInfo->sTblName, + tagsValBuf); tmfree(tagsValBuf); } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { len = snprintf( headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - pThreadInfo->db_name, - tableName); + HEAD_BUFF_LEN, + "%s.%s values", + pThreadInfo->db_name, + tableName); } else { len = snprintf( headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - pThreadInfo->db_name, - tableName); + HEAD_BUFF_LEN, + "%s.%s values", + pThreadInfo->db_name, + tableName); } } else { - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - pThreadInfo->db_name, - tableName); + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", + pThreadInfo->db_name, + tableName); } if (len > remainderBufLen) @@ -4820,26 +4820,26 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, } static int generateInterlaceDataBuffer( - char *tableName, int batchPerTbl, int i, int batchPerTblTimes, - int32_t tableSeq, - threadInfo *pThreadInfo, char *buffer, - int64_t insertRows, - int64_t startTime, - int *pRemainderBufLen) + char *tableName, int batchPerTbl, int i, int batchPerTblTimes, + int32_t tableSeq, + threadInfo *pThreadInfo, char *buffer, + int64_t insertRows, + int64_t startTime, + int *pRemainderBufLen) { assert(buffer); char *pstr = buffer; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, - superTblInfo, pstr, *pRemainderBufLen); + superTblInfo, pstr, *pRemainderBufLen); if (headLen <= 0) { return 0; } // generate data buffer verbosePrint("[%d] %s() LN%d i=%d buffer:\n%s\n", - pThreadInfo->threadID, __func__, 
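/*
 * Sketch of the out-of-order timestamp rule used in generateDataTail above:
 * with probability disorderRatio percent a row's timestamp jumps backwards by
 * up to disorderRange steps, otherwise rows advance by the regular per-row
 * step. Standalone form using plain rand() in place of taosRandom(); assumes
 * disorderRange >= 1 whenever disorderRatio > 0.
 */
#include <stdint.h>
#include <stdlib.h>

static int64_t nextRowTimestamp(int64_t startTime, int64_t stepMs, int k,
                                int disorderRatio, int disorderRange) {
    if (disorderRatio > 0 && (rand() % 100) < disorderRatio) {
        /* disordered row: pull it back 1..disorderRange ms */
        return startTime + stepMs * k - (rand() % disorderRange + 1);
    }
    return startTime + stepMs * k;        /* ordered row */
}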
__LINE__, i, buffer); + pThreadInfo->threadID, __func__, __LINE__, i, buffer); pstr += headLen; *pRemainderBufLen -= headLen; @@ -4847,27 +4847,29 @@ static int generateInterlaceDataBuffer( int dataLen = 0; verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n", - pThreadInfo->threadID, __func__, __LINE__, - i, batchPerTblTimes, batchPerTbl); + pThreadInfo->threadID, __func__, __LINE__, + i, batchPerTblTimes, batchPerTbl); if (superTblInfo) { if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { startTime = taosGetTimestamp(pThreadInfo->time_precision); } } else { - startTime = 1500000000000; + startTime = 1500000000000; } int k = generateDataTail( - superTblInfo, - batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, - startTime, - &(pThreadInfo->samplePos), &dataLen); + superTblInfo, + batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, + startTime, + &(pThreadInfo->samplePos), &dataLen); if (k == batchPerTbl) { pstr += dataLen; *pRemainderBufLen -= dataLen; } else { + debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %d\n", + __func__, __LINE__, k, batchPerTbl); pstr -= headLen; pstr[0] = '\0'; k = 0; @@ -4877,12 +4879,12 @@ static int generateInterlaceDataBuffer( } static int generateProgressiveDataBuffer( - char *tableName, - int32_t tableSeq, - threadInfo *pThreadInfo, char *buffer, - int64_t insertRows, - int64_t startFrom, int64_t startTime, int *pSamplePos, - int *pRemainderBufLen) + char *tableName, + int32_t tableSeq, + threadInfo *pThreadInfo, char *buffer, + int64_t insertRows, + int64_t startFrom, int64_t startTime, int *pSamplePos, + int *pRemainderBufLen) { SSuperTable* superTblInfo = pThreadInfo->superTblInfo; @@ -4891,8 +4893,8 @@ static int generateProgressiveDataBuffer( if (superTblInfo == NULL) { int datatypeSeq = 0; while(g_args.datatype[datatypeSeq]) { - datatypeSeq ++; - ncols_per_record ++; + datatypeSeq ++; + ncols_per_record ++; } } @@ -4904,7 +4906,7 @@ static int generateProgressiveDataBuffer( memset(buffer, 0, *pRemainderBufLen); int headLen = generateSQLHead(tableName, tableSeq, pThreadInfo, superTblInfo, - buffer, *pRemainderBufLen); + buffer, *pRemainderBufLen); if (headLen <= 0) { return 0; @@ -4914,21 +4916,35 @@ static int generateProgressiveDataBuffer( int dataLen; k = generateDataTail(superTblInfo, - g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom, - startTime, - pSamplePos, &dataLen); + g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, startFrom, + startTime, + pSamplePos, &dataLen); return k; } static void* syncWriteInterlace(threadInfo *pThreadInfo) { debugPrint("[%d] %s() LN%d: ### interlace write\n", - pThreadInfo->threadID, __func__, __LINE__); + pThreadInfo->threadID, __func__, __LINE__); + + int64_t insertRows; + int interlaceRows; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; - int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; + if (superTblInfo) { + insertRows = superTblInfo->insertRows; + + if ((superTblInfo->interlaceRows == 0) + && (g_args.interlace_rows > 0)) { + interlaceRows = g_args.interlace_rows; + } else { + interlaceRows = superTblInfo->interlaceRows; + } + } else { + insertRows = g_args.num_of_DPT; + interlaceRows = g_args.interlace_rows; + } if (interlaceRows > insertRows) interlaceRows = insertRows; @@ -4951,7 +4967,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { char* buffer = calloc(maxSqlLen, 1); if (NULL == 
buffer) { errorPrint( "%s() LN%d, Failed to alloc %d Bytes, reason:%s\n", - __func__, __LINE__, maxSqlLen, strerror(errno)); + __func__, __LINE__, maxSqlLen, strerror(errno)); return NULL; } @@ -4974,8 +4990,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int tableSeq = pThreadInfo->start_table_from; debugPrint("[%d] %s() LN%d: start_table_from=%d ntables=%d insertRows=%"PRId64"\n", - pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, - pThreadInfo->ntables, insertRows); + pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); int64_t startTime = pThreadInfo->start_time; @@ -5000,8 +5016,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; + st = taosGetTimestampMs(); + flagSleep = false; } // generate data memset(buffer, 0, maxSqlLen); @@ -5019,23 +5035,23 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { getTableName(tableName, pThreadInfo, tableSeq); if (0 == strlen(tableName)) { errorPrint("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); + pThreadInfo->threadID, __func__, __LINE__); free(buffer); return NULL; } int oldRemainderLen = remainderBufLen; int generated = generateInterlaceDataBuffer( - tableName, batchPerTbl, i, batchPerTblTimes, - tableSeq, - pThreadInfo, pstr, - insertRows, - startTime, - &remainderBufLen); + tableName, batchPerTbl, i, batchPerTblTimes, + tableSeq, + pThreadInfo, pstr, + insertRows, + startTime, + &remainderBufLen); if (generated < 0) { debugPrint("[%d] %s() LN%d, generated data is %d\n", - pThreadInfo->threadID, __func__, __LINE__, generated); + pThreadInfo->threadID, __func__, __LINE__, generated); goto free_and_statistics_interlace; } else if (generated == 0) { break; @@ -5047,44 +5063,44 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { // startTime += batchPerTbl * superTblInfo->timeStampStep; pThreadInfo->totalInsertRows += batchPerTbl; verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - batchPerTbl, recOfBatch); + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl, recOfBatch); if (insertMode == INTERLACE_INSERT_MODE) { - if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { - // turn to first table - tableSeq = pThreadInfo->start_table_from; - generatedRecPerTbl += batchPerTbl; + if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + generatedRecPerTbl += batchPerTbl; - startTime = pThreadInfo->start_time - + generatedRecPerTbl * nTimeStampStep; + startTime = pThreadInfo->start_time + + generatedRecPerTbl * nTimeStampStep; - flagSleep = true; - if (generatedRecPerTbl >= insertRows) - break; + flagSleep = true; + if (generatedRecPerTbl >= insertRows) + break; - if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) - break; - } + int remainRows = insertRows - generatedRecPerTbl; + if ((remainRows > 0) && (batchPerTbl > remainRows)) + batchPerTbl = remainRows; + + if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) + break; + } } - int remainRows = insertRows - generatedRecPerTbl; - if ((remainRows > 0) && (batchPerTbl > remainRows)) - batchPerTbl = remainRows; - verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%d insertRows=%"PRId64"\n", - 
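/*
 * The interlace loop above rotates over the thread's tables, appending
 * batchPerTbl rows per table per pass; the patch moves the final-pass clamp
 * inside the wrap-around branch so the shrinking batch is recomputed once per
 * full rotation. The clamp itself, in isolation (a sketch):
 */
#include <stdint.h>

static int clampBatchPerTbl(int batchPerTbl, int64_t insertRows,
                            int64_t generatedRecPerTbl) {
    int64_t remainRows = insertRows - generatedRecPerTbl;
    if (remainRows > 0 && (int64_t)batchPerTbl > remainRows) {
        return (int)remainRows;           /* last pass writes only what's left */
    }
    return batchPerTbl;
}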
pThreadInfo->threadID, __func__, __LINE__, - generatedRecPerTbl, insertRows); + pThreadInfo->threadID, __func__, __LINE__, + generatedRecPerTbl, insertRows); if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl) break; } verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRId64"\n", - pThreadInfo->threadID, __func__, __LINE__, recOfBatch, - pThreadInfo->totalInsertRows); + pThreadInfo->threadID, __func__, __LINE__, recOfBatch, + pThreadInfo->totalInsertRows); verbosePrint("[%d] %s() LN%d, buffer=%s\n", - pThreadInfo->threadID, __func__, __LINE__, buffer); + pThreadInfo->threadID, __func__, __LINE__, buffer); startTs = taosGetTimestampMs(); @@ -5093,7 +5109,7 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { endTs = taosGetTimestampMs(); int64_t delay = endTs - startTs; performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", - __func__, __LINE__, delay); + __func__, __LINE__, delay); if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; @@ -5101,12 +5117,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->totalDelay += delay; verbosePrint("[%d] %s() LN%d affectedRows=%d\n", pThreadInfo->threadID, - __func__, __LINE__, affectedRows); + __func__, __LINE__, affectedRows); if ((affectedRows < 0) || (recOfBatch != affectedRows)) { - errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %d\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, - recOfBatch, affectedRows, buffer); - goto free_and_statistics_interlace; + errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %d\n%s\n", + pThreadInfo->threadID, __func__, __LINE__, + recOfBatch, affectedRows, buffer); + goto free_and_statistics_interlace; } pThreadInfo->totalAffectedRows += affectedRows; @@ -5114,9 +5130,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); lastPrintTime = currentPrintTime; } @@ -5126,20 +5142,20 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { if (insert_interval > (et - st) ) { int sleepTime = insert_interval - (et -st); performancePrint("%s() LN%d sleep: %d ms for insert interval\n", - __func__, __LINE__, sleepTime); + __func__, __LINE__, sleepTime); taosMsleep(sleepTime); // ms sleepTimeTotal += insert_interval; } } } - free_and_statistics_interlace: +free_and_statistics_interlace: tmfree(buffer); printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); return NULL; } @@ -5160,8 +5176,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { char* buffer = calloc(maxSqlLen, 1); if (NULL == buffer) { errorPrint( "Failed to alloc %d Bytes, reason:%s\n", - maxSqlLen, - strerror(errno)); + maxSqlLen, + strerror(errno)); return NULL; } @@ -5183,25 +5199,25 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->samplePos = 0; for (uint32_t tableSeq = - pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; 
- tableSeq ++) { + pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; + tableSeq ++) { int64_t start_time = pThreadInfo->start_time; int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); for (int64_t i = 0; i < insertRows;) { - /* - if (insert_interval) { - st = taosGetTimestampMs(); - } - */ + /* + if (insert_interval) { + st = taosGetTimestampMs(); + } + */ char tableName[TSDB_TABLE_NAME_LEN]; getTableName(tableName, pThreadInfo, tableSeq); verbosePrint("%s() LN%d: tid=%d seq=%d tableName=%s\n", - __func__, __LINE__, - pThreadInfo->threadID, tableSeq, tableName); + __func__, __LINE__, + pThreadInfo->threadID, tableSeq, tableName); int remainderBufLen = maxSqlLen; char *pstr = buffer; @@ -5213,10 +5229,10 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { remainderBufLen -= len; int generated = generateProgressiveDataBuffer( - tableName, tableSeq, pThreadInfo, pstr, insertRows, - i, start_time, - &(pThreadInfo->samplePos), - &remainderBufLen); + tableName, tableSeq, pThreadInfo, pstr, insertRows, + i, start_time, + &(pThreadInfo->samplePos), + &remainderBufLen); if (generated > 0) i += generated; else @@ -5232,7 +5248,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { endTs = taosGetTimestampMs(); int64_t delay = endTs - startTs; performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", - __func__, __LINE__, delay); + __func__, __LINE__, delay); if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; @@ -5247,9 +5263,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); lastPrintTime = currentPrintTime; } @@ -5271,21 +5287,21 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if (g_args.verbose_print) { if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo && - (0 == strncasecmp( - superTblInfo->dataSource, "sample", strlen("sample")))) { - verbosePrint("%s() LN%d samplePos=%d\n", - __func__, __LINE__, pThreadInfo->samplePos); + (0 == strncasecmp( + superTblInfo->dataSource, "sample", strlen("sample")))) { + verbosePrint("%s() LN%d samplePos=%d\n", + __func__, __LINE__, pThreadInfo->samplePos); } } } // tableSeq - free_and_statistics_2: +free_and_statistics_2: tmfree(buffer); printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); return NULL; } @@ -5294,7 +5310,18 @@ static void* syncWrite(void *sarg) { threadInfo *pThreadInfo = (threadInfo *)sarg; SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; + int interlaceRows; + + if (superTblInfo) { + if ((superTblInfo->interlaceRows == 0) + && (g_args.interlace_rows > 0)) { + interlaceRows = g_args.interlace_rows; + } else { + interlaceRows = superTblInfo->interlaceRows; + } + } else { + 
interlaceRows = g_args.interlace_rows; + } if (interlaceRows > 0) { // interlace mode @@ -5322,7 +5349,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { char data[MAX_DATA_SIZE]; char *pstr = buffer; pstr += sprintf(pstr, "insert into %s.%s%d values", pThreadInfo->db_name, pThreadInfo->tb_prefix, - pThreadInfo->start_table_from); + pThreadInfo->start_table_from); // if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { if (pThreadInfo->counter >= g_args.num_of_RPR) { pThreadInfo->start_table_from++; @@ -5338,7 +5365,7 @@ static void callBack(void *param, TAOS_RES *res, int code) { for (int i = 0; i < g_args.num_of_RPR; i++) { int rand_num = taosRandom() % 100; if (0 != pThreadInfo->superTblInfo->disorderRatio - && rand_num < pThreadInfo->superTblInfo->disorderRatio) { + && rand_num < pThreadInfo->superTblInfo->disorderRatio) { int64_t d = pThreadInfo->lastTs - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); generateRowData(data, d, pThreadInfo->superTblInfo); } else { @@ -5382,7 +5409,7 @@ static void *asyncWrite(void *sarg) { } static void startMultiThreadInsertData(int threads, char* db_name, - char* precision,SSuperTable* superTblInfo) { + char* precision,SSuperTable* superTblInfo) { pthread_t *pids = malloc(threads * sizeof(pthread_t)); assert(pids != NULL); @@ -5417,48 +5444,48 @@ static void startMultiThreadInsertData(int threads, char* db_name, int64_t start_time; if (superTblInfo) { if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { - start_time = taosGetTimestamp(timePrec); + start_time = taosGetTimestamp(timePrec); } else { if (TSDB_CODE_SUCCESS != taosParseTime( - superTblInfo->startTimestamp, - &start_time, - strlen(superTblInfo->startTimestamp), - timePrec, 0)) { - ERROR_EXIT("failed to parse time!\n"); + superTblInfo->startTimestamp, + &start_time, + strlen(superTblInfo->startTimestamp), + timePrec, 0)) { + ERROR_EXIT("failed to parse time!\n"); } } } else { - start_time = 1500000000000; + start_time = 1500000000000; } int64_t start = taosGetTimestampMs(); // read sample data from file first if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, - "sample", strlen("sample")))) { + "sample", strlen("sample")))) { if (0 != prepareSampleDataForSTable(superTblInfo)) { errorPrint("%s() LN%d, prepare sample data for stable failed!\n", - __func__, __LINE__); + __func__, __LINE__); exit(-1); } } // read sample data from file first if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, - "sample", strlen("sample")))) { + "sample", strlen("sample")))) { if (0 != prepareSampleDataForSTable(superTblInfo)) { errorPrint("%s() LN%d, prepare sample data for stable failed!\n", - __func__, __LINE__); + __func__, __LINE__); exit(-1); } } TAOS* taos = taos_connect( - g_Dbs.host, g_Dbs.user, - g_Dbs.password, db_name, g_Dbs.port); + g_Dbs.host, g_Dbs.user, + g_Dbs.password, db_name, g_Dbs.port); if (NULL == taos) { errorPrint("%s() LN%d, connect to server fail , reason: %s\n", - __func__, __LINE__, taos_errstr(NULL)); + __func__, __LINE__, taos_errstr(NULL)); exit(-1); } @@ -5469,15 +5496,15 @@ static void startMultiThreadInsertData(int threads, char* db_name, int limit, offset; if ((NULL != g_args.sqlFile) && (superTblInfo->childTblExists == TBL_NO_EXISTS) && - ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) { + ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) { printf("WARNING: offset and limit will not be used since the child tables not exists!\n"); } if 
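/*
 * Both syncWrite and syncWriteInterlace now resolve the effective interlace
 * setting the same way: a per-super-table value of 0 defers to the global
 * command-line value, and any positive per-super-table value wins. The rule
 * in isolation (sketch):
 */
static int effectiveInterlaceRows(int hasSuperTable, int stbInterlaceRows,
                                  int globalInterlaceRows) {
    if (!hasSuperTable) {
        return globalInterlaceRows;       /* no super table: global only */
    }
    if (stbInterlaceRows == 0 && globalInterlaceRows > 0) {
        return globalInterlaceRows;       /* 0 means "inherit the global" */
    }
    return stbInterlaceRows;              /* explicit per-super-table value */
}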
((superTblInfo->childTblExists == TBL_ALREADY_EXISTS) - && (superTblInfo->childTblOffset >= 0)) { + && (superTblInfo->childTblOffset >= 0)) { if ((superTblInfo->childTblLimit < 0) || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit) - > (superTblInfo->childTblCount))) { + > (superTblInfo->childTblCount))) { superTblInfo->childTblLimit = superTblInfo->childTblCount - superTblInfo->childTblOffset; } @@ -5503,7 +5530,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } if ((superTblInfo->childTblExists != TBL_NO_EXISTS) - && (0 == superTblInfo->childTblLimit)) { + && (0 == superTblInfo->childTblLimit)) { printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n"); if (!g_args.answer_yes) { printf(" Press enter key to continue or Ctrl-C to stop\n\n"); @@ -5512,7 +5539,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } superTblInfo->childTblName = (char*)calloc(1, - limit * TSDB_TABLE_NAME_LEN); + limit * TSDB_TABLE_NAME_LEN); if (superTblInfo->childTblName == NULL) { errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); taos_close(taos); @@ -5555,15 +5582,15 @@ static void startMultiThreadInsertData(int threads, char* db_name, t_info->minDelay = INT16_MAX; if ((NULL == superTblInfo) || - (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) { + (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5))) { //t_info->taos = taos; t_info->taos = taos_connect( - g_Dbs.host, g_Dbs.user, - g_Dbs.password, db_name, g_Dbs.port); + g_Dbs.host, g_Dbs.user, + g_Dbs.password, db_name, g_Dbs.port); if (NULL == t_info->taos) { errorPrint( - "connect to server fail from insert sub thread, reason: %s\n", - taos_errstr(NULL)); + "connect to server fail from insert sub thread, reason: %s\n", + taos_errstr(NULL)); exit(-1); } } else { @@ -5571,7 +5598,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } if ((NULL == superTblInfo) - || (0 == superTblInfo->multiThreadWriteOneTbl)) { + || (0 == superTblInfo->multiThreadWriteOneTbl)) { t_info->start_table_from = startFrom; t_info->ntables = iend_table_to = i < b ? startFrom + a : startFrom + a - 1; @@ -5607,15 +5634,15 @@ static void startMultiThreadInsertData(int threads, char* db_name, taos_close(t_info->taos); debugPrint("%s() LN%d, [%d] totalInsert=%"PRId64" totalAffected=%"PRId64"\n", - __func__, __LINE__, - t_info->threadID, t_info->totalInsertRows, - t_info->totalAffectedRows); + __func__, __LINE__, + t_info->threadID, t_info->totalInsertRows, + t_info->totalAffectedRows); if (superTblInfo) { - superTblInfo->totalAffectedRows += t_info->totalAffectedRows; - superTblInfo->totalInsertRows += t_info->totalInsertRows; + superTblInfo->totalAffectedRows += t_info->totalAffectedRows; + superTblInfo->totalInsertRows += t_info->totalInsertRows; } else { - g_args.totalAffectedRows += t_info->totalAffectedRows; - g_args.totalInsertRows += t_info->totalInsertRows; + g_args.totalAffectedRows += t_info->totalAffectedRows; + g_args.totalInsertRows += t_info->totalInsertRows; } totalDelay += t_info->totalDelay; @@ -5633,32 +5660,32 @@ static void startMultiThreadInsertData(int threads, char* db_name, if (superTblInfo) { printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. 
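/*
 * The per-thread table assignment above follows a quotient/remainder split:
 * assuming a = ntables / threads and b = ntables % threads as elsewhere in
 * taosdemo, the first b threads take a + 1 tables and the rest take a, so
 * every table is covered exactly once. A hedged reconstruction of that
 * assignment as a standalone helper:
 */
static void splitTablesAcrossThreads(int ntables, int threads, int threadIdx,
                                     int *startFrom, int *count) {
    int a = ntables / threads;            /* base tables per thread */
    int b = ntables % threads;            /* first b threads take one extra */
    int from = 0;
    for (int i = 0; i < threadIdx; i++) {
        from += (i < b) ? a + 1 : a;
    }
    *startFrom = from;
    *count = (threadIdx < b) ? a + 1 : a;
}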
%2.f records/second\n\n", - t / 1000.0, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, - (double)superTblInfo->totalInsertRows / (t / 1000.0)); + t / 1000.0, superTblInfo->totalInsertRows, + superTblInfo->totalAffectedRows, + threads, db_name, superTblInfo->sTblName, + (double)superTblInfo->totalInsertRows / (t / 1000.0)); fprintf(g_fpOfInsertResult, - "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n", - t / 1000.0, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, - (double)superTblInfo->totalInsertRows / (t / 1000.0)); + "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s.%s. %2.f records/second\n\n", + t / 1000.0, superTblInfo->totalInsertRows, + superTblInfo->totalAffectedRows, + threads, db_name, superTblInfo->sTblName, + (double)superTblInfo->totalInsertRows / (t / 1000.0)); } else { printf("Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n", - t / 1000.0, g_args.totalInsertRows, - g_args.totalAffectedRows, - threads, db_name, - (double)g_args.totalInsertRows / (t / 1000.0)); + t / 1000.0, g_args.totalInsertRows, + g_args.totalAffectedRows, + threads, db_name, + (double)g_args.totalInsertRows / (t / 1000.0)); fprintf(g_fpOfInsertResult, - "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n", - t * 1000.0, g_args.totalInsertRows, - g_args.totalAffectedRows, - threads, db_name, - (double)g_args.totalInsertRows / (t / 1000.0)); + "Spent %.2f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s) into %s %2.f records/second\n\n", + t * 1000.0, g_args.totalInsertRows, + g_args.totalAffectedRows, + threads, db_name, + (double)g_args.totalInsertRows / (t / 1000.0)); } printf("insert delay, avg: %10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n", - avgDelay, maxDelay, minDelay); + avgDelay, maxDelay, minDelay); fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRId64"ms, min: %"PRId64"ms\n\n", avgDelay, maxDelay, minDelay); @@ -5681,12 +5708,12 @@ static void *readTable(void *sarg) { return NULL; } - int num_of_DPT; + int num_of_DPT; /* if (rinfo->superTblInfo) { num_of_DPT = rinfo->superTblInfo->insertRows; // nrecords_per_table; } else { */ - num_of_DPT = g_args.num_of_DPT; + num_of_DPT = g_args.num_of_DPT; // } int num_of_tables = rinfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; @@ -5858,7 +5885,7 @@ static int insertTestProcess() { if (g_totalChildTables > 0) { printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", - (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); + (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); fprintf(g_fpOfInsertResult, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n", (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); @@ -5876,15 +5903,15 @@ static int insertTestProcess() { if (superTblInfo && (superTblInfo->insertRows > 0)) { startMultiThreadInsertData( - g_Dbs.threadCount, - g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, - superTblInfo); + g_Dbs.threadCount, + g_Dbs.db[i].dbName, + g_Dbs.db[i].dbCfg.precision, + superTblInfo); } } } } else { - startMultiThreadInsertData( + startMultiThreadInsertData( g_Dbs.threadCount, 
g_Dbs.db[i].dbName, g_Dbs.db[i].dbCfg.precision, @@ -5912,13 +5939,13 @@ static void *specifiedTableQuery(void *sarg) { if (pThreadInfo->taos == NULL) { TAOS * taos = NULL; taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - NULL, - g_queryInfo.port); + g_queryInfo.user, + g_queryInfo.password, + NULL, + g_queryInfo.port); if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); + pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { pThreadInfo->taos = taos; @@ -5945,7 +5972,7 @@ static void *specifiedTableQuery(void *sarg) { while(queryTimes --) { if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < - (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) { + (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) { taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms } @@ -5960,15 +5987,15 @@ static void *specifiedTableQuery(void *sarg) { pThreadInfo->threadID); } selectAndGetResult(pThreadInfo->taos, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile); + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], tmpFile); int64_t t2 = taosGetTimestampMs(); printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %10.3f s\n", - taosGetSelfPthreadId(), (t2 - t1)/1000.0); + taosGetSelfPthreadId(), (t2 - t1)/1000.0); } else { int64_t t1 = taosGetTimestampMs(); int retCode = postProceSql(g_queryInfo.host, - g_queryInfo.port, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); + g_queryInfo.port, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); if (0 != retCode) { printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); return NULL; @@ -5981,7 +6008,6 @@ static void *specifiedTableQuery(void *sarg) { totalQueried ++; g_queryInfo.specifiedQueryInfo.totalQueried ++; - et = taosGetTimestampMs(); int64_t currentPrintTime = taosGetTimestampMs(); @@ -5993,8 +6019,8 @@ static void *specifiedTableQuery(void *sarg) { pThreadInfo->threadID, totalQueried, (double)(totalQueried/((endTs-startTs)/1000.0))); + lastPrintTime = currentPrintTime; } - lastPrintTime = currentPrintTime; } return NULL; } @@ -6034,7 +6060,7 @@ static void *superTableQuery(void *sarg) { g_queryInfo.port); if (taos == NULL) { errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); + pThreadInfo->threadID, taos_errstr(NULL)); return NULL; } else { pThreadInfo->taos = taos; @@ -6079,8 +6105,8 @@ static void *superTableQuery(void *sarg) { pThreadInfo->threadID, totalQueried, (double)(totalQueried/((endTs-startTs)/1000.0))); + lastPrintTime = currentPrintTime; } - lastPrintTime = currentPrintTime; } } et = taosGetTimestampMs(); From 05ac0ee64d01338d868d2048bed207632abfc9b8 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 26 Apr 2021 16:09:59 +0800 Subject: [PATCH 04/14] [TM-210] : comment out "having" related doc. --- documentation20/cn/12.taos-sql/docs.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index c754eae088..eeeeb3c8c4 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -407,7 +407,7 @@ SELECT select_expr [, select_expr ...] 
[INTERVAL (interval_val [, interval_offset])] [SLIDING sliding_val] [FILL fill_val] - [GROUP BY col_list [HAVING having_condition]] + [GROUP BY col_list ] [ORDER BY col_list { DESC | ASC }] [SLIMIT limit_val [SOFFSET offset_val]] [LIMIT limit_val [OFFSET offset_val]] @@ -648,6 +648,7 @@ Query OK, 1 row(s) in set (0.001091s) 2. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如:((value > 20 AND value < 30) OR (value < 12)) 。 3. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 + ### SQL 示例 From 99beeb356bd7f47a3160f828ef908b7778e338db Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 26 Apr 2021 16:23:36 +0800 Subject: [PATCH 05/14] [TM-210] : remove nanosecond related words. --- documentation20/cn/08.connector/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index ad3179c310..54cc1a9e03 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -213,7 +213,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时,需要包含TDengine - `int taos_result_precision(TAOS_RES *res)` - 返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒,`2` 代表纳秒。 + 返回结果集时间戳字段的精度,`0` 代表毫秒,`1` 代表微秒。 - `TAOS_ROW taos_fetch_row(TAOS_RES *res)` From 8af0ada70cf3a05da8d45629ab477575a4d778d3 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 26 Apr 2021 16:43:50 +0800 Subject: [PATCH 06/14] [TD-1316] : "apercentile" func can be applied on super table. --- documentation20/cn/12.taos-sql/docs.md | 42 ++++++++++++++++++++------ 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index eeeeb3c8c4..4a4bc4c2eb 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -707,11 +707,11 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:应用全部字段。 - 适用于:表、超级表。 + 适用于:**表、超级表**。 说明: - 1)可以使用星号*来替代具体的字段,使用星号(*)返回全部记录数量。 + 1)可以使用星号\*来替代具体的字段,使用星号(\*)返回全部记录数量。 2)针对同一表的(不包含NULL值)字段查询结果均相同。 @@ -742,7 +742,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool字段。 - 适用于:表、超级表。 + 适用于:**表、超级表**。 示例: ```mysql @@ -769,7 +769,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 - 适用于:表。 + 适用于:**表**。 - **SUM** ```mysql @@ -781,7 +781,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 - 适用于:表、超级表。 + 适用于:**表、超级表**。 示例: ```mysql @@ -808,7 +808,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 - 适用于:表。(从 2.0.15.1 版本开始,本函数也支持超级表) + 适用于:**表**。(从 2.0.15.1 版本开始,本函数也支持**超级表**) 示例: ```mysql @@ -831,7 +831,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 说明:自变量是时间戳,因变量是该列的值。 - 适用于:表。 + 适用于:**表**。 示例: ```mysql @@ -854,6 +854,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 适用于:**表、超级表**。 + 示例: ```mysql taos> SELECT MIN(current), MIN(voltage) FROM meters; @@ -879,6 +881,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 适用于:**表、超级表**。 + 示例: ```mysql taos> SELECT MAX(current), MAX(voltage) FROM meters; @@ -904,6 +908,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:所有字段。 + 适用于:**表、超级表**。 + 说明: 1)如果要返回各个列的首个(时间戳最小)非NULL值,可以使用FIRST(\*); @@ -937,6 +943,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:所有字段。 + 适用于:**表、超级表**。 + 说明: 1)如果要返回各个列的最后(时间戳最大)一个非NULL值,可以使用LAST(\*); @@ -968,6 +976,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 适用于:**表、超级表**。 + 说明: 1)*k*值取值范围1≤*k*≤100; 
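The hunks in this patch mainly add an explicit "适用于:表、超级表" (applies to: table / super table) note to each aggregate and selection function. A minimal C-connector sketch exercising one of the newly super-table-capable functions, APERCENTILE, against the demo power.meters schema: the connection parameters are placeholders, and only API calls already described in the connector docs (taos_connect, taos_query, taos_errno, taos_fetch_row, taos_free_result) are used.

```c
#include <stdio.h>
#include <taos.h>

int main(void) {
    TAOS *conn = taos_connect("localhost", "root", "taosdata", "power", 0);
    if (conn == NULL) {
        fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
        return 1;
    }
    /* APERCENTILE is documented above as usable on tables and super tables */
    TAOS_RES *res = taos_query(conn,
            "SELECT APERCENTILE(current, 90) FROM meters;");
    if (taos_errno(res) != 0) {
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    } else {
        TAOS_ROW row;
        while ((row = taos_fetch_row(res)) != NULL) {
            /* the function returns a double, per the docs above */
            printf("approx p90(current) = %f\n", *(double *)row[0]);
        }
    }
    taos_free_result(res);
    taos_close(conn);
    return 0;
}
```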
@@ -1002,6 +1012,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 适用于:**表、超级表**。 + 说明: 1)*k*值取值范围1≤*k*≤100; @@ -1035,6 +1047,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 适用于:**表**。 + 说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。 示例: @@ -1050,12 +1064,14 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ```mysql SELECT APERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]; ``` - 功能说明:统计表中某列的值百分比分位数,与PERCENTILE函数相似,但是返回近似结果。 + 功能说明:统计表/超级表中某列的值百分比分位数,与PERCENTILE函数相似,但是返回近似结果。 返回结果数据类型: 双精度浮点数Double。 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 适用于:**表、超级表**。 + 说明:*P*值取值范围0≤*P*≤100,为0的时候等同于MIN,为100的时候等同于MAX。推荐使用```APERCENTILE```函数,该函数性能远胜于```PERCENTILE```函数 ```mysql @@ -1070,12 +1086,14 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ```mysql SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; ``` - 功能说明:返回表(超级表)的最后一条记录。 + 功能说明:返回表/超级表的最后一条记录。 返回结果数据类型:同应用的字段。 应用字段:所有字段。 + 适用于:**表、超级表**。 + 说明:与last函数不同,last_row不支持时间范围限制,强制返回最后一条记录。 示例: @@ -1104,6 +1122,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 适用于:**表**。 + 说明:输出结果行数是范围内总行数减一,第一行没有结果输出。 示例: @@ -1126,6 +1146,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在binary、nchar、bool类型字段。 + 适用于:**表、超级表**。 + 说明:可用于TIMESTAMP字段,此时表示记录的时间覆盖范围。 示例: @@ -1154,6 +1176,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + 适用于:**表、超级表**。 + 说明: 1)支持两列或多列之间进行计算,可使用括号控制计算优先级; From 8f0db3d79acaf92685137f104f61cfa0af990669 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 26 Apr 2021 17:50:44 +0800 Subject: [PATCH 07/14] [TD-850] : describe usage limitation about calculation expression. --- documentation20/cn/12.taos-sql/docs.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 4a4bc4c2eb..0f2a972495 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -470,7 +470,7 @@ Query OK, 1 row(s) in set (0.020443s) ``` 在使用SQL函数来进行查询过程中,部分SQL函数支持通配符操作。其中的区别在于: -```count(\*)```函数只返回一列。```first```、```last```、```last_row```函数则是返回全部列。 +```count(*)```函数只返回一列。```first```、```last```、```last_row```函数则是返回全部列。 ```mysql taos> SELECT COUNT(*) FROM d1001; @@ -622,13 +622,15 @@ taos> SELECT COUNT(tbname) FROM meters WHERE groupId > 2; Query OK, 1 row(s) in set (0.001091s) ``` -- 可以使用 * 返回所有列,或指定列名。可以对数字列进行四则运算,可以给输出的列取列名 -- WHERE 语句可以使用各种逻辑判断来过滤数字值,或使用通配符来过滤字符串 +- 可以使用 * 返回所有列,或指定列名。可以对数字列进行四则运算,可以给输出的列取列名。 + * 暂不支持对列的四则运算算式用于条件过滤算子(例如,不支持 `where a*2>6;`,但可以写 `where a>6/2;`)。 + * 暂不支持对列的四则运算算式作为 SQL 函数的应用对象(例如,不支持 `select min(2*a) from t;`,但可以写 `select 2*min(a) from t;`)。 +- WHERE 语句可以使用各种逻辑判断来过滤数字值,或使用通配符来过滤字符串。 - 输出结果缺省按首列时间戳升序排序,但可以指定按降序排序( _c0 指首列时间戳)。使用 ORDER BY 对其他字段进行排序为非法操作。 - 参数 LIMIT 控制输出条数,OFFSET 指定从第几条开始输出。LIMIT/OFFSET 对结果集的执行顺序在 ORDER BY 之后。 * 在有 GROUP BY 子句的情况下,LIMIT 参数控制的是每个分组中至多允许输出的条数。 - 参数 SLIMIT 控制由 GROUP BY 指令划分的分组中,至多允许输出几个分组的数据。 -- 通过”>>"输出结果可以导出到指定文件 +- 通过 ">>" 输出结果可以导出到指定文件。 ### 支持的条件过滤操作 From 25349a4f672fbaea883df28178d707c01990c07d Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 26 Apr 2021 18:17:34 +0800 Subject: [PATCH 08/14] [TD-3666] : update description about Binary column. 
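The Binary behavior this patch documents can be sketched as follows (a hypothetical example, not part of the patch itself: `tb_bin` and `bin1` are made-up names; binary(20) holds at most 20 single-byte characters, and an embedded single quote is written as `\'`):

```mysql
-- Hypothetical sketch: bin1 accepts up to 20 ASCII characters.
CREATE TABLE tb_bin (ts TIMESTAMP, bin1 BINARY(20));
-- Values are single-quoted; \' escapes a quote inside the string.
INSERT INTO tb_bin VALUES (NOW, 'it\'s ascii only');
```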
--- documentation20/cn/12.taos-sql/docs.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 0f2a972495..edf36a4da1 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -48,7 +48,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM | 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL | | 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] | | 5 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] | -| 6 | BINARY | 自定义 | 记录二进制字节型字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端使用单引号引用,否则英文全部自动转化为小写。使用时须指定大小,如 binary(20) 定义了最长为 20 个字节型字符的字符串,每个字节型字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | +| 6 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。理论上,最长可以有 16374 字节,但由于每行数据最多 16K 字节,实际上限一般小于理论值。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。 | | 7 | SMALLINT | 2 | 短整型, 范围 [-32767, 32767], -32768 用于 NULL | | 8 | TINYINT | 1 | 单字节整型,范围 [-127, 127], -128 用于 NULL | | 9 | BOOL | 1 | 布尔型,{true, false} | @@ -56,7 +56,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableM **Tips**: 1. TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。 -2. **注意**,虽然 Binary 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 Binary 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 nchar 类型进行保存。如果强行使用 Binary 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏。 +2. **注意**,虽然 Binary 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 Binary 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 nchar 类型进行保存。如果强行使用 Binary 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。 ## 数据库管理 @@ -623,8 +623,8 @@ Query OK, 1 row(s) in set (0.001091s) ``` - 可以使用 * 返回所有列,或指定列名。可以对数字列进行四则运算,可以给输出的列取列名。 - * 暂不支持对列的四则运算算式用于条件过滤算子(例如,不支持 `where a*2>6;`,但可以写 `where a>6/2;`)。 - * 暂不支持对列的四则运算算式作为 SQL 函数的应用对象(例如,不支持 `select min(2*a) from t;`,但可以写 `select 2*min(a) from t;`)。 + * 暂不支持含列名的四则运算表达式用于条件过滤算子(例如,不支持 `where a*2>6;`,但可以写 `where a>6/2;`)。 + * 暂不支持含列名的四则运算表达式作为 SQL 函数的应用对象(例如,不支持 `select min(2*a) from t;`,但可以写 `select 2*min(a) from t;`)。 - WHERE 语句可以使用各种逻辑判断来过滤数字值,或使用通配符来过滤字符串。 - 输出结果缺省按首列时间戳升序排序,但可以指定按降序排序( _c0 指首列时间戳)。使用 ORDER BY 对其他字段进行排序为非法操作。 - 参数 LIMIT 控制输出条数,OFFSET 指定从第几条开始输出。LIMIT/OFFSET 对结果集的执行顺序在 ORDER BY 之后。 From 37fd1ad91cb4a09f5c25d8e55622666b0c27e12a Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 26 Apr 2021 18:48:52 +0800 Subject: [PATCH 09/14] Hotfix/sangshuduo/td 3968 taosdemo datalen 16k (#5930) * [TD-3414]: add insert function with json files of taodemo-testcase -repeat * [TD-3879]add stmt mode for taodemo go * [TD-3414]: add insert function with json files of taodemo-testcase * [TD-3918] add case to verify the bug of TD-3897 * [TD-3453]:modify filepath in scripts * Update fulltest.sh * [TD-3918] fix the case * Update queryFilterTswithDateUnit.py * [TD-3850]: fix vnode write enqueue flowctrl UAF & no response * Hotfix/sangshuduo/td 3401 query statistic (#5907) * [TD-3401]: taosdemo query statistic. refactor func name. * [TD-3401]: taosdemo query statistic. refactor func name 2. * [TD-3401]: taosdemo support query statistic. implementation. * cleanup * [TD-3401]: taosdemo query statistic. change us to ms. * [TD-3401]: taosdemo query statistic. increase sql buffer for query. * [TD-3401]: taosdemo query statistic more accurate result. 
* [TD-3401]: taosdemo query statistic. modify last time logic. Co-authored-by: Shuduo Sang * improve coverage of operations * [TD-3944]: make default offline threshold to 10 days. (#5912) Co-authored-by: Shuduo Sang * [TD-3572]: response out of dnodes if #dnodes <= maxDbReplica * [TD-3937]: add taosdemo performance test compare scripts * update script * Hotfix/sangshuduo/td 3317 for master (#5921) * [TD-3317]: taosdemo interlace insertion. patch for master. * [TD-3317]: taosdemo support interlace mode. adjust remainder rows logic. * [TD-3317]: taosdemo support interlace mode. fix global and stable interlace rows logic. * [TD-3317]: taosdemo support interlace mode. fix 'interlaceRows' is used uninitialized Co-authored-by: Shuduo Sang * [TD-3968]: taosdemo data length should be 16*1024 Co-authored-by: tomchon Co-authored-by: liuyq-617 Co-authored-by: wu champion Co-authored-by: wu champion Co-authored-by: Minglei Jin Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com> Co-authored-by: Shengliang Guan Co-authored-by: Shuduo Sang Co-authored-by: Ping Xiao --- src/kit/taosdemo/taosdemo.c | 2 +- src/mnode/inc/mnodeDb.h | 1 + src/mnode/src/mnodeDb.c | 18 ++ src/mnode/src/mnodeDnode.c | 9 + src/vnode/src/vnodeWrite.c | 6 +- .../perftest-taosdemo-compare.sh | 147 +++++++++++ tests/pytest/client/thousandsofClient.py | 55 +++++ tests/pytest/cluster/TD-3693/how-to-use | 9 + tests/pytest/cluster/TD-3693/insert1Data.json | 88 +++++++ tests/pytest/cluster/TD-3693/insert2Data.json | 88 +++++++ tests/pytest/cluster/TD-3693/multClient.py | 74 ++++++ tests/pytest/cluster/TD-3693/multQuery.py | 72 ++++++ tests/pytest/cluster/TD-3693/queryCount.json | 15 ++ tests/pytest/fulltest.sh | 5 + .../functions/function_count_last_stab.py | 70 ++++++ tests/pytest/functions/function_operations.py | 4 +- .../pytest/query/queryFilterTswithDateUnit.py | 47 ++-- tests/pytest/query/queryTscomputWithNow.py | 177 ++++++++++++++ .../TD-3453/query-interrupt.json | 62 +++++ .../TD-3453/query-interrupt.py | 89 +++++++ .../taosdemoAllTest/TD-3453/queryall.json | 20 ++ .../tools/taosdemoAllTest/convertResFile.py | 35 +++ .../taosdemoAllTest/insert-1s1tnt1r.json | 88 +++++++ .../taosdemoAllTest/insert-1s1tntmr.json | 88 +++++++ .../taosdemoAllTest/insert-disorder.json | 88 +++++++ .../insert-illegal-columns-count-0.json | 88 +++++++ .../insert-illegal-columns-lmax.json | 88 +++++++ .../insert-illegal-columns.json | 88 +++++++ .../insert-illegal-tags-count129.json | 88 +++++++ .../taosdemoAllTest/insert-interlace-row.json | 62 +++++ .../insert-interval-speed.json | 88 +++++++ .../tools/taosdemoAllTest/insert-newdb.json | 166 +++++++++++++ .../taosdemoAllTest/insert-newtable.json | 166 +++++++++++++ .../taosdemoAllTest/insert-nodbnodrop.json | 62 +++++ .../tools/taosdemoAllTest/insert-offset.json | 166 +++++++++++++ .../tools/taosdemoAllTest/insert-renewdb.json | 166 +++++++++++++ .../tools/taosdemoAllTest/insert-sample.json | 88 +++++++ .../taosdemoAllTest/insert-timestep.json | 88 +++++++ .../taosdemoAllTest/moredemo-insert-offset.py | 72 ++++++ .../moredemo-offset-limit1.json | 62 +++++ .../moredemo-offset-limit5.json | 62 +++++ .../moredemo-offset-limit94.json | 62 +++++ .../moredemo-offset-newdb.json | 61 +++++ tests/pytest/tools/taosdemoAllTest/sample.csv | 3 + .../tools/taosdemoAllTest/speciQuery.json | 36 +++ .../taosdemoAllTest/speciQueryInsertdata.json | 86 +++++++ tests/pytest/tools/taosdemoAllTest/tags.csv | 2 + .../taosdemoTestInsertWithJson.py | 229 ++++++++++++++++++ .../taosdemoTestQueryWithJson.py | 91 
+++++++ tests/script/unique/cluster/balance2.sim | 23 +- tests/script/unique/dnode/remove1.sim | 4 +- tests/script/unique/dnode/remove2.sim | 21 +- 52 files changed, 3541 insertions(+), 34 deletions(-) create mode 100755 tests/perftest-scripts/perftest-taosdemo-compare.sh create mode 100644 tests/pytest/client/thousandsofClient.py create mode 100644 tests/pytest/cluster/TD-3693/how-to-use create mode 100644 tests/pytest/cluster/TD-3693/insert1Data.json create mode 100644 tests/pytest/cluster/TD-3693/insert2Data.json create mode 100644 tests/pytest/cluster/TD-3693/multClient.py create mode 100644 tests/pytest/cluster/TD-3693/multQuery.py create mode 100644 tests/pytest/cluster/TD-3693/queryCount.json create mode 100644 tests/pytest/functions/function_count_last_stab.py create mode 100644 tests/pytest/query/queryTscomputWithNow.py create mode 100644 tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json create mode 100644 tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py create mode 100644 tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json create mode 100644 tests/pytest/tools/taosdemoAllTest/convertResFile.py create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-disorder.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-newdb.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-newtable.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-offset.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-renewdb.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-sample.json create mode 100644 tests/pytest/tools/taosdemoAllTest/insert-timestep.json create mode 100644 tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py create mode 100644 tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json create mode 100644 tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json create mode 100644 tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json create mode 100644 tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json create mode 100644 tests/pytest/tools/taosdemoAllTest/sample.csv create mode 100644 tests/pytest/tools/taosdemoAllTest/speciQuery.json create mode 100644 tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json create mode 100644 tests/pytest/tools/taosdemoAllTest/tags.csv create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py create mode 100644 tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 68292692fa..83ecd145a0 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -81,7 +81,7 @@ enum QUERY_MODE { #define MAX_DB_NAME_SIZE 64 #define MAX_HOSTNAME_SIZE 64 #define MAX_TB_NAME_SIZE 64 
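+/* Note on the change below (editorial assumption, not in the original patch): (16*1024) matches the 16 KB maximum row size mentioned in the TAOS SQL docs, which the old 16000 fell just short of. */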
-#define MAX_DATA_SIZE 16000
+#define MAX_DATA_SIZE (16*1024)
 #define MAX_NUM_DATATYPE 10
 #define OPT_ABORT 1 /* –abort */
 #define STRING_LEN 60000
diff --git a/src/mnode/inc/mnodeDb.h b/src/mnode/inc/mnodeDb.h
index d03ba8d717..da0865833d 100644
--- a/src/mnode/inc/mnodeDb.h
+++ b/src/mnode/inc/mnodeDb.h
@@ -31,6 +31,7 @@ enum _TSDB_DB_STATUS {
 int32_t mnodeInitDbs();
 void mnodeCleanupDbs();
 int64_t mnodeGetDbNum();
+int32_t mnodeGetDbMaxReplica();
 SDbObj *mnodeGetDb(char *db);
 SDbObj *mnodeGetDbByTableName(char *db);
 void * mnodeGetNextDb(void *pIter, SDbObj **pDb);
diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c
index 909ca7cac6..8af20aa862 100644
--- a/src/mnode/src/mnodeDb.c
+++ b/src/mnode/src/mnodeDb.c
@@ -74,6 +74,24 @@ int64_t mnodeGetDbNum() {
   return sdbGetNumOfRows(tsDbSdb);
 }
 
+int32_t mnodeGetDbMaxReplica() {
+  int32_t maxReplica = 0;
+  SDbObj *pDb = NULL;
+  void *pIter = NULL;
+
+  while (1) {
+    pIter = mnodeGetNextDb(pIter, &pDb);
+    if (pDb == NULL) break;
+
+    if (pDb->cfg.replications > maxReplica)
+      maxReplica = pDb->cfg.replications;
+
+    mnodeDecDbRef(pDb);
+  }
+
+  return maxReplica;
+}
+
 static int32_t mnodeDbActionInsert(SSdbRow *pRow) {
   SDbObj *pDb = pRow->pObj;
   SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index 85d9f94b88..b513da29f4 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -29,6 +29,7 @@
 #include "mnodeDef.h"
 #include "mnodeInt.h"
 #include "mnodeDnode.h"
+#include "mnodeDb.h"
 #include "mnodeMnode.h"
 #include "mnodeSdb.h"
 #include "mnodeShow.h"
@@ -745,6 +746,14 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) {
     return TSDB_CODE_MND_NO_REMOVE_MASTER;
   }
 
+  int32_t maxReplica = mnodeGetDbMaxReplica();
+  int32_t dnodesNum = mnodeGetDnodesNum();
+  if (dnodesNum <= maxReplica) {
+    mError("dnode:%d, can't drop dnode:%s, #dnodes: %d, replica: %d", pDnode->dnodeId, ep, dnodesNum, maxReplica);
+    mnodeDecDnodeRef(pDnode);
+    return TSDB_CODE_MND_NO_ENOUGH_DNODES;
+  }
+
   mInfo("dnode:%d, start to drop it", pDnode->dnodeId);
 
   int32_t code = bnDropDnode(pDnode);
diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c
index aab685e678..def9cf3b32 100644
--- a/src/vnode/src/vnodeWrite.c
+++ b/src/vnode/src/vnodeWrite.c
@@ -347,9 +347,11 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
       vDebug("vgId:%d, msg:%p, write into vwqueue after flowctrl, retry:%d", pVnode->vgId, pWrite, pWrite->processedCount);
       pWrite->processedCount = 0;
+      void *handle = pWrite->rpcMsg.handle;
       code = vnodeWriteToWQueueImp(pWrite);
-      if (code != 0) {
-        dnodeSendRpcVWriteRsp(pWrite->pVnode, pWrite, code);
+      if (code != TSDB_CODE_SUCCESS) {
+        SRpcMsg rpcRsp = {.handle = handle, .code = code};
+        rpcSendResponse(&rpcRsp);
       }
     }
 }
diff --git a/tests/perftest-scripts/perftest-taosdemo-compare.sh b/tests/perftest-scripts/perftest-taosdemo-compare.sh
new file mode 100755
index 0000000000..60b6d1310d
--- /dev/null
+++ b/tests/perftest-scripts/perftest-taosdemo-compare.sh
@@ -0,0 +1,147 @@
+#!/bin/bash
+
+WORK_DIR=/home/ubuntu/pxiao
+TDENGINE_DIR=/home/ubuntu/pxiao/TDengine
+NUM_OF_VERSIONS=5
+CURRENT_VERSION=0
+today=`date +"%Y%m%d"`
+TAOSDEMO_COMPARE_TEST_REPORT=$TDENGINE_DIR/tests/taosdemo-compare-test-report-$today.log
+
+# Coloured Echoes
+function red_echo { echo -e "\033[31m$@\033[0m"; }
+function green_echo { echo -e "\033[32m$@\033[0m"; }
+function yellow_echo { echo -e "\033[33m$@\033[0m"; }
+function white_echo { echo -e "\033[1;37m$@\033[0m"; }
+# Coloured 
Printfs +function red_printf { printf "\033[31m$@\033[0m"; } +function green_printf { printf "\033[32m$@\033[0m"; } +function yellow_printf { printf "\033[33m$@\033[0m"; } +function white_printf { printf "\033[1;37m$@\033[0m"; } +# Debugging Outputs +function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } +function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } +function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } +function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } + +function getCurrentVersion { + echoInfo "Build TDengine" + cd $WORK_DIR/TDengine + + git remote update > /dev/null + git reset --hard HEAD + git checkout master + REMOTE_COMMIT=`git rev-parse --short remotes/origin/master` + LOCAL_COMMIT=`git rev-parse --short @` + + echo " LOCAL: $LOCAL_COMMIT" + echo "REMOTE: $REMOTE_COMMIT" + if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then + echo "repo up-to-date" + else + echo "repo need to pull" + git pull > /dev/null 2>&1 + fi + cd debug + rm -rf * + cmake .. > /dev/null 2>&1 + make > /dev/null 2>&1 + make install > /dev/null 2>&1 + + rm -rf $WORK_DIR/taosdemo + cp -r $TDENGINE_DIR/src/kit/taosdemo $WORK_DIR + CURRENT_VERSION=`taosd -V | grep version | awk '{print $3}' | awk -F. '{print $3}'` +} + +function buildTDengineByVersion() { + echoInfo "build TDengine on branch: $1" + git reset --hard HEAD + git checkout $1 + git pull > /dev/null + + rm -rf $TDENGINE_DIR/src/kit/taosdemo + cp -r $WORK_DIR/taosdemo $TDENGINE_DIR/src/kit + + cd $TDENGINE_DIR/debug + rm -rf * + cmake .. > /dev/null 2>&1 + make > /dev/null 2>&1 + make install > /dev/null 2>&1 +} + +function stopTaosd { + echo "Stop taosd" + systemctl stop taosd + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + while [ -n "$PID" ] + do + pkill -TERM -x taosd + sleep 1 + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` + done +} + +function startTaosd { + echo "Start taosd" + rm -rf /var/lib/perf/* + rm -rf /var/log/perf/* + nohup taosd -c /etc/perf/ > /dev/null 2>&1 & + sleep 10 +} + +function runTaosdemoCompare { + echoInfo "Stop Taosd" + stopTaosd + + getCurrentVersion + release="master" + + [ -f $TAOSDEMO_COMPARE_TEST_REPORT ] && rm $TAOSDEMO_COMPARE_TEST_REPORT + + for((i=0;i<$NUM_OF_VERSIONS;i++)) + do + startTaosd + taos -s "drop database if exists demodb;" + taosdemo -y -d demodb > taosdemoperf.txt + + echo "==================== taosdemo performance for $release ====================" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + CREATE_TABLE_TIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'` + INSERT_RECORDS_TIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'` + RECORDS_PER_SECOND=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'` + AVG_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $4}' | awk -Fm '{print $1}'` + MAX_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $6}' | awk -Fm '{print $1}'` + MIN_DELAY=`grep 'delay' taosdemoperf.txt | awk '{print $8}' | awk -Fm '{print $1}'` + + echo "create table time: $CREATE_TABLE_TIME seconds" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + echo "insert records time: $INSERT_RECORDS_TIME seconds" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + echo "records per second: $RECORDS_PER_SECOND records/second" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + echo "avg delay: $AVG_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT + echo "max delay: 
$MAX_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
+		echo "min delay: $MIN_DELAY ms" | tee -a $TAOSDEMO_COMPARE_TEST_REPORT
+
+		[ -f taosdemoperf.txt ] && rm taosdemoperf.txt
+
+		stopTaosd
+		version=`expr $CURRENT_VERSION - $i`
+		release="release/s1$version"
+		buildTDengineByVersion $release
+	done
+}
+
+function sendReport {
+	echo "send report"
+	receiver="develop@taosdata.com"
+	mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
+
+	cd $TDENGINE_DIR
+
+	sed -i 's/\x1b\[[0-9;]*m//g' $TAOSDEMO_COMPARE_TEST_REPORT
+	BODY_CONTENT=`cat $TAOSDEMO_COMPARE_TEST_REPORT`
+	echo -e "to: ${receiver}\nsubject: taosdemo performance compare test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
+	(cat - && uuencode $TAOSDEMO_COMPARE_TEST_REPORT taosdemo-compare-test-report-$today.log) | \
+	ssmtp "${receiver}" && echo "Report Sent!"
+}
+
+runTaosdemoCompare
+sendReport
+
+echoInfo "End of Taosdemo Compare Test" | tee -a $WORK_DIR/cron.log
\ No newline at end of file
diff --git a/tests/pytest/client/thousandsofClient.py b/tests/pytest/client/thousandsofClient.py
new file mode 100644
index 0000000000..36c816aa5b
--- /dev/null
+++ b/tests/pytest/client/thousandsofClient.py
@@ -0,0 +1,55 @@
+###################################################################
+#           Copyright (c) 2016 by TAOS Technologies, Inc.
+#                     All rights reserved.
+#
+#  This file is proprietary and confidential to TAOS Technologies.
+#  No part of this file may be reproduced, stored, transmitted,
+#  disclosed or used in any form or by any means other than as
+#  expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+sys.path.insert(0, os.getcwd())
+from util.log import *
+from util.sql import *
+from util.dnodes import *
+import taos
+import threading
+
+
+class TwoClients:
+    def initConnection(self):
+        self.host = "127.0.0.1"
+        self.user = "root"
+        self.password = "taosdata"
+        self.config = "/home/chr/taosdata/TDengine/sim/dnode1/cfg"
+
+    def newCloseCon(self, times):
+        newConList = []
+        for _ in range(times):
+            newConList.append(taos.connect(self.host, self.user, self.password, self.config))
+        for conn in newConList:
+            conn.close()
+
+    def run(self):
+        tdDnodes.init("")
+        tdDnodes.setTestCluster(False)
+        tdDnodes.setValgrind(False)
+
+        tdDnodes.stopAll()
+        tdDnodes.deploy(1)
+        tdDnodes.start(1)
+
+        # repeatedly open and close connections from many threads
+        for m in range(1, 101):
+            t = threading.Thread(target=self.newCloseCon, args=(10,))
+            t.start()
+
+
+clients = TwoClients()
+clients.initConnection()
+clients.run()
\ No newline at end of file
diff --git a/tests/pytest/cluster/TD-3693/how-to-use b/tests/pytest/cluster/TD-3693/how-to-use
new file mode 100644
index 0000000000..05a16a8534
--- /dev/null
+++ b/tests/pytest/cluster/TD-3693/how-to-use
@@ -0,0 +1,9 @@
+execute:
+cd TDengine/tests/pytest && python3 ./test.py -f cluster/TD-3693/multClient.py && python3 cluster/TD-3693/multQuery.py
+
+1. 使用测试的集群,三个节点fc1、fct2、fct4。
+2. 用taosdemo建两个库db1和db2,副本数目为1,插入一定数据。
+3. db1在mnode的master上(fct2),db2在mnode的slave上(fct4)。
+4. 珲哥修改taosdemo,变成多线程查询,修改后的软件我命名成taosdemoMul,然后做持续多线程查询db2上的数据,建立多个连接
+5. 4中查询过程放到后台,同时再次在db2执行建表、插入,查询操作。循环执行查询10次,每次间隔91s。
+6. 
然后查询taosd的log日志,看是否还存在上述问题“send auth msg to mnodes”。 \ No newline at end of file diff --git a/tests/pytest/cluster/TD-3693/insert1Data.json b/tests/pytest/cluster/TD-3693/insert1Data.json new file mode 100644 index 0000000000..3ac289a63a --- /dev/null +++ b/tests/pytest/cluster/TD-3693/insert1Data.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.104", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/cluster/TD-3693/insert2Data.json b/tests/pytest/cluster/TD-3693/insert2Data.json new file mode 100644 index 0000000000..25717df4c7 --- /dev/null +++ b/tests/pytest/cluster/TD-3693/insert2Data.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "192.168.1.104", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db2", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + 
"quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/cluster/TD-3693/multClient.py b/tests/pytest/cluster/TD-3693/multClient.py new file mode 100644 index 0000000000..24c27d9de9 --- /dev/null +++ b/tests/pytest/cluster/TD-3693/multClient.py @@ -0,0 +1,74 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.rowNum = 100000 + self.ts = 1537146000000 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert data to cluster'db + os.system("%staosdemo -f cluster/TD-3693/insert1Data.json -y " % binPath) + # multiple new and cloes connection with query data + os.system("%staosdemo -f cluster/TD-3693/insert2Data.json -y " % binPath) + os.system("nohup %staosdemoMul -f cluster/TD-3693/queryCount.json -y & " % binPath) + + + + # delete useless files + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf ./querySystemInfo*") + os.system("rm -rf cluster/TD-3693/multClient.py.sql") + os.system("rm -rf ./querySystemInfo*") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/cluster/TD-3693/multQuery.py b/tests/pytest/cluster/TD-3693/multQuery.py new file mode 100644 index 0000000000..70061a27f2 --- /dev/null +++ b/tests/pytest/cluster/TD-3693/multQuery.py @@ -0,0 +1,72 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import os +import sys +sys.path.insert(0, os.getcwd()) +from util.log import * +from util.sql import * +from util.dnodes import * +import taos +import threading + + +class TwoClients: + def initConnection(self): + self.host = "fct4" + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taos/" + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + # query data from cluster'db + conn = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config) + cur = conn.cursor() + tdSql.init(cur, True) + tdSql.execute("use db2") + cur.execute("select count (tbname) from stb0") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 20) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 100000) + tdSql.query("select count(*) from stb01_0") + tdSql.checkData(0, 0, 20000) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 400000) + tdSql.execute("drop table if exists squerytest") + tdSql.execute("drop table if exists querytest") + tdSql.execute('''create stable squerytest(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table querytest using squerytest tags('beijing')") + tdSql.execute("insert into querytest(ts) values(%d)" % (self.ts - 1)) + for i in range(self.rowNum): + tdSql.execute("insert into querytest values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" % (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + for j in range(10): + tdSql.execute("use db2") + tdSql.query("select count(*),last(*) from querytest group by col1") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 2, 2) + tdSql.checkData(1, 3, 1) + sleep(88) + tdSql.execute("drop table if exists squerytest") + tdSql.execute("drop table if exists querytest") + +clients = TwoClients() +clients.initConnection() +clients.run() \ No newline at end of file diff --git a/tests/pytest/cluster/TD-3693/queryCount.json b/tests/pytest/cluster/TD-3693/queryCount.json new file mode 100644 index 0000000000..089ae42aab --- /dev/null +++ b/tests/pytest/cluster/TD-3693/queryCount.json @@ -0,0 +1,15 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "host": "192.168.1.104", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db2", + "query_times": 1000000, + "specified_table_query": + {"query_interval":1, "concurrent":100, + "sqls": [{"sql": "select count(*) from db.stb0", "result": ""}] + } +} \ No newline at end of file diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 86fbe2860d..d47eca13e5 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -151,6 +151,9 @@ python3 test.py -f tools/taosdemoTestTblAlt.py python3 test.py -f tools/taosdemoTestSampleData.py python3 test.py -f 
tools/taosdemoTestInterlace.py python3 test.py -f tools/taosdemoTestQuery.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py + # update python3 ./test.py -f update/allow_update.py @@ -221,6 +224,7 @@ python3 ./test.py -f query/queryJoin10tables.py python3 ./test.py -f query/queryStddevWithGroupby.py python3 ./test.py -f query/querySecondtscolumnTowherenow.py python3 ./test.py -f query/queryFilterTswithDateUnit.py +python3 ./test.py -f query/queryTscomputWithNow.py @@ -280,6 +284,7 @@ python3 ./test.py -f functions/all_null_value.py python3 ./test.py -f functions/function_avg.py -r 1 python3 ./test.py -f functions/function_bottom.py -r 1 python3 ./test.py -f functions/function_count.py -r 1 +python3 ./test.py -f functions/function_count_last_stab.py python3 ./test.py -f functions/function_diff.py -r 1 python3 ./test.py -f functions/function_first.py -r 1 python3 ./test.py -f functions/function_last.py -r 1 diff --git a/tests/pytest/functions/function_count_last_stab.py b/tests/pytest/functions/function_count_last_stab.py new file mode 100644 index 0000000000..1d777c6bd3 --- /dev/null +++ b/tests/pytest/functions/function_count_last_stab.py @@ -0,0 +1,70 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute('''create stable stest(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table test1 using stest tags('beijing')") + tdSql.execute("insert into test1(ts) values(%d)" % (self.ts - 1)) + + + # last verifacation + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i, i + 1, 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.query("select count(*),last(*) from stest group by col1") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 2, 2) + tdSql.checkData(1, 3, 1) + + tdSql.query("select count(*),last(*) from stest group by col2") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 2, 10) + tdSql.checkData(0, 3, 1) + + tdSql.query("select count(*),last(ts,stest.*) from stest group by col1") + tdSql.checkRows(10) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 2, "2018-09-17 09:00:00") + tdSql.checkData(1, 4, 1) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff 
--git a/tests/pytest/functions/function_operations.py b/tests/pytest/functions/function_operations.py index 162aa3eb65..859cd78a3d 100644 --- a/tests/pytest/functions/function_operations.py +++ b/tests/pytest/functions/function_operations.py @@ -82,14 +82,14 @@ class TDTestCase: self.ts = self.ts + self.rowNum + 10 - tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % ( self.ts + self.rowNum + 1 )) + tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, NULL, '涛思数据3', 254, 65534, 4294967294, 18446744073709551614)" % ( self.ts + self.rowNum + 1 )) tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, 1.1, 1, 'taosdata', NULL, 1, 1, 1, 1)" % ( self.ts + self.rowNum + 2 )) tdSql.execute("insert into test1 values(%d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)" % ( self.ts + self.rowNum + 3 )) tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, NULL, 1.1, 1, NULL, '涛思数据3', 1, 1, 1, 1)" % ( self.ts + self.rowNum + 4 )) tdSql.execute("insert into test1 values(%d, 1, 1, 1, 1, 1.1, NULL, 1, 'taosdata', NULL, 1, 1, 1, 1)" % ( self.ts + self.rowNum + 5 )) self.rowNum = self.rowNum + 5 - col_list = [ 'col1' , 'col2' , 'col3' , 'col4' , 'col5' , 'col6' , 'col7' , 'col8' , 'col9' , 'col11' , 'col12' , 'col13' , 'col14' , '1' , '1.1' , 'NULL' ] + col_list = [ 'col1' , 'col2' , 'col3' , 'col4' , 'col5' , 'col6' , 'col7' , 'col8' , 'col9' , 'col11' , 'col12' , 'col13' , 'col14' , '1' , '1.1' , 'NULL' , '18446744073709551614' ] op_list = [ '+' , '-' , '*' , '/' , '%' ] err_list = [ 'col7' , 'col8' , 'col9' , 'NULL' ] order_lsit = [ ' order by ts ', ' order by ts desc ', ' order by ts asc '] diff --git a/tests/pytest/query/queryFilterTswithDateUnit.py b/tests/pytest/query/queryFilterTswithDateUnit.py index 90e30c5156..eb9eb02afd 100644 --- a/tests/pytest/query/queryFilterTswithDateUnit.py +++ b/tests/pytest/query/queryFilterTswithDateUnit.py @@ -47,53 +47,53 @@ class TDTestCase: for col in cols: tdSql.error(f" select * from tts1 where {col} = 1d ") - tdSql.error(f" select * from tts1 where {col} < 1d ") + tdSql.error(f" select * from tts1 where {col} < -1d ") tdSql.error(f" select * from tts1 where {col} > 1d ") - tdSql.error(f" select * from tts1 where {col} >= 1d ") + tdSql.error(f" select * from tts1 where {col} >= -1d ") tdSql.error(f" select * from tts1 where {col} <= 1d ") tdSql.error(f" select * from tts1 where {col} <> 1d ") - tdSql.error(f" select * from tts1 where {col} = 1m ") + tdSql.error(f" select * from tts1 where {col} = -1m ") tdSql.error(f" select * from tts1 where {col} < 1m ") tdSql.error(f" select * from tts1 where {col} > 1m ") - tdSql.error(f" select * from tts1 where {col} >= 1m ") + tdSql.error(f" select * from tts1 where {col} >= -1m ") tdSql.error(f" select * from tts1 where {col} <= 1m ") tdSql.error(f" select * from tts1 where {col} <> 1m ") - tdSql.error(f" select * from tts1 where {col} = 1s ") + tdSql.error(f" select * from tts1 where {col} = -1s ") tdSql.error(f" select * from tts1 where {col} < 1s ") tdSql.error(f" select * from tts1 where {col} > 1s ") - tdSql.error(f" select * from tts1 where {col} >= 1s ") + tdSql.error(f" select * from tts1 where {col} >= -1s ") tdSql.error(f" select * from tts1 where {col} <= 1s ") tdSql.error(f" select * from tts1 where {col} <> 1s ") - tdSql.error(f" select * from tts1 where {col} = 1a ") + tdSql.error(f" select * from tts1 where {col} = -1a ") tdSql.error(f" select * from tts1 where {col} < 1a ") tdSql.error(f" select * from 
tts1 where {col} > 1a ") - tdSql.error(f" select * from tts1 where {col} >= 1a ") + tdSql.error(f" select * from tts1 where {col} >= -1a ") tdSql.error(f" select * from tts1 where {col} <= 1a ") tdSql.error(f" select * from tts1 where {col} <> 1a ") - tdSql.error(f" select * from tts1 where {col} = 1h ") + tdSql.error(f" select * from tts1 where {col} = -1h ") tdSql.error(f" select * from tts1 where {col} < 1h ") tdSql.error(f" select * from tts1 where {col} > 1h ") - tdSql.error(f" select * from tts1 where {col} >= 1h ") + tdSql.error(f" select * from tts1 where {col} >= -1h ") tdSql.error(f" select * from tts1 where {col} <= 1h ") tdSql.error(f" select * from tts1 where {col} <> 1h ") - tdSql.error(f" select * from tts1 where {col} = 1w ") + tdSql.error(f" select * from tts1 where {col} = -1w ") tdSql.error(f" select * from tts1 where {col} < 1w ") tdSql.error(f" select * from tts1 where {col} > 1w ") - tdSql.error(f" select * from tts1 where {col} >= 1w ") + tdSql.error(f" select * from tts1 where {col} >= -1w ") tdSql.error(f" select * from tts1 where {col} <= 1w ") tdSql.error(f" select * from tts1 where {col} <> 1w ") - tdSql.error(f" select * from tts1 where {col} = 1u ") + tdSql.error(f" select * from tts1 where {col} = -1u ") tdSql.error(f" select * from tts1 where {col} < 1u ") tdSql.error(f" select * from tts1 where {col} > 1u ") - tdSql.error(f" select * from tts1 where {col} >= 1u ") + tdSql.error(f" select * from tts1 where {col} >= -1u ") tdSql.error(f" select * from tts1 where {col} <= 1u ") - tdSql.error(f" select * from tts1 where {col} <> 1u ") + tdSql.error(f" select * from tts1 where {col} <> u ") tdSql.error(f" select * from tts1 where {col} = 0d ") tdSql.error(f" select * from tts1 where {col} < 0s ") @@ -125,6 +125,12 @@ class TDTestCase: tdSql.error(f" select * from tts1 where {col} <> 0/1d ") tdSql.error(f" select * from tts1 where {col} <> 1w+'2010-01-01 00:00:00' ") + tdSql.error(f" select * from tts1 where {col} = 1-1h ") + tdSql.error(f" select * from tts1 where {col} < 1w-d ") + tdSql.error(f" select * from tts1 where {col} > 0/u ") + tdSql.error(f" select * from tts1 where {col} >= d/s ") + tdSql.error(f" select * from tts1 where {col} <= 1/a ") + tdSql.error(f" select * from tts1 where {col} <> d/1 ") def run(self): tdSql.execute("drop database if exists dbms") @@ -148,19 +154,16 @@ class TDTestCase: # create databases precision is us tdSql.execute("create database if not exists dbus keep 36500 precision 'us' ") tdSql.execute("use dbus") - tsp2 = -28800000 * 1000 - tsp3 = -946800000000 * 1000 + tsp2 = tsp2 * 1000 + tsp3 = tsp3 * 1000 self.insertnow(tsp1,tsp2,tsp3) self.querynow() - - - - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryTscomputWithNow.py b/tests/pytest/query/queryTscomputWithNow.py new file mode 100644 index 0000000000..3b808d551c --- /dev/null +++ b/tests/pytest/query/queryTscomputWithNow.py @@ -0,0 +1,177 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def inertnow(self): + tsp1 = 0 + tsp2 = -28800000 + tsp3 = -946800000000 + + tdSql.execute( + "create table stbts (ts timestamp, ts1 timestamp, c1 int, ts2 timestamp) TAGS(t1 int)" + ) + tdSql.execute("create table tts1 using stbts tags(1)") + + tdSql.execute("insert into tts1 values (now+1d, now+1d, 6, now+1d)") + tdSql.execute("insert into tts1 values (now, now, 5, now)") + tdSql.execute("insert into tts1 values (now-1d, now-1d, 4, now-1d)") + tdSql.execute(f"insert into tts1 values ({tsp1}, {tsp1}, 3, {tsp1})") + tdSql.execute(f"insert into tts1 values ({tsp2}, {tsp2}, 2, {tsp2})") + tdSql.execute(f"insert into tts1 values ({tsp3}, {tsp3}, 1, {tsp3})") + + def querynow(self): + interval_day1 = (datetime.date.today() - datetime.date(1970, 1, 1)).days + interval_day2 = (datetime.date.today() - datetime.date(1940, 1, 1)).days + + tdLog.printNoPrefix("==========step query: execute query operation") + time.sleep(1) + tdSql.execute(" select * from tts1 where ts > now+1d ") + ts_len1 = len(tdSql.cursor.fetchall()) + tdSql.execute(" select * from tts1 where ts < now+1d ") + ts_len2 = len(tdSql.cursor.fetchall()) + tdSql.execute(" select * from tts1 where ts > now-1d ") + ts_len3 = len(tdSql.cursor.fetchall()) + tdSql.execute(" select * from tts1 where ts < now-1d ") + ts_len4 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts > now-{interval_day1+1}d ") + ts_len5 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts < now-{interval_day1+1}d ") + ts_len6 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts > now-{interval_day1-1}d ") + ts_len7 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts < now-{interval_day1-1}d ") + ts_len8 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts > now-{interval_day2+1}d ") + ts_len9 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts < now-{interval_day2+1}d ") + ts_len10 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts > now-{interval_day2-1}d ") + ts_len11 = len(tdSql.cursor.fetchall()) + tdSql.execute(f" select * from tts1 where ts < now-{interval_day2-1}d ") + ts_len12 = len(tdSql.cursor.fetchall()) + + tdSql.query(" select * from tts1 where ts1 > now+1d ") + tdSql.checkRows(ts_len1) + tdSql.query(" select * from tts1 where ts2 > now+1440m ") + tdSql.checkRows(ts_len1) + + tdSql.query(" select * from tts1 where ts1 < now+1d ") + tdSql.checkRows(ts_len2) + tdSql.query(" select * from tts1 where ts2 < now+1440m ") + tdSql.checkRows(ts_len2) + + tdSql.query(" select * from tts1 where ts1 > now-1d ") + tdSql.checkRows(ts_len3) + tdSql.query(" select * from tts1 where ts2 > now-1440m ") + tdSql.checkRows(ts_len3) + + tdSql.query(" select * from tts1 where ts1 < now-1d ") + tdSql.checkRows(ts_len4) + tdSql.query(" select * from tts1 where ts2 < now-1440m ") + tdSql.checkRows(ts_len4) + + tdSql.query(f" select * from tts1 where ts1 > 
now-{interval_day1+1}d ") + tdSql.checkRows(ts_len5) + tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day1+1)*1440}m " ) + tdSql.checkRows(ts_len5) + + tdSql.query(f" select * from tts1 where ts1 < now-{interval_day1+1}d ") + tdSql.checkRows(ts_len6) + tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day1+1)*1440}m ") + tdSql.checkRows(ts_len6) + + tdSql.query(f" select * from tts1 where ts1 > now-{interval_day1-1}d ") + tdSql.checkRows(ts_len7) + tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day1-1)*1440}m ") + tdSql.checkRows(ts_len7) + + tdSql.query(f" select * from tts1 where ts1 < now-{interval_day1-1}d ") + tdSql.checkRows(ts_len8) + tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day1-1)*1440}m ") + tdSql.checkRows(ts_len8) + + tdSql.query(f" select * from tts1 where ts1 > now-{interval_day2 + 1}d ") + tdSql.checkRows(ts_len9) + tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day2 + 1)*1440}m ") + tdSql.checkRows(ts_len9) + + tdSql.query(f" select * from tts1 where ts1 < now-{interval_day2 + 1}d ") + tdSql.checkRows(ts_len10) + tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day2 + 1)*1440}m ") + tdSql.checkRows(ts_len10) + + tdSql.query(f" select * from tts1 where ts1 > now-{interval_day2 - 1}d ") + tdSql.checkRows(ts_len11) + tdSql.query(f" select * from tts1 where ts2 > now-{(interval_day2 - 1)*1440}m ") + tdSql.checkRows(ts_len11) + + tdSql.query(f" select * from tts1 where ts1 < now-{interval_day2 - 1}d ") + tdSql.checkRows(ts_len12) + tdSql.query(f" select * from tts1 where ts2 < now-{(interval_day2 - 1)*1440}m ") + tdSql.checkRows(ts_len12) + + + + def run(self): + tdSql.execute("drop database if exists dbms") + tdSql.execute("drop database if exists dbus") + + # timestamp list: + # 0 -> "1970-01-01 08:00:00" | -28800000 -> "1970-01-01 00:00:00" | -946800000000 -> "1940-01-01 00:00:00" + # -631180800000 -> "1950-01-01 00:00:00" + + tdLog.printNoPrefix("==========step1:create table precision ms && insert data && query") + # create databases precision is ms + tdSql.execute("create database if not exists dbms keep 36500") + tdSql.execute("use dbms") + self.inertnow() + self.querynow() + + tdLog.printNoPrefix("==========step2:create table precision us && insert data && query") + # create databases precision is us + tdSql.execute("create database if not exists dbus keep 36500 precision 'us' ") + tdSql.execute("use dbus") + self.inertnow() + self.querynow() + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + tdLog.printNoPrefix("==========step3:after wal, query table precision ms") + tdSql.execute("use dbus") + self.querynow() + + tdLog.printNoPrefix("==========step4: query table precision us") + tdSql.execute("use dbus") + self.querynow() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json new file mode 100644 index 0000000000..5e53bd7e7d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": 
"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 60, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100000, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py new file mode 100644 index 0000000000..1401716da9 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py @@ -0,0 +1,89 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import subprocess +import time +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # # insert 1000w rows in stb0 + os.system("%staosdemo -f tools/taosdemoAllTest/TD-3453/query-interrupt.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0,60) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 6000000) + os.system('%staosdemo -f tools/taosdemoAllTest/TD-3453/queryall.json -y & ' % binPath) + time.sleep(2) + query_pid = int(subprocess.getstatusoutput('ps aux|grep "TD-3453/queryall.json" |grep -v "grep"|awk \'{print $2}\'')[1]) + taosd_cpu_load_1 = float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1]) + if taosd_cpu_load_1 > 10.0 : + os.system("kill -9 %d" % query_pid) + time.sleep(5) + taosd_cpu_load_2 = float(subprocess.getstatusoutput('top -n 1 -b -p $(ps aux|grep "bin/taosd -c"|grep -v "grep" |awk \'{print $2}\')|awk \'END{print}\' |awk \'{print $9}\'')[1]) + if taosd_cpu_load_2 < 10.0 : + suc_kill = 60 + else: + suc_kill = 10 + print("taosd_cpu_load is higher than 10%") + else: + suc_kill = 20 + print("taosd_cpu_load is still less than 10%") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, "%d" % suc_kill) + os.system("rm -rf querySystemInfo*") + os.system("rm -rf insert_res.txt") + os.system("rm -rf insert_res.txt") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json b/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json new file mode 100644 index 0000000000..a92906fa73 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json @@ -0,0 +1,20 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "specified_table_query":{ + "query_interval":1, + "concurrent":1, + "sqls":[ + { + "sql": "select * from stb0", + "result": "" + } + ] + } +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/convertResFile.py 
new file mode 100644
index 0000000000..52bb8f40d0
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/convertResFile.py
@@ -0,0 +1,35 @@
+from datetime import datetime
+import os
+
+# Convert the raw query result: collapse whitespace into commas, then turn
+# the millisecond epoch in column 1 into a quoted "YYYY-MM-DD HH:MM:SS.mmm"
+# timestamp, quote columns 2 and 3, and write the rows to test_query_res0.txt.
+os.system("awk -v OFS=',' '{$1=$1;print$0}' ./all_query_res0.txt > ./new_query_res0.txt")
+with open('./new_query_res0.txt', 'r+') as f0:
+    contents = f0.readlines()
+    if os.path.exists('./test_query_res0.txt'):
+        os.system("rm -rf ./test_query_res0.txt")
+    # open the output file once instead of re-opening it for every row
+    with open('./test_query_res0.txt', 'a') as fi:
+        for i in range(len(contents)):
+            content = contents[i].rstrip('\n')
+            stimestamp = content.split(',')[0]
+            timestamp = int(stimestamp)
+            d = datetime.fromtimestamp(timestamp/1000)
+            str0 = d.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
+            ts = "'" + str0 + "'"
+            str1 = "'" + content.split(',')[1] + "'"
+            str2 = "'" + content.split(',')[2] + "'"
+            content = ts + "," + str1 + "," + str2 + "," + content.split(',', 3)[3]
+            contents[i] = content + "\n"
+            fi.write(contents[i])
+
+os.system("rm -rf ./new_query_res0.txt")
diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json
new file mode 100644
index 0000000000..8e40ad812d
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json
@@ -0,0 +1,88 @@
+{
+  "filetype": "insert",
+  "cfgdir": "/etc/taos",
+  "host": "127.0.0.1",
+  "port": 6030,
+  "user": "root",
+  "password": "taosdata",
+  "thread_count": 4,
+  "thread_count_create_tbl": 4,
+  "result_file": "./insert_res.txt",
+  "confirm_parameter_prompt": "no",
+  "insert_interval": 0,
+  "interlace_rows": 10,
+  "num_of_records_per_req": 1,
+  "max_sql_len": 1024000,
+  "databases": [{
+    "dbinfo": {
+      "name": "db",
+      "drop": "yes",
+      "replica": 1,
+      "days": 10,
+      "cache": 50,
+      "blocks": 8,
+      "precision": "ms",
+      "keep": 365,
+      "minRows": 100,
+      "maxRows": 4096,
+      "comp":2,
+      "walLevel":1,
+      "cachelast":0,
+      "quorum":1,
+      "fsync":3000,
+      "update": 0
+    },
+    "super_tables": [{
+      "name": "stb0",
+      "child_table_exists":"no",
+      "childtable_count": 1000,
+      "childtable_prefix": "stb00_",
+      "auto_create_table": "no",
+      "batch_create_tbl_num": 1,
+      "data_source": "rand",
+      "insert_mode": "taosc",
+      "insert_rows": 100,
+      "childtable_limit": 0,
+      "childtable_offset":0,
+      "multi_thread_write_one_tbl": "no",
+      "interlace_rows": 0,
+      "insert_interval":0,
+      "max_sql_len": 1024000,
+      "disorder_ratio": 0,
+      "disorder_range": 1000,
+      "timestamp_step": 1,
+      "start_timestamp": "2020-10-01 00:00:00.000",
+      "sample_format": "csv",
+      "sample_file": "./sample.csv",
+      "tags_file": "",
+      "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+      "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+      },
+      {
+      "name": "stb1",
+      "child_table_exists":"no",
+      "childtable_count": 1000,
+      "childtable_prefix": "stb01_",
+      "auto_create_table": "no",
+      "batch_create_tbl_num": 10,
+      "data_source": "rand",
+      "insert_mode": "taosc",
+      "insert_rows": 200,
+      "childtable_limit": 0,
+      "childtable_offset":0,
+      "multi_thread_write_one_tbl": "no",
+      "interlace_rows": 0,
+      "insert_interval":0,
+      "max_sql_len": 1024000,
+      "disorder_ratio": 0,
+      "disorder_range": 1000,
+      "timestamp_step": 1,
+      "start_timestamp": "2020-10-01 00:00:00.000",
+      "sample_format": "csv",
"sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":4}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json new file mode 100644 index 0000000000..e741fd5c05 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json new file mode 100644 index 0000000000..fddaa4b4b9 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + 
"databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 100, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 100, + "disorder_range": 1, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json new file mode 100644 index 0000000000..f6a103f001 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-count-0.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":0}, {"type": 
"BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json new file mode 100644 index 0000000000..17050278c8 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns-lmax.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1024}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, 
{"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json new file mode 100644 index 0000000000..53735dc413 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-illegal-columns.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json new file mode 100644 index 0000000000..115c42b502 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-illegal-tags-count129.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db1", + "drop": "yes", + 
"replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BIGINT", "count":1}, {"type": "float", "count":1}, {"type": "double", "count":1}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":127}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 2000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json b/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json new file mode 100644 index 0000000000..26e8b7e88d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 150, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 151, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": 
"./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json new file mode 100644 index 0000000000..c7c5150a06 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 100, + "interlace_rows": 0, + "num_of_records_per_req": 2000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval": 2000, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":9}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-newdb.json b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json new file mode 100644 index 0000000000..72e380a66c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + 
"max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, 
{"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-newtable.json b/tests/pytest/tools/taosdemoAllTest/insert-newtable.json new file mode 100644 index 0000000000..3115c9ba72 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-newtable.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, 
{"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 30, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json b/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json new file mode 100644 index 0000000000..7fdba4add1 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dbno", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + 
"child_table_exists":"no", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-offset.json b/tests/pytest/tools/taosdemoAllTest/insert-offset.json new file mode 100644 index 0000000000..611b4a8989 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-offset.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"yes", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"yes", + "childtable_count": 7, + "childtable_prefix": "stb02_", + 
"auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset":7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json b/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json new file mode 100644 index 0000000000..72e380a66c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + 
"insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 
1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-sample.json b/tests/pytest/tools/taosdemoAllTest/insert-sample.json new file mode 100644 index 0000000000..015993227e --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-sample.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dbtest123", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "sample", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count":3}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "BOOL"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "./tools/taosdemoAllTest/tags.csv", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":3}, {"type": "BINARY", "len": 16, "count":2}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insert-timestep.json b/tests/pytest/tools/taosdemoAllTest/insert-timestep.json new file mode 100644 index 0000000000..01d8ac9098 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insert-timestep.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + 
"password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py new file mode 100644 index 0000000000..703f755c31 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py @@ -0,0 +1,72 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+import sys
+import os
+import time
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+        buildPath = ""
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root)-len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        binPath = buildPath + "/build/bin/"
+
+        # insert: drop and child_table_exists combination test
+        # insert: use "childtable_offset" and "childtable_limit" to control
+        # which slice of the child tables each parallel instance writes to
+        os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-newdb.json" % binPath)
+        os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit1.json & " % binPath)
+        os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit94.json & " % binPath)
+        os.system("%staosdemo -f tools/taosdemoAllTest/moredemo-offset-limit5.json & " % binPath)
+        time.sleep(15)
+        tdSql.execute("use db")
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 1000000)
+
+        os.system("rm -rf ./insert_res.txt")
+        os.system("rm -rf tools/taosdemoAllTest/taosdemoTestWithJson-1.py.sql")
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json
new file mode 100644
index 0000000000..ad6cb8118d
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json
@@ -0,0 +1,62 @@
+
+{
+  "filetype": "insert",
+  "cfgdir": "/etc/taos",
+  "host": "127.0.0.1",
+  "port": 6030,
+  "user": "root",
+  "password": "taosdata",
+  "thread_count": 4,
+  "thread_count_create_tbl": 4,
+  "result_file": "./insert_res.txt",
+  "confirm_parameter_prompt": "no",
+  "insert_interval": 0,
+  "interlace_rows": 0,
+  "num_of_records_per_req": 3000,
+  "max_sql_len": 1024000,
+  "databases": [{
+    "dbinfo": {
+      "name": "db",
+      "drop": "no",
+      "replica": 1,
+      "days": 10,
+      "cache": 16,
+      "blocks": 8,
+      "precision": "ms",
+      "keep": 365,
+      "minRows": 100,
+      "maxRows": 4096,
+      "comp":2,
+      "walLevel":1,
+      "cachelast":0,
+      "quorum":1,
+      "fsync":3000,
+      "update": 0
+    },
+    "super_tables": [{
+      "name": "stb0",
+      "child_table_exists":"yes",
+      "childtable_count": 100,
+      "childtable_prefix": "stb00_",
+      "auto_create_table": "no",
+      "batch_create_tbl_num": 10,
+      "data_source": "rand",
+      "insert_mode": "taosc",
+      "insert_rows": 10000,
+      "childtable_limit": 1,
+      "childtable_offset": 99,
+      "interlace_rows": 0,
+      "insert_interval": 0,
+      "max_sql_len": 1024000,
+      "disorder_ratio": 0,
+ 
"disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json new file mode 100644 index 0000000000..7109dab53f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json @@ -0,0 +1,62 @@ + +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 5, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json new file mode 100644 index 0000000000..a98a185b54 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json @@ -0,0 +1,62 @@ + +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10000, + "childtable_limit": 94, + "childtable_offset": 5, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + 
"disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json new file mode 100644 index 0000000000..e2f3fb0379 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json @@ -0,0 +1,61 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 0, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/sample.csv b/tests/pytest/tools/taosdemoAllTest/sample.csv new file mode 100644 index 0000000000..471118a2ce --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sample.csv @@ -0,0 +1,3 @@ +1,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','true' +0,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','true' +0,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','false' \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/speciQuery.json b/tests/pytest/tools/taosdemoAllTest/speciQuery.json new file mode 100644 index 0000000000..5e99e80108 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/speciQuery.json @@ -0,0 +1,36 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 2, + "specified_table_query": { + "query_interval": 1, + "concurrent": 3, + "sqls": [ + { + "sql": "select last_row(*) from stb0 ", + "result": "./query_res0.txt" + }, + { + "sql": "select count(*) from stb00_1", + "result": "./query_res1.txt" + } + ] + }, + "super_table_query": { + "stblname": "stb1", + "query_interval": 1, + "threads": 3, + 
"sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } +} diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json b/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json new file mode 100644 index 0000000000..ec9cb5a40d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 200, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/tags.csv b/tests/pytest/tools/taosdemoAllTest/tags.csv new file mode 100644 index 0000000000..89bf8e3fb3 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/tags.csv @@ -0,0 +1,2 @@ +1,-127,127,'23ac,;\[uer]3','true' +1,-127,126,'23ac,;\[uer]3','true' diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py new file mode 100644 index 0000000000..2dd50bf639 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py @@ -0,0 +1,229 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################

+# -*- coding: utf-8 -*-

+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        buildPath = ""  # stays empty if no taosd binary is found below projPath
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        binPath = buildPath + "/build/bin/"
+
+        # insert: create one or multiple tables per sql, and insert multiple rows per sql
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 1000)
+        tdSql.query("select count (tbname) from stb1")
+        tdSql.checkData(0, 0, 1000)
+        tdSql.query("select count(*) from stb00_0")
+        tdSql.checkData(0, 0, 100)
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 100000)
+        tdSql.query("select count(*) from stb01_1")
+        tdSql.checkData(0, 0, 200)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 200000)
+
+
+        # insert: create multiple tables per sql, and insert one row per sql
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 10)
+        tdSql.query("select count (tbname) from stb1")
+        tdSql.checkData(0, 0, 20)
+        tdSql.query("select count(*) from stb00_0")
+        tdSql.checkData(0, 0, 10000)
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 100000)
+        tdSql.query("select count(*) from stb01_0")
+        tdSql.checkData(0, 0, 20000)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 400000)
+
+        # insert: use parameter "insert_interval" to control the speed of inserts.
+        # But we still need an accurate method to verify the speed, such as reading
+        # the achieved rate back, checking the row count over time, and so on.
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-interval-speed.json -y" % binPath)
+        tdSql.execute("use db")
+        tdSql.query("show stables")
+        tdSql.checkData(0, 4, 100)
+        tdSql.query("select count(*) from stb00_0")
+        tdSql.checkData(0, 0, 20000)
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 2000000)
+        tdSql.query("show stables")
+        tdSql.checkData(1, 4, 100)
+        tdSql.query("select count(*) from stb01_0")
+        tdSql.checkData(0, 0, 20000)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 2000000)
+
+        # spend 2min30s for 3 testcases.
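+        # Editor's sketch (hypothetical, not part of the original patch): one
+        # way to get the "accurate method" asked for above is to sample
+        # count(*) twice and derive rows per second. Kept commented out so it
+        # does not change test behavior; it assumes util.sql's tdSql.getData()
+        # and would need "import time" at the top of this file.
+        # def measure_insert_rate(table, seconds=5):
+        #     tdSql.query("select count(*) from %s" % table)
+        #     before = tdSql.getData(0, 0)
+        #     time.sleep(seconds)
+        #     tdSql.query("select count(*) from %s" % table)
+        #     return (tdSql.getData(0, 0) - before) / seconds
+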
+        # insert: drop and child_table_exists combination test
+        # insert: use parameters "childtable_offset" and "childtable_limit" to control each table's offset point and row limit
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-nodbnodrop.json -y" % binPath)
+        tdSql.error("show dbno.stables")
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-newdb.json -y" % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 5)
+        tdSql.query("select count (tbname) from stb1")
+        tdSql.checkData(0, 0, 6)
+        tdSql.query("select count (tbname) from stb2")
+        tdSql.checkData(0, 0, 7)
+        tdSql.query("select count (tbname) from stb3")
+        tdSql.checkData(0, 0, 8)
+        tdSql.query("select count (tbname) from stb4")
+        tdSql.checkData(0, 0, 8)
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-offset.json -y" % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 50)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 240)
+        tdSql.query("select count(*) from stb2")
+        tdSql.checkData(0, 0, 220)
+        tdSql.query("select count(*) from stb3")
+        tdSql.checkData(0, 0, 180)
+        tdSql.query("select count(*) from stb4")
+        tdSql.checkData(0, 0, 160)
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-newtable.json -y" % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 150)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 360)
+        tdSql.query("select count(*) from stb2")
+        tdSql.checkData(0, 0, 360)
+        tdSql.query("select count(*) from stb3")
+        tdSql.checkData(0, 0, 340)
+        tdSql.query("select count(*) from stb4")
+        tdSql.checkData(0, 0, 400)
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-renewdb.json -y" % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 50)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 120)
+        tdSql.query("select count(*) from stb2")
+        tdSql.checkData(0, 0, 140)
+        tdSql.query("select count(*) from stb3")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from stb4")
+        tdSql.checkData(0, 0, 160)
+
+
+        # insert: pass illegal parameters in the json file; we still need a way to write checks for the expected exceptions.
+        tdSql.execute("drop database if exists db")
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns.json -y " % binPath)
+        tdSql.error("use db")
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns-lmax.json -y " % binPath)
+        tdSql.error("select * from db.stb0")
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-columns-count-0.json -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count(*) from db.stb0")
+        tdSql.checkData(0, 0, 10000)
+        tdSql.execute("drop database if exists db")
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-illegal-tags-count129.json -y " % binPath)
+        tdSql.error("use db1")
+
+        # insert: timestamp and step
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-timestep.json -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("show stables")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 10)
+        tdSql.query("select count (tbname) from stb1")
+        tdSql.checkData(0, 0, 20)
+        tdSql.query("select last(ts) from db.stb00_0")
+        tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000")
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 200)
+        tdSql.query("select last(ts) from db.stb01_0")
+        tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000")
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 400)
+
+        # insert: disorder_ratio
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-disorder.json -g 2>&1 -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 1)
+        tdSql.query("select count (tbname) from stb1")
+        tdSql.checkData(0, 0, 1)
+        tdSql.query("select count(*) from stb0")
+        tdSql.checkData(0, 0, 10)
+        tdSql.query("select count(*) from stb1")
+        tdSql.checkData(0, 0, 10)
+
+        # insert: sample json
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-sample.json -y " % binPath)
+        tdSql.execute("use dbtest123")
+        tdSql.query("select col2 from stb0")
+        tdSql.checkData(0, 0, 2147483647)
+        tdSql.query("select t1 from stb1")
+        tdSql.checkData(0, 0, -127)
+        tdSql.query("select t2 from stb1")
+        tdSql.checkData(1, 0, 126)
+
+        # insert: test interlace parameter
+        os.system("%staosdemo -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath)
+        tdSql.execute("use db")
+        tdSql.query("select count (tbname) from stb0")
+        tdSql.checkData(0, 0, 100)
+        tdSql.query("select count (*) from stb0")
+        tdSql.checkData(0, 0, 15000)
+
+
+        os.system("rm -rf ./insert_res.txt")
+        os.system("rm -rf tools/taosdemoAllTest/taosdemoTestInsertWithJson.py.sql")
+
+
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
new file mode 100644
index 0000000000..00b387e398
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
@@ -0,0 +1,91 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################

+# -*- coding: utf-8 -*-

+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+import time
+from datetime import datetime

+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        buildPath = ""  # stays empty if no taosd binary is found below projPath
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosd not found!")
+        else:
+            tdLog.info("taosd found in %s" % buildPath)
+        binPath = buildPath + "/build/bin/"
+
+        # query: insert test data first, then run the specified-table and super-table queries defined in the json files
+        os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath)
+        os.system("%staosdemo -f tools/taosdemoAllTest/speciQuery.json" % binPath)
+        os.system("cat query_res0.txt* |sort -u > all_query_res0.txt")
+        os.system("cat query_res1.txt* |sort -u > all_query_res1.txt")
+        os.system("cat query_res2.txt* |sort -u > all_query_res2.txt")
+        tdSql.execute("use db")
+        tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")')
+        os.system("python3 tools/taosdemoAllTest/convertResFile.py")
+        tdSql.execute("insert into result0 file './test_query_res0.txt'")
+        tdSql.query("select ts from result0")
+        tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000")
+        tdSql.query("select count(*) from result0")
+        tdSql.checkData(0, 0, 1)
+        with open('./all_query_res1.txt','r+') as f1:
+            result1 = int(f1.readline())
+            tdSql.query("select count(*) from stb00_1")
+            tdSql.checkData(0, 0, "%d" % result1)
+
+        with open('./all_query_res2.txt','r+') as f2:
+            result2 = int(f2.readline())
+            d2 = datetime.fromtimestamp(result2/1000)
+            timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f")
+            tdSql.query("select last_row(ts) from stb1")
+            tdSql.checkData(0, 0, "%s" % timest)
+
+        os.system("rm -rf ./insert_res.txt")
+        os.system("rm -rf tools/taosdemoAllTest/taosdemoTestQueryWithJson.py.sql")
+        os.system("rm -rf ./querySystemInfo*")
+        os.system("rm -rf ./query_res*")
+        os.system("rm -rf ./all_query*")
+        os.system("rm -rf ./test_query_res0.txt")
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/script/unique/cluster/balance2.sim b/tests/script/unique/cluster/balance2.sim
index 026678af7c..0b80acbe6c 100644
--- a/tests/script/unique/cluster/balance2.sim
+++ b/tests/script/unique/cluster/balance2.sim
@@ -338,10 +338,6 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
print stop dnode1 and sleep 3000
sleep 3000

-sql drop dnode $hostname1
-print drop dnode1 and sleep 9000
-sleep 9000
-
sql show mnodes
$dnode1Role = 
$data2_1 $dnode4Role = $data2_4 @@ -357,6 +353,25 @@ endi print ============================== step6.1 system sh/exec.sh -n dnode1 -s start +$x = 0 +step6.1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi + +sql show dnodes +print dnode1 $data4_1 + +if $data4_1 != ready then + goto step6.1 +endi + +sql drop dnode $hostname1 +print drop dnode1 and sleep 9000 +sleep 9000 + $x = 0 show6: $x = $x + 1 diff --git a/tests/script/unique/dnode/remove1.sim b/tests/script/unique/dnode/remove1.sim index 6f830d2cf8..25e0846129 100644 --- a/tests/script/unique/dnode/remove1.sim +++ b/tests/script/unique/dnode/remove1.sim @@ -97,7 +97,6 @@ if $data2_2 != 3 then endi print ========== step3 -sql drop dnode $hostname2 $x = 0 show3: @@ -114,6 +113,7 @@ print dnode2 openVnodes $data2_2 print ========== step4 sql create dnode $hostname3 system sh/exec.sh -n dnode3 -s start +sql drop dnode $hostname2 $x = 0 show4: @@ -224,4 +224,4 @@ system sh/exec.sh -n dnode4 -s stop -x SIGINT system sh/exec.sh -n dnode5 -s stop -x SIGINT system sh/exec.sh -n dnode6 -s stop -x SIGINT system sh/exec.sh -n dnode7 -s stop -x SIGINT -system sh/exec.sh -n dnode8 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode8 -s stop -x SIGINT diff --git a/tests/script/unique/dnode/remove2.sim b/tests/script/unique/dnode/remove2.sim index ff92ff7eb0..1d707bc4a3 100644 --- a/tests/script/unique/dnode/remove2.sim +++ b/tests/script/unique/dnode/remove2.sim @@ -98,7 +98,6 @@ endi print ========== step3 system sh/exec.sh -n dnode2 -s stop -x SIGINT -sql drop dnode $hostname2 sql show dnodes print dnode1 openVnodes $data2_1 @@ -128,6 +127,26 @@ endi print ============ step 4.1 system sh/exec.sh -n dnode2 -s start +$x = 0 +step4.1: + $x = $x + 1 + sleep 1000 + if $x == 10 then + return -1 + endi + +sql show dnodes +print dnode1 $data4_1 +print dnode2 $data4_2 +print dnode3 $data4_3 +print dnode4 $data4_4 + +if $data4_2 != ready then + goto step4.1 +endi + +sql drop dnode $hostname2 + $x = 0 show4: $x = $x + 1 From 55d531504dc145135bddb1fb770e3096d6ab6845 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 27 Apr 2021 10:04:46 +0800 Subject: [PATCH 10/14] [TD-850] : describe limitation about calculation between columns. 
--- documentation20/cn/12.taos-sql/docs.md | 1 + 1 file changed, 1 insertion(+) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index edf36a4da1..e977b8ff7a 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -623,6 +623,7 @@ Query OK, 1 row(s) in set (0.001091s) ``` - 可以使用 * 返回所有列,或指定列名。可以对数字列进行四则运算,可以给输出的列取列名。 + * 暂不支持列与列之间的四则运算(例如,不支持 `select a + 2*b from t;`),而是需要在应用程序中进行处理。 * 暂不支持含列名的四则运算表达式用于条件过滤算子(例如,不支持 `where a*2>6;`,但可以写 `where a>6/2;`)。 * 暂不支持含列名的四则运算表达式作为 SQL 函数的应用对象(例如,不支持 `select min(2*a) from t;`,但可以写 `select 2*min(a) from t;`)。 - WHERE 语句可以使用各种逻辑判断来过滤数字值,或使用通配符来过滤字符串。 From 70ac290aa43a7f82f2fa715f1029981bc5776ff4 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 27 Apr 2021 12:24:07 +0800 Subject: [PATCH 11/14] Hotfix/sangshuduo/td 3969 reenable batch create tbl num (#5935) * [TD-3414]: add insert function with json files of taodemo-testcase -repeat * [TD-3879]add stmt mode for taodemo go * [TD-3414]: add insert function with json files of taodemo-testcase * [TD-3918] add case to verify the bug of TD-3897 * [TD-3453]:modify filepath in scripts * Update fulltest.sh * [TD-3918] fix the case * Update queryFilterTswithDateUnit.py * [TD-3850]: fix vnode write enqueue flowctrl UAF & no response * Hotfix/sangshuduo/td 3401 query statistic (#5907) * [TD-3401]: taosdemo query statistic. refactor func name. * [TD-3401]: taosdemo query statistic. refactor func name 2. * [TD-3401]: taosdemo support query statistic. implementation. * cleanup * [TD-3401]: taosdemo query statistic. change us to ms. * [TD-3401]: taosdemo query statistic. increase sql buffer for query. * [TD-3401]: taosdemo query statistic more accurate result. * [TD-3401]: taosdemo query statistic. modify last time logic. Co-authored-by: Shuduo Sang * improve coverage of operations * [TD-3944]: make default offline threshold to 10 days. (#5912) Co-authored-by: Shuduo Sang * [TD-3572]: response out of dnodes if #dnodes <= maxDbReplica * [TD-3937]: add taosdemo performance test compare scripts * update script * Hotfix/sangshuduo/td 3317 for master (#5921) * [TD-3317]: taosdemo interlace insertion. patch for master. * [TD-3317]: taosdemo support interlace mode. adjust remainder rows logic. * [TD-3317]: taosdemo support interlace mode. fix global and stable interlace rows logic. * [TD-3317]: taosdemo support interlace mode. 
fix 'interlaceRows' is used uninitialized

Co-authored-by: Shuduo Sang

* [TD-3968]: taosdemo data length should be 16*1024 (#5926)

Co-authored-by: Shuduo Sang

* [TD-3969]: taosdemo re-enable batch-create-tbl-num

Co-authored-by: tomchon
Co-authored-by: liuyq-617
Co-authored-by: wu champion
Co-authored-by: wu champion
Co-authored-by: Minglei Jin
Co-authored-by: huili <52318143+plum-lihui@users.noreply.github.com>
Co-authored-by: Shengliang Guan
Co-authored-by: Shuduo Sang
Co-authored-by: Ping Xiao
---
 src/kit/taosdemo/taosdemo.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 83ecd145a0..0fc8687b22 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -3657,7 +3657,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
         goto PARSE_OVER;
     }

-    /*
     cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num");
     if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
         g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
@@ -3667,7 +3666,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
         printf("ERROR: failed to read json, batch_create_tbl_num not found\n");
         goto PARSE_OVER;
     }
-    */

From aa88942c3868b07dd3d969b595e97a1a236b0fe0 Mon Sep 17 00:00:00 2001
From: Huo Linhe
Date: Tue, 27 Apr 2021 13:45:08 +0800
Subject: [PATCH 12/14] [TD-3639] : compare timestamp with datetime in python
 (#5940)

This commit closes TD-3639. A test case is no longer required to write a
datetime string with exactly matching precision in order to pass. A check
may use seconds, milliseconds, or microseconds to verify data.

That means the following three checks are equivalent:

```python
.checkData(0, 0, "2020-01-01 00:00:00")
.checkData(0, 0, "2020-01-01 00:00:00.000")
.checkData(0, 0, "2020-01-01 00:00:00.000000")
```
---
 tests/pytest/util/sql.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index ba9cb4d53d..8f62c5932b 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -136,6 +136,11 @@ class TDSql:
     def checkData(self, row, col, data):
         self.checkRowCol(row, col)
         if self.queryResult[row][col] != data:
+            if self.cursor.istype(col, "TIMESTAMP") and self.queryResult[row][col] == datetime.datetime.fromisoformat(data):
+                tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+                           (self.sql, row, col, self.queryResult[row][col], data))
+                return
+
             if str(self.queryResult[row][col]) == str(data):
                 tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
                            (self.sql, row, col, self.queryResult[row][col], data))

From 23e8cd293db513c1faee53cbe1f8aa09c5caa5a6 Mon Sep 17 00:00:00 2001
From: Elias Soong
Date: Tue, 27 Apr 2021 15:45:14 +0800
Subject: [PATCH 13/14] [TD-2639] : remove ambiguous description about select
 expression.

---
 documentation20/cn/12.taos-sql/docs.md | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index e977b8ff7a..5317f871d5 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -414,11 +414,7 @@ SELECT select_expr [, select_expr ...]
[>> export_file]; ``` -#### SELECT子句 - -一个选择子句可以是联合查询(UNION)和另一个查询的子查询(SUBQUERY)。 - -##### 通配符 +#### 通配符 通配符 * 可以用于代指全部列。对于普通表,结果中只有普通列。 ```mysql @@ -488,7 +484,7 @@ taos> SELECT FIRST(*) FROM d1001; Query OK, 1 row(s) in set (0.000849s) ``` -##### 标签列 +#### 标签列 从 2.0.14 版本开始,支持在普通表的查询中指定 _标签列_,且标签列的值会与普通列的数据一起返回。 ```mysql From 324f6308fde663aa70cbb329023e9665fbcabc8c Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 27 Apr 2021 16:20:30 +0800 Subject: [PATCH 14/14] [TD-850] : fix typo. --- documentation20/cn/12.taos-sql/docs.md | 1 - 1 file changed, 1 deletion(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 5317f871d5..04c90748f2 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -619,7 +619,6 @@ Query OK, 1 row(s) in set (0.001091s) ``` - 可以使用 * 返回所有列,或指定列名。可以对数字列进行四则运算,可以给输出的列取列名。 - * 暂不支持列与列之间的四则运算(例如,不支持 `select a + 2*b from t;`),而是需要在应用程序中进行处理。 * 暂不支持含列名的四则运算表达式用于条件过滤算子(例如,不支持 `where a*2>6;`,但可以写 `where a>6/2;`)。 * 暂不支持含列名的四则运算表达式作为 SQL 函数的应用对象(例如,不支持 `select min(2*a) from t;`,但可以写 `select 2*min(a) from t;`)。 - WHERE 语句可以使用各种逻辑判断来过滤数字值,或使用通配符来过滤字符串。
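
A quick way to sanity-check the comparison logic added in PATCH 12/14 above (a minimal standalone sketch, independent of the TDengine test framework): datetime.fromisoformat() parses second-, millisecond- and microsecond-precision strings into equal datetime objects, which is what lets checkData() treat the three spellings interchangeably for TIMESTAMP columns.

```python
import datetime

# fromisoformat() accepts all three precisions and yields equal values,
# so a TIMESTAMP comparison in checkData() succeeds for any of them.
ts = datetime.datetime.fromisoformat("2020-01-01 00:00:00")
assert ts == datetime.datetime.fromisoformat("2020-01-01 00:00:00.000")
assert ts == datetime.datetime.fromisoformat("2020-01-01 00:00:00.000000")
print("all three datetime spellings compare equal:", ts)
```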