diff --git a/docs/examples/c/CMakeLists.txt b/docs/examples/c/CMakeLists.txt new file mode 100644 index 0000000000..f636084bab --- /dev/null +++ b/docs/examples/c/CMakeLists.txt @@ -0,0 +1,101 @@ +PROJECT(TDengine) + +IF (TD_LINUX) + INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc) + AUX_SOURCE_DIRECTORY(. SRC) + + add_executable(docs_connect_example "") + add_executable(docs_create_db_demo "") + add_executable(docs_insert_data_demo "") + add_executable(docs_query_data_demo "") + add_executable(docs_with_reqid_demo "") + add_executable(docs_sml_insert_demo "") + add_executable(docs_stmt_insert_demo "") + add_executable(docs_tmq_demo "") + + target_sources(docs_connect_example + PRIVATE + "connect_example.c" + ) + + target_sources(docs_create_db_demo + PRIVATE + "create_db_demo.c" + ) + + target_sources(docs_insert_data_demo + PRIVATE + "insert_data_demo.c" + ) + + target_sources(docs_query_data_demo + PRIVATE + "query_data_demo.c" + ) + + target_sources(docs_with_reqid_demo + PRIVATE + "with_reqid_demo.c" + ) + + target_sources(docs_sml_insert_demo + PRIVATE + "sml_insert_demo.c" + ) + + target_sources(docs_stmt_insert_demo + PRIVATE + "stmt_insert_demo.c" + ) + + target_sources(docs_tmq_demo + PRIVATE + "tmq_demo.c" + ) + + target_link_libraries(docs_connect_example + taos + ) + + target_link_libraries(docs_create_db_demo + taos + ) + + target_link_libraries(docs_insert_data_demo + taos + ) + + target_link_libraries(docs_query_data_demo + taos + ) + + target_link_libraries(docs_with_reqid_demo + taos + ) + + target_link_libraries(docs_sml_insert_demo + taos + ) + + target_link_libraries(docs_stmt_insert_demo + taos + ) + + target_link_libraries(docs_tmq_demo + taos + pthread + ) + + SET_TARGET_PROPERTIES(docs_connect_example PROPERTIES OUTPUT_NAME docs_connect_example) + SET_TARGET_PROPERTIES(docs_create_db_demo PROPERTIES OUTPUT_NAME docs_create_db_demo) + SET_TARGET_PROPERTIES(docs_insert_data_demo PROPERTIES OUTPUT_NAME docs_insert_data_demo) + SET_TARGET_PROPERTIES(docs_query_data_demo PROPERTIES OUTPUT_NAME docs_query_data_demo) + SET_TARGET_PROPERTIES(docs_with_reqid_demo PROPERTIES OUTPUT_NAME docs_with_reqid_demo) + SET_TARGET_PROPERTIES(docs_sml_insert_demo PROPERTIES OUTPUT_NAME docs_sml_insert_demo) + SET_TARGET_PROPERTIES(docs_stmt_insert_demo PROPERTIES OUTPUT_NAME docs_stmt_insert_demo) + SET_TARGET_PROPERTIES(docs_tmq_demo PROPERTIES OUTPUT_NAME docs_tmq_demo) +ENDIF () +IF (TD_DARWIN) + INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc) + AUX_SOURCE_DIRECTORY(. 
SRC) +ENDIF () diff --git a/docs/examples/c/Makefile b/docs/examples/c/Makefile new file mode 100644 index 0000000000..9fda575ec6 --- /dev/null +++ b/docs/examples/c/Makefile @@ -0,0 +1,34 @@ +# Makefile for building TDengine examples on TD Linux platform + +INCLUDE_DIRS = + +TARGETS = connect_example \ + create_db_demo \ + insert_data_demo \ + query_data_demo \ + with_reqid_demo \ + sml_insert_demo \ + stmt_insert_demo \ + tmq_demo + +SOURCES = connect_example.c \ + create_db_demo.c \ + insert_data_demo.c \ + query_data_demo.c \ + with_reqid_demo.c \ + sml_insert_demo.c \ + stmt_insert_demo.c \ + tmq_demo.c + +LIBS = -ltaos -lpthread + + +CFLAGS = -g + +all: $(TARGETS) + +$(TARGETS): + $(CC) $(CFLAGS) -o $@ $(wildcard $(@F).c) $(LIBS) + +clean: + rm -f $(TARGETS) \ No newline at end of file diff --git a/docs/zh/06-advanced/05-data-in/03-pi.md b/docs/zh/06-advanced/05-data-in/03-pi.md index 6b65b1337f..8a2c5ada35 100644 --- a/docs/zh/06-advanced/05-data-in/03-pi.md +++ b/docs/zh/06-advanced/05-data-in/03-pi.md @@ -27,7 +27,7 @@ PI 系统是一套用于数据收集、查找、分析、传递和可视化的 在数据写入页面中,点击 **+新增数据源** 按钮,进入新增数据源页面。 -![kafka-01.png](./kafka-01.png) +![new.png](./pic/pi-01-new.png) ### 基本配置 diff --git a/docs/zh/06-advanced/05-data-in/07-mqtt.md b/docs/zh/06-advanced/05-data-in/07-mqtt.md index af99cd3621..f54086b61b 100644 --- a/docs/zh/06-advanced/05-data-in/07-mqtt.md +++ b/docs/zh/06-advanced/05-data-in/07-mqtt.md @@ -33,13 +33,14 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T ### 3. 配置连接和认证信息 -在 **MQTT地址** 中填写 MQTT 代理的地址,例如:`192.168.1.42:1883` +在 **MQTT 地址** 中填写 MQTT 代理的地址,例如:`192.168.1.42` + +在 **MQTT 端口** 中填写 MQTT 代理的端口,例如:`1883` 在 **用户** 中填写 MQTT 代理的用户名。 在 **密码** 中填写 MQTT 代理的密码。 -点击 **连通性检查** 按钮,检查数据源是否可用。 ![mqtt-03.png](./mqtt-03.png) @@ -64,6 +65,8 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T 在 **订阅主题及 QoS 配置** 中填写要消费的 Topic 名称。使用如下格式设置: `topic1::0,topic2::1`。 +点击 **检查连通性** 按钮,检查数据源是否可用。 + ![mqtt-05.png](./mqtt-05.png) ### 6. 
配置 MQTT Payload 解析 diff --git a/docs/zh/06-advanced/05-data-in/08-kafka.md b/docs/zh/06-advanced/05-data-in/08-kafka.md index 837aa8d8fb..8cca24930e 100644 --- a/docs/zh/06-advanced/05-data-in/08-kafka.md +++ b/docs/zh/06-advanced/05-data-in/08-kafka.md @@ -102,7 +102,7 @@ kcat \ 在 **主题** 中填写要消费的 Topic 名称。可以配置多个 Topic , Topic 之间用逗号分隔。例如:`tp1,tp2`。 -在 **Client ID** 中填写客户端标识,填写后会生成带有 `taosx` 前缀的客户端 ID (例如,如果填写的标识为 `foo`,则生成的客户端 ID 为 `taosxfoo`)。如果打开末尾处的开关,则会把当前任务的任务 ID 拼接到 `taosx` 之后,输入的标识之前(生成的客户端 ID 形如 `taosx100foo`)。连接到同一个 Kafka 集群的所有客户端 ID 必须保证唯一。 +在 **Client ID** 中填写客户端标识,填写后会生成带有 `taosx` 前缀的客户端 ID (例如,如果填写的标识为 `foo`,则生成的客户端 ID 为 `taosxfoo`)。如果打开末尾处的开关,则会把当前任务的任务 ID 拼接到 `taosx` 之后,输入的标识之前(生成的客户端 ID 形如 `taosx100foo`)。需要注意的是,当使用多个 taosX 订阅同一 Topic 需要进行负载均衡时,必须填写一致的客户端 ID 才能达到均衡效果。 在 **消费者组 ID** 中填写消费者组标识,填写后会生成带有 `taosx` 前缀的消费者组 ID (例如,如果填写的标识为 `foo`,则生成的消费者组 ID 为 `taosxfoo`)。如果打开末尾处的开关,则会把当前任务的任务 ID 拼接到 `taosx` 之后,输入的标识之前(生成的消费者组 ID 形如 `taosx100foo`)。 diff --git a/docs/zh/06-advanced/05-data-in/csv-03.png b/docs/zh/06-advanced/05-data-in/csv-03.png index 4165469db5..1e0bd97a51 100644 Binary files a/docs/zh/06-advanced/05-data-in/csv-03.png and b/docs/zh/06-advanced/05-data-in/csv-03.png differ diff --git a/docs/zh/06-advanced/05-data-in/migrate-step2.png b/docs/zh/06-advanced/05-data-in/migrate-step2.png index 54412d0536..03cfa205a2 100644 Binary files a/docs/zh/06-advanced/05-data-in/migrate-step2.png and b/docs/zh/06-advanced/05-data-in/migrate-step2.png differ diff --git a/docs/zh/06-advanced/05-data-in/migrate-step3.png b/docs/zh/06-advanced/05-data-in/migrate-step3.png index 17b3024ca7..8ebfae8d1e 100644 Binary files a/docs/zh/06-advanced/05-data-in/migrate-step3.png and b/docs/zh/06-advanced/05-data-in/migrate-step3.png differ diff --git a/docs/zh/06-advanced/05-data-in/migrate-step4.png b/docs/zh/06-advanced/05-data-in/migrate-step4.png index e6b88ed080..273e588def 100644 Binary files a/docs/zh/06-advanced/05-data-in/migrate-step4.png and b/docs/zh/06-advanced/05-data-in/migrate-step4.png differ diff --git a/docs/zh/06-advanced/05-data-in/mqtt-02.png b/docs/zh/06-advanced/05-data-in/mqtt-02.png index 5c25b27c67..d2d813144d 100644 Binary files a/docs/zh/06-advanced/05-data-in/mqtt-02.png and b/docs/zh/06-advanced/05-data-in/mqtt-02.png differ diff --git a/docs/zh/06-advanced/05-data-in/mqtt-03.png b/docs/zh/06-advanced/05-data-in/mqtt-03.png index c53da8f14d..292fed0d1a 100644 Binary files a/docs/zh/06-advanced/05-data-in/mqtt-03.png and b/docs/zh/06-advanced/05-data-in/mqtt-03.png differ diff --git a/docs/zh/06-advanced/05-data-in/mqtt-04.png b/docs/zh/06-advanced/05-data-in/mqtt-04.png index 430c5b8fef..e099875679 100644 Binary files a/docs/zh/06-advanced/05-data-in/mqtt-04.png and b/docs/zh/06-advanced/05-data-in/mqtt-04.png differ diff --git a/docs/zh/06-advanced/05-data-in/mqtt-05.png b/docs/zh/06-advanced/05-data-in/mqtt-05.png index d362e8be86..2b4cfcabb3 100644 Binary files a/docs/zh/06-advanced/05-data-in/mqtt-05.png and b/docs/zh/06-advanced/05-data-in/mqtt-05.png differ diff --git a/docs/zh/06-advanced/05-data-in/mqtt-14.png b/docs/zh/06-advanced/05-data-in/mqtt-14.png index b1fe456354..0388d8a705 100644 Binary files a/docs/zh/06-advanced/05-data-in/mqtt-14.png and b/docs/zh/06-advanced/05-data-in/mqtt-14.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-02zh-SelectTheTypeAsInfluxDB.png b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-02zh-SelectTheTypeAsInfluxDB.png index 71393ec24e..d9b806926e 100644 Binary files 
a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-02zh-SelectTheTypeAsInfluxDB.png and b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-02zh-SelectTheTypeAsInfluxDB.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-03zh-FillInTheConnectionInformation.png b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-03zh-FillInTheConnectionInformation.png index 70c1ffe89e..9fbc6dccaa 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-03zh-FillInTheConnectionInformation.png and b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-03zh-FillInTheConnectionInformation.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-04zh-SelectVersion1.x.png b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-04zh-SelectVersion1.x.png index d3542a38a1..836b566ec4 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-04zh-SelectVersion1.x.png and b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-04zh-SelectVersion1.x.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-05zh-SelectVersion2.x.png b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-05zh-SelectVersion2.x.png index 161705aa44..1ebf13601c 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-05zh-SelectVersion2.x.png and b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-05zh-SelectVersion2.x.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-08zh-GetSchemaAndSelectOneBucket.png b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-08zh-GetSchemaAndSelectOneBucket.png index 21714b6b51..55bee503a0 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-08zh-GetSchemaAndSelectOneBucket.png and b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-08zh-GetSchemaAndSelectOneBucket.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-10zh-AdvancedOptionsExpand.png b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-10zh-AdvancedOptionsExpand.png index 02c3f73629..dbb188852c 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/InfluxDB-10zh-AdvancedOptionsExpand.png and b/docs/zh/06-advanced/05-data-in/pic/InfluxDB-10zh-AdvancedOptionsExpand.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-02zh-SelectTheTypeAsOpenTSDB.png b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-02zh-SelectTheTypeAsOpenTSDB.png index 7b039fbe77..9d2569880b 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-02zh-SelectTheTypeAsOpenTSDB.png and b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-02zh-SelectTheTypeAsOpenTSDB.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-03zh-FillInTheConnectionInformation.png b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-03zh-FillInTheConnectionInformation.png index 25f5aaca83..8b3bbd1ffa 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-03zh-FillInTheConnectionInformation.png and b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-03zh-FillInTheConnectionInformation.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-06zh-GetAndSelectMetrics.png b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-06zh-GetAndSelectMetrics.png index 447ba589cc..18101c256b 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-06zh-GetAndSelectMetrics.png and b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-06zh-GetAndSelectMetrics.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-08zh-AdvancedOptionsExpand.png b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-08zh-AdvancedOptionsExpand.png index 1c57bc19af..ea5dc538e5 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-08zh-AdvancedOptionsExpand.png and 
b/docs/zh/06-advanced/05-data-in/pic/OpenTSDB-08zh-AdvancedOptionsExpand.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-02.png b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-02.png index edbaff6595..8fa2419c56 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-02.png and b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-02.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-03.png b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-03.png index 3183a02c26..3645668e3b 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-03.png and b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-03.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-04.png b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-04.png index 15a126fe46..776e37b153 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-04.png and b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-04.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-05.png b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-05.png index 12380aa4b6..1abe21436e 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-05.png and b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-05.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-06.png b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-06.png index 9a1533fe4d..74ffb467d2 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-06.png and b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-06.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-08.png b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-08.png index d941b9b700..234f2805bc 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-08.png and b/docs/zh/06-advanced/05-data-in/pic/avevaHistorian-08.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/opcda-01-basic.png b/docs/zh/06-advanced/05-data-in/pic/opcda-01-basic.png index 4c8808fd10..bcb7566cee 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/opcda-01-basic.png and b/docs/zh/06-advanced/05-data-in/pic/opcda-01-basic.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/opcda-02-endpoint.png b/docs/zh/06-advanced/05-data-in/pic/opcda-02-endpoint.png index d58e59f38a..f732fe633f 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/opcda-02-endpoint.png and b/docs/zh/06-advanced/05-data-in/pic/opcda-02-endpoint.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/opcda-03-connect.png b/docs/zh/06-advanced/05-data-in/pic/opcda-03-connect.png deleted file mode 100644 index 323e3adc6e..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/opcda-03-connect.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/opcda-05-csv.png b/docs/zh/06-advanced/05-data-in/pic/opcda-05-csv.png deleted file mode 100644 index 8286355c0d..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/opcda-05-csv.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/opcua-01-basic.png b/docs/zh/06-advanced/05-data-in/pic/opcua-01-basic.png index dc2c994b2e..38bde91ce4 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/opcua-01-basic.png and b/docs/zh/06-advanced/05-data-in/pic/opcua-01-basic.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/opcua-02-endpoint.png b/docs/zh/06-advanced/05-data-in/pic/opcua-02-endpoint.png index 0901431eea..53c67c47d2 100644 Binary files 
a/docs/zh/06-advanced/05-data-in/pic/opcua-02-endpoint.png and b/docs/zh/06-advanced/05-data-in/pic/opcua-02-endpoint.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/opcua-03-connect.png b/docs/zh/06-advanced/05-data-in/pic/opcua-03-connect.png deleted file mode 100644 index 5654c99f9a..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/opcua-03-connect.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/opcua-04-auth.png b/docs/zh/06-advanced/05-data-in/pic/opcua-04-auth.png index 51911fe88a..0fda46face 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/opcua-04-auth.png and b/docs/zh/06-advanced/05-data-in/pic/opcua-04-auth.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/opcua-05-csv.png b/docs/zh/06-advanced/05-data-in/pic/opcua-05-csv.png deleted file mode 100644 index 40c1149c68..0000000000 Binary files a/docs/zh/06-advanced/05-data-in/pic/opcua-05-csv.png and /dev/null differ diff --git a/docs/zh/06-advanced/05-data-in/pic/opcua-07-advance.png b/docs/zh/06-advanced/05-data-in/pic/opcua-07-advance.png index 6a96573fae..22cb89a6da 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/opcua-07-advance.png and b/docs/zh/06-advanced/05-data-in/pic/opcua-07-advance.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/pi-01-agent.png b/docs/zh/06-advanced/05-data-in/pic/pi-01-agent.png index 4ea3274ea5..e5a3b19880 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/pi-01-agent.png and b/docs/zh/06-advanced/05-data-in/pic/pi-01-agent.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/pi-01-new.png b/docs/zh/06-advanced/05-data-in/pic/pi-01-new.png new file mode 100644 index 0000000000..386b2462ee Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/pic/pi-01-new.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/pi-02-connect-archive-only.png b/docs/zh/06-advanced/05-data-in/pic/pi-02-connect-archive-only.png index dc530cb9ba..2f210b0c9d 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/pi-02-connect-archive-only.png and b/docs/zh/06-advanced/05-data-in/pic/pi-02-connect-archive-only.png differ diff --git a/docs/zh/06-advanced/05-data-in/pic/pi-03-connect-af.png b/docs/zh/06-advanced/05-data-in/pic/pi-03-connect-af.png index e38fffc4b6..8b62c36574 100644 Binary files a/docs/zh/06-advanced/05-data-in/pic/pi-03-connect-af.png and b/docs/zh/06-advanced/05-data-in/pic/pi-03-connect-af.png differ diff --git a/docs/zh/06-advanced/05-data-in/tmq-step2.png b/docs/zh/06-advanced/05-data-in/tmq-step2.png index 01dded1372..d0e158f883 100644 Binary files a/docs/zh/06-advanced/05-data-in/tmq-step2.png and b/docs/zh/06-advanced/05-data-in/tmq-step2.png differ diff --git a/docs/zh/06-advanced/05-data-in/tmq-step3.png b/docs/zh/06-advanced/05-data-in/tmq-step3.png index 67461efe17..6d7abeaa95 100644 Binary files a/docs/zh/06-advanced/05-data-in/tmq-step3.png and b/docs/zh/06-advanced/05-data-in/tmq-step3.png differ diff --git a/docs/zh/06-advanced/05-data-in/tmq-step4.png b/docs/zh/06-advanced/05-data-in/tmq-step4.png index 0ec896de3e..f55856f28f 100644 Binary files a/docs/zh/06-advanced/05-data-in/tmq-step4.png and b/docs/zh/06-advanced/05-data-in/tmq-step4.png differ diff --git a/docs/zh/06-advanced/05-data-in/tmq-step5.png b/docs/zh/06-advanced/05-data-in/tmq-step5.png index 568658bdaf..336f888d02 100644 Binary files a/docs/zh/06-advanced/05-data-in/tmq-step5.png and b/docs/zh/06-advanced/05-data-in/tmq-step5.png differ diff --git a/docs/zh/14-reference/01-components/04-taosx.md 
b/docs/zh/14-reference/01-components/04-taosx.md index ce372a8007..114a6b1ce5 100644 --- a/docs/zh/14-reference/01-components/04-taosx.md +++ b/docs/zh/14-reference/01-components/04-taosx.md @@ -239,40 +239,45 @@ d4,2017-07-14T10:40:00.006+08:00,-2.740636,10,-0.893545,7,California.LosAngles - `plugins_home`:外部数据源连接器所在目录。 - `data_dir`:数据文件存放目录。 -- `logs_home`:日志文件存放目录,`taosX` 日志文件的前缀为 `taosx.log`,外部数据源有自己的日志文件名前缀。 -- `log_level`:日志等级,可选级别包括 `error`、`warn`、`info`、`debug`、`trace`,默认值为 `info`。 -- `log_keep_days`:日志的最大存储天数,`taosX` 日志将按天划分为不同的文件。 +- `instanceId`:当前 taosX 服务的实例 ID,如果同一台机器上启动了多个 taosX 实例,必须保证各个实例的实例 ID 互不相同。 +- `logs_home`:日志文件存放目录,`taosX` 日志文件的前缀为 `taosx.log`,外部数据源有自己的日志文件名前缀。已弃用,请使用 `log.path` 代替。 +- `log_level`:日志等级,可选级别包括 `error`、`warn`、`info`、`debug`、`trace`,默认值为 `info`。已弃用,请使用 `log.level` 代替。 +- `log_keep_days`:日志的最大存储天数,`taosX` 日志将按天划分为不同的文件。已弃用,请使用 `log.keepDays` 代替。 - `jobs`:每个运行时的最大线程数。在服务模式下,线程总数为 `jobs*2`,默认线程数为`当前服务器内核*2`。 - `serve.listen`:是 `taosX` REST API 监听地址,默认值为 `0.0.0.0:6050`。 - `serve.database_url`:`taosX` 数据库的地址,格式为 `sqlite:`。 +- `serve.request_timeout`:全局接口 API 超时时间。 - `monitor.fqdn`:`taosKeeper` 服务的 FQDN,没有默认值,置空则关闭监控功能。 - `monitor.port`:`taosKeeper` 服务的端口,默认`6043`。 - `monitor.interval`:向 `taosKeeper` 发送指标的频率,默认为每 10 秒一次,只有 1 到 10 之间的值才有效。 +- `log.path`:日志文件存放的目录。 +- `log.level`:日志级别,可选值为 "error", "warn", "info", "debug", "trace"。 +- `log.compress`:日志文件滚动后的文件是否进行压缩。 +- `log.rotationCount`:日志文件目录下最多保留的文件数,超出数量的旧文件被删除。 +- `log.rotationSize`:触发日志文件滚动的文件大小(单位为字节),当日志文件超出此大小后会生成一个新文件,新的日志会写入新文件。 +- `log.reservedDiskSize`:日志所在磁盘停止写入日志的阈值(单位为字节),当磁盘剩余空间达到此大小后停止写入日志。 +- `log.keepDays`:日志文件保存的天数,超过此天数的旧日志文件会被删除。 +- `log.watching`:是否对日志文件中 `log.loggers` 配置内容的变更进行监听并尝试重载。 +- `log.loggers`:指定模块的日志输出级别,格式为 `"modname" = "level"`,同时适配 tracing 库语法,可以根据 `modname[span{field=value}]=level`,其中 `level` 为日志级别。 如下所示: ```toml -# plugins home -#plugins_home = "/usr/local/taos/plugins" # on linux/macOS -#plugins_home = "C:\\TDengine\\plugins" # on windows - # data dir #data_dir = "/var/lib/taos/taosx" # on linux/macOS #data_dir = "C:\\TDengine\\data\\taosx" # on windows -# logs home -#logs_home = "/var/log/taos" # on linux/macOS -#logs_home = "C:\\TDengine\\log" # on windows - -# log level: off/error/warn/info/debug/trace -#log_level = "info" - -# log keep days -#log_keep_days = 30 - -# number of jobs, default to 0, will use `jobs` number of works for TMQ +# number of threads used for tokio workers, default to 0 (means cores * 2) #jobs = 0 +# enable OpenTelemetry tracing and metrics exporter +#otel = false + +# server instance id +# +# The instanceId of each instance is unique on the host +# instanceId = 16 + [serve] # listen to ip:port address #listen = "0.0.0.0:6050" @@ -280,13 +285,66 @@ d4,2017-07-14T10:40:00.006+08:00,-2.740636,10,-0.893545,7,California.LosAngles # database url #database_url = "sqlite:taosx.db" +# default global request timeout which unit is second. This parameter takes effect for certain interfaces that require a timeout setting +#request_timeout = 30 + [monitor] # FQDN of taosKeeper service, no default value #fqdn = "localhost" -# port of taosKeeper service, default 6043 + +# Port of taosKeeper service, default 6043 #port = 6043 -# how often to send metrics to taosKeeper, default every 10 seconds. Only value from 1 to 10 is valid. + +# How often to send metrics to taosKeeper, default every 10 seconds. Only value from 1 to 10 is valid. 
#interval = 10 + + +# log configuration +[log] +# All log files are stored in this directory +# +#path = "/var/log/taos" # on linux/macOS +#path = "C:\\TDengine\\log" # on windows + +# log filter level +# +#level = "info" + +# Compress archived log files or not +# +#compress = false + +# The number of log files retained by the current explorer server instance in the `path` directory +# +#rotationCount = 30 + +# Rotate when the log file reaches this size +# +#rotationSize = "1GB" + +# Log downgrade when the remaining disk space reaches this size, only logging `ERROR` level logs +# +#reservedDiskSize = "1GB" + +# The number of days log files are retained +# +#keepDays = 30 + +# Watching the configuration file for log.loggers changes, default to true. +# +#watching = true + +# Customize the log output level of modules, and changes will be applied after modifying the file when log.watching is enabled +# +# ## Examples: +# +# crate = "error" +# crate::mod1::mod2 = "info" +# crate::span[field=value] = "warn" +# +[log.loggers] +#"actix_server::accept" = "warn" +#"taos::query" = "warn" ``` ### 启动 @@ -451,6 +509,16 @@ taosX 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeep | written_blocks | 本次运行此任务写人成功的 raw block 数 | | failed_blocks | 本次运行此任务写入失败的 raw block 数 | +### Kafka 数据源相关指标 + +| 字段 | 描述 | +| ----------------------------- | ---------------------------- | +| kafka_consumers | 本次运行任务 Kafka 消费者数 | +| kafka_total_partitions | Kafka 主题总分区数 | +| kafka_consuming_partitions | 本次运行任务正在消费的分区数 | +| kafka_consumed_messages | 本次运行任务已经消费的消息数 | +| total_kafka_consumed_messages | 累计消费的消息总数 | + ## taosX 数据解析插件 接入 kafka / mqtt 消息中间件时,需要对原始数据进行解析,如果使用 json/regex 等模式解析器无法满足解析需求,同时 UDT(自定义解析脚本) 也无法满足性能要求时,可以自定义数据解析插件。 diff --git a/docs/zh/14-reference/01-components/05-taosx-agent.md b/docs/zh/14-reference/01-components/05-taosx-agent.md index da1c395b3d..bf2e6f7e78 100644 --- a/docs/zh/14-reference/01-components/05-taosx-agent.md +++ b/docs/zh/14-reference/01-components/05-taosx-agent.md @@ -11,18 +11,69 @@ sidebar_label: taosX-Agent - `endpoint`: 必填,`taosX` 的 GRPC 服务地址。 - `token`: 必填,在 `Explorer` 上创建 `Agent` 时,产生的 Token。 +- `instanceId`:当前 taosx-agent 服务的实例 ID,如果同一台机器上启动了多个 taosx-agent 实例,必须保证各个实例的实例 ID 互不相同。 - `compression`: 非必填,可配置为 `ture` 或 `false`, 默认为 `false`。配置为`true`, 则开启 `Agent` 和 `taosX` 通信数据压缩。 -- `log_level`: 非必填,日志级别,默认为 `info`, 同 `taosX` 一样,支持 `error`,`warn`,`info`,`debug`,`trace` 五级。 -- `log_keep_days`:非必填,日志保存天数,默认为 `30` 天。 +- `log_level`: 非必填,日志级别,默认为 `info`, 同 `taosX` 一样,支持 `error`,`warn`,`info`,`debug`,`trace` 五级。已弃用,请使用 `log.level` 代替。 +- `log_keep_days`:非必填,日志保存天数,默认为 `30` 天。已弃用,请使用 `log.keepDays` 代替。 +- `log.path`:日志文件存放的目录。 +- `log.level`:日志级别,可选值为 "error", "warn", "info", "debug", "trace"。 +- `log.compress`:日志文件滚动后的文件是否进行压缩。 +- `log.rotationCount`:日志文件目录下最多保留的文件数,超出数量的旧文件被删除。 +- `log.rotationSize`:触发日志文件滚动的文件大小(单位为字节),当日志文件超出此大小后会生成一个新文件,新的日志会写入新文件。 +- `log.reservedDiskSize`:日志所在磁盘停止写入日志的阈值(单位为字节),当磁盘剩余空间达到此大小后停止写入日志。 +- `log.keepDays`:日志文件保存的天数,超过此天数的旧日志文件会被删除。 如下所示: ```TOML -endpoint = "grpc://:6055" -token = "" -compression = true -log_level = "info" -log_keep_days = 30 +# taosX service endpoint +# +#endpoint = "http://localhost:6055" + +# !important! +# Uncomment it and copy-paste the token generated in Explorer. 
+# +#token = "" + +# server instance id +# +# The instanceId of each instance is unique on the host +# instanceId = 48 + +# enable communication data compression between Agent and taosX +# +#compression = true + +# log configuration +[log] +# All log files are stored in this directory +# +#path = "/var/log/taos" # on linux/macOS +#path = "C:\\TDengine\\log" # on windows + +# log filter level +# +#level = "info" + +# Compress archived log files or not +# +#compress = false + +# The number of log files retained by the current explorer server instance in the `path` directory +# +#rotationCount = 30 + +# Rotate when the log file reaches this size +# +#rotationSize = "1GB" + +# Log downgrade when the remaining disk space reaches this size, only logging `ERROR` level logs +# +#reservedDiskSize = "1GB" + +# The number of days log files are retained +# +#keepDays = 30 ``` 您不必对配置文件如何设置感到疑惑,阅读并跟随 `Explorer` 中创建 `Agent` 的提示进行操作,您可以对配置文件进行查看、修改和检查。 diff --git a/docs/zh/14-reference/01-components/07-explorer.md b/docs/zh/14-reference/01-components/07-explorer.md index 6a8972deea..c63bc703e2 100644 --- a/docs/zh/14-reference/01-components/07-explorer.md +++ b/docs/zh/14-reference/01-components/07-explorer.md @@ -15,36 +15,111 @@ taosEexplorer 无需单独安装,从 TDengine 3.3.0.0 版本开始,它随着 在启动 taosExplorer 之前,请确保配置文件中的内容正确。 ```TOML -# listen port +# This is a automacically generated configuration file for Explorer in [TOML](https://toml.io/) format. +# +# Here is a full list of available options. + +# Explorer server port to listen on. +# Default is 6060. +# port = 6060 -# listen address for IPv4 +# IPv4 listen address. +# Default is 0.0.0.0 addr = "0.0.0.0" -# listen address for IPv4 -#ipv6 = "::1" +# IPv6 listen address. -# log level. Possible: error,warn,info,debug,trace +# ipv6 = "::1" + +# explorer server instance id +# +# The instanceId of each instance is unique on the host +# instanceId = 1 + +# Explorer server log level. +# Default is "info" +# +# Deprecated: use log.level instead log_level = "info" -# taosAdapter address. +# All data files are stored in this directory +# data_dir = "/var/lib/taos/explorer" # Default for Linux +# data_dir = "C:\\TDengine\\data\\explorer" # Default for Windows + +# REST API endpoint to connect to the cluster. +# This configuration is also the target for data migration tasks. +# +# Default is "http://localhost:6041" - the default endpoint for REST API. +# cluster = "http://localhost:6041" -# taosX gRPC address +# native endpoint to connect to the cluster. +# Default is disabled. To enable it, set it to the native API URL like "taos://localhost:6030" and uncomment it. +# If you enable it, you will get more performance for data migration tasks. +# +# cluster_native = "taos://localhost:6030" + +# API endpoint for data replication/backup/data sources. No default option. +# Set it to API URL like "http://localhost:6050". +# x_api = "http://localhost:6050" # GRPC endpoint for "Agent"s. +# Default is "http://localhost:6055" - the default endpoint for taosX grpc API. +# You should set it to public IP or FQDN name like: +# "http://192.168.111.111:6055" or "http://node1.company.domain:6055" and +# ensure to add the port to the exception list of the firewall if it enabled. 
grpc = "http://localhost:6055" # CORS configuration switch, it allows cross-origin access -cors = false +cors = true -# Enable ssl: if the following two files exist, enable ssl protocol +# Enable ssl +# If the following two files exist, enable ssl protocol +# [ssl] + # SSL certificate -#certificate = "/path/to/ca.file" +# +# certificate = "/path/to/ca.file" # on linux/macOS +# certificate = "C:\\path\\to\\ca.file" # on windows + # SSL certificate key -#certificate_key = "/path/to/key.file" +# +# certificate_key = "/path/to/key.file" # on linux/macOS +# certificate_key = "C:\\path\\to\\key.file" # on windows + +# log configuration +[log] +# All log files are stored in this directory +# +# path = "/var/log/taos" # on linux/macOS +# path = "C:\\TDengine\\log" # on windows + +# log filter level +# +# level = "info" + +# Compress archived log files or not +# +# compress = false + +# The number of log files retained by the current explorer server instance in the `path` directory +# +# rotationCount = 30 + +# Rotate when the log file reaches this size +# +# rotationSize = "1GB" + +# Log downgrade when the remaining disk space reaches this size, only logging `ERROR` level logs +# +# reservedDiskSize = "1GB" + +# The number of days log files are retained +# +# keepDays = 30 ``` 说明: @@ -52,13 +127,23 @@ cors = false - `port`:taosExplorer 服务绑定的端口。 - `addr`:taosExplorer 服务绑定的 IPv4 地址,默认为 `0.0.0.0`。如需修改,请配置为 `localhost` 之外的地址以对外提供服务。 - `ipv6`:taosExplorer 服务绑定的 IPv6 地址,默认不绑定 IPv6 地址。 -- `log_level`:日志级别,可选值为 "error", "warn", "info", "debug", "trace"。 +- `instanceId`:当前 explorer 服务的实例 ID,如果同一台机器上启动了多个 explorer 实例,必须保证各个实例的实例 ID 互不相同。 +- `log_level`:日志级别,可选值为 "error", "warn", "info", "debug", "trace"。此参数已弃用,请使用 `log.level` 代替。 - `cluster`:TDengine 集群的 taosAdapter 地址。 +- `cluster_native`:TDengine 集群的原生连接地址,默认关闭。 - `x_api`:taosX 的 gRPC 地址。 -- `grpc`: taosX 代理向 taosX 建立连接的 gRPC 地址. +- `grpc`:taosX 代理向 taosX 建立连接的 gRPC 地址。 - `cors`:CORS 配置开关,默认为 `false`。当为 `true` 时,允许跨域访问。 -- `ssl.certificate`: SSL 证书(如果同时设置了 certificate 与 certificate_key 两个参数,则启用 HTTPS 服务,否则不启用)。 -- `ssl.certificate_key`: SSL 证书密钥。 +- `ssl.certificate`:SSL 证书(如果同时设置了 certificate 与 certificate_key 两个参数,则启用 HTTPS 服务,否则不启用)。 +- `ssl.certificate_key`:SSL 证书密钥。 +- `log.path`:日志文件存放的目录。 +- `log.level`:日志级别,可选值为 "error", "warn", "info", "debug", "trace"。 +- `log.compress`:日志文件滚动后的文件是否进行压缩。 +- `log.rotationCount`:日志文件目录下最多保留的文件数,超出数量的旧文件被删除。 +- `log.rotationSize`:触发日志文件滚动的文件大小(单位为字节),当日志文件超出此大小后会生成一个新文件,新的日志会写入新文件。 +- `log.reservedDiskSize`:日志所在磁盘停止写入日志的阈值(单位为字节),当磁盘剩余空间达到此大小后停止写入日志。 +- `log.keepDays`:日志文件保存的天数,超过此天数的旧日志文件会被删除。 + ## 启动停止 diff --git a/docs/zh/14-reference/05-connector/50-odbc.mdx b/docs/zh/14-reference/05-connector/50-odbc.mdx index ee69cf9364..38dd88b86d 100644 --- a/docs/zh/14-reference/05-connector/50-odbc.mdx +++ b/docs/zh/14-reference/05-connector/50-odbc.mdx @@ -60,6 +60,8 @@ TDengine ODBC 支持两种连接 TDengine 数据库方式:Websocket 连接与 4.6【密码】仅供第5步测试连接使用,选填,数据库用户密码,如果不填,TDengine 默认 taosdata + 4.7【兼容软件】支持对工业软件 KingSCADA、Kepware 等的兼容性适配,通常情况下,选择默认值 General 即可 + 5. 点【测试连接】测试连接情况,如果成功,提示"成功连接到URL" 6. 点【确定】,即可保存配置并退出 @@ -90,12 +92,449 @@ TDengine ODBC 支持两种连接 TDengine 数据库方式:Websocket 连接与 4.6 【密码】仅供第5步测试连接使用,选填,数据库用户密码,如果不填,TDengine 默认 taosdata + 4.7【兼容软件】支持对工业软件 KingSCADA、Kepware 等的兼容性适配,通常情况下,选择默认值 General 即可 + 5. 点【测试连接】测试连接情况,如果成功,提示"连接成功" 6. 点【确定】,即可保存配置并退出 7. 
也可以在第 2 步选择已经配置好的数据源名,通过【配置】按钮进入配置页面,修改已有配置 + +## 支持的平台 + +原生连接方式支持的平台和 TDengine Windows X64 版客户端驱动支持的平台一致。 +除此之外,WebSocket 连接方式还支持在 Windows X64 系统上运行的 32 位应用程序中使用。 + + +## 版本历史 + +| taos_odbc 版本 | 主要变化 | TDengine 版本 | +| :----------- | :-------------------------------------------------------------------------------------------------- | :---------------- | +| v1.1.0 | 1. 支持视图功能;
2. 支持 VARBINARY/GEOMETRY 数据类型; | 3.3.3.0及更高版本 | +| v1.0.2 | 支持 CP1252 字符编码; | 3.2.3.0及更高版本 | +| v1.0.1 | 1. 支持 DSN 设置 BI 模式,在 BI 模式下 TDengine 数据库不返回系统数据库和超级表子表信息;
2. 重构字符集转换模块,提升读写性能;
3. ODBC 数据源配置对话框中将默认连接方式修改为“WebSocket”;
4. ODBC 数据源配置对话框增加“测试连接”控件;
5. ODBC 数据源配置支持中文/英文界面; | - | +| v1.0.0.0 | 发布初始版本,支持与Tdengine数据库交互以读写数据,具体请参考“API 参考”一节 | 3.2.2.0及更高版本 | + + +## 数据类型映射 + +下表说明了 ODBC 连接器如何将服务器数据类型映射到默认的 SQL 和 C 数据类型。 + +| TDengine Type | SQL Type | C Type | +|--------------------|-------------------|-------------------| +| TIMESTAMP | SQL_TYPE_TIMESTAMP| SQL_C_TIMESTAMP | +| INT | SQL_INTEGER | SQL_C_SLONG | +| INT UNSIGNED | SQL_INTEGER | SQL_C_ULONG | +| BIGINT | SQL_BIGINT | SQL_C_SBIGINT | +| BIGINT UNSIGNED | SQL_BIGINT | SQL_C_UBIGINT | +| FLOAT | SQL_REAL | SQL_C_FLOAT | +| DOUBLE | SQL_REAL | SQL_C_DOUBLE | +| BINARY | SQL_BINARY | SQL_C_BINARY | +| SMALLINT | SQL_SMALLINT | SQL_C_SSHORT | +| SMALLINT UNSIGNED | SQL_SMALLINT | SQL_C_USHORT | +| TINYINT | SQL_TINYINT | SQL_C_STINYINT | +| TINYINT UNSIGNED | SQL_TINYINT | SQL_C_UTINYINT | +| BOOL | SQL_BIT | SQL_C_BIT | +| NCHAR | SQL_VARCHAR | SQL_C_CHAR | +| JSON | SQL_VARCHAR | SQL_C_CHAR | +| VARCHAR | SQL_VARCHAR | SQL_C_CHAR | +| GEOMETRY | SQL_VARBINARY | SQL_C_BINARY | +| VARBINARY | SQL_VARBINARY | SQL_C_BINARY | + + +## API 参考 + +本节按功能分类汇总了 ODBC API,关于完整的 ODBC API 参考,请访问 http://msdn.microsoft.com/en-us/library/ms714177.aspx 的ODBC程序员参考页面。 + +### 数据源和驱动程序管理 + +- API: ConfigDSN + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 配置数据源 + +- API: ConfigDriver + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 用于执行与特定驱动程序相关的安装和配置任务 + +- API: ConfigTranslator + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 用于解析DSN的配置,在DSN配置和实际数据库驱动程序配置之间进行翻译或转换 + + +### 连接到数据源 + +- API: SQLAllocHandle + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 分配环境、连接、语句或描述符句柄 + +- API: SQLConnect + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 通过数据源名称、用户 ID 和密码连接到特定驱动程序 + +- API: SQLDriverConnect + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 通过连接字符串连接到特定驱动程序,支持更多连接信息 + +- API: SQLBrowseConnect + - **是否支持**: 不支持 + - **标准**: ODBC + - **作用**: 用于发现和枚举连接到数据源所需的特性和属性值。每次调用 SQLBrowseConnect 都会返回属性和属性值的连续级别 + +- API: SQLAllocEnv + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.x 函数 SQLAllocEnv 已替换为 SQLAllocHandle + +- API: SQLAllocConnect + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.x 函数 SQLAllocConnect 已替换为 SQLAllocHandle + + +### 获取有关驱动程序和数据源的信息 + +- API: SQLDataSources + - **是否支持**: 不支持 + - **标准**: ISO 92 + - **作用**: 返回可用数据源的列表,由驱动程序管理器处理 + +- API: SQLDrivers + - **是否支持**: 不支持 + - **标准**: ISO 92 + - **作用**: 返回由驱动程序管理器处理的已安装驱动程序及其属性的列表 + +- API: SQLGetInfo + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 返回有关数据库环境的详细信息,如数据库产品名称、驱动程序名、数据库的SQL语法特性、连接能力等等 + +- API: SQLGetFunctions + - **是否支持**: 不支持 + - **标准**: ISO 92 + - **作用**: 用于查询驱动程序支持的函数 + +- API: SQLGetTypeInfo + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 返回有关支持的数据类型的信息 + + +### 设置和检索驱动程序属性 + +- API: SQLSetConnectAttr + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 设置连接属性,当设置SQL_ATTR_AUTOCOMMIT属性时,用于控制自动提交模式 + +- API: SQLGetConnectAttr + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 返回连接属性的值 + +- API: SQLSetConnectOption + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.0 函数 SQLSetConnectOption 已替换为 SQLSetConnectAttr + +- API: SQLGetConnectOption + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.0 函数 SQLSetConnectOption 已替换为 SQLGetConnectAttr + +- API: SQLSetEnvAttr + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 设置控制环境的属性 + +- API: SQLGetEnvAttr + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 返回环境属性的当前设置 + +- API: SQLSetStmtAttr + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 设置与语句相关的属性 + +- API: SQLGetStmtAttr + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 返回语句属性的当前设置 + +- API: SQLSetStmtOption + - 
**是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.0 函数 SQLSetStmtOption 已替换为 SQLSetStmtAttr + +- API: SQLGetStmtOption + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.0 函数 SQLSetStmtOption 已替换为 SQLGetStmtAttr + + +### 准备SQL请求 + +- API: SQLAllocStmt + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.x 函数 SQLAllocStmt 已替换为 SQLAllocHandle + +- API: SQLPrepare + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 用于预处理SQL语句,这通常是SQLExecute之前的一个步骤 + +- API: SQLBindCol + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 用于将结果集中的列绑定到应用程序缓冲区 + +- API: SQLBindParameter + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 用于将SQL语句的参数绑定到应用程序缓冲区 + +- API: SQLGetCursorName + - **是否支持**: 不支持 + - **标准**: ISO 92 + - **作用**: 返回与指定语句关联的游标名称 + +- API: SQLSetCursorName + - **是否支持**: 不支持 + - **标准**: ISO 92 + - **作用**: 设置游标名称,允许在查询中使用命名游标 + +- API: SQLSetScrollOptions + - **是否支持**: 不支持 + - **标准**: ODBC + - **作用**: 设置控制光标行为的选项 + + +### 提交请求 + +- API: SQLExecute + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 用于执行之前通过 SQLPrepare 准备好的SQL语句 + +- API: SQLExecDirect + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 用于执行包含SQL语句的字符串 + +- API: SQLNativeSql + - **是否支持**: 不支持 + - **标准**: ODBC + - **作用**: 用于将应用程序提供的SQL语句转换为数据库驱动程序的本机SQL语法 + +- API: SQLDescribeParam + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 返回语句中特定参数的描述 + +- API: SQLNumParams + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 用于查询预编译SQL语句中的参数数量 + +- API: SQLParamData + - **是否支持**: 不支持 + - **标准**: ISO 92 + - **作用**: 用于从参数数据流中获取下一个参数值 + +- API: SQLPutData + - **是否支持**: 不支持 + - **标准**: ISO 92 + - **作用**: 当使用流输入方式时,可以用于向输出参数发送数据块 + + +### 检索结果和关于结果的信息 + +- API: SQLRowCount + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 返回受插入或删除请求影响的行数 + +- API: SQLNumResultCols + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 返回结果集中的列数 + +- API: SQLDescribeCol + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 用于描述结果集中列的属性。它提供了关于列的数据类型、列名、列的最大宽度、小数位数和是否可为空等信息 + +- API: SQLColAttribute + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 返回结果集中列的描述符信息,如标题、排序规则等 + +- API: SQLColAttributes + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.0 函数 SQLColAttributes 已替换为 SQLColAttribute + +- API: SQLGetData + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 用于从结果集中的当前行获取特定列的数据 + +- API: SQLMoreResults + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 多个结果集的 SQL 语句执行后(例如:一个批处理或存储过程),移动到下一个结果集 + +- API: SQLFetch + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 用于从结果集中提取下一行数据,并返回所有绑定列的数据 + +- API: SQLFetchScroll + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 用于从结果集中提取指定的数据行集,并返回所有绑定列的数据 + +- API: SQLExtendedFetch + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,SQLExtendedFetch 已替换为 SQLFetchScroll + +- API: SQLSetPos + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 设置行集中的游标位置,并允许应用程序更新数据集中的行 + +- API: SQLBulkOperations + - **是否支持**: 不支持 + - **标准**: ODBC + - **作用**: 执行批量插入和批量书签操作,包括更新、删除和按书签提取 + + +### 检索错误或诊断信息 + +- API: SQLError + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.x 函数 SQLError 已替换为 SQLGetDiagRec + +- API: SQLGetDiagField + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 返回附加诊断信息(单条诊断结果) + +- API: SQLGetDiagRec + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 返回附加诊断信息(多条诊断结果) + + +### 获取有关数据源的系统表项的信息 + +- API: SQLColumnPrivileges + - **是否支持**: 不支持 + - **标准**: ODBC + - **作用**: 用于检索指定表中列的权限信息,如哪些用户或角色拥有对特定列的读取、插入、更新或删除权限 + +- API: SQLColumns + - **是否支持**: 支持 + - **标准**: X/Open + - **作用**: 返回指定表中的列名列表 + +- API: SQLForeignKeys + - **是否支持**: 不支持 + - **标准**: ODBC + - **作用**: 检索外键关系的详细信息 + +- API: SQLPrimaryKeys + - **是否支持**: 支持 + - **标准**: ODBC 
+ - **作用**: 返回构成表主键的列名列表 + +- API: SQLSpecialColumns + - **是否支持**: 不支持 + - **标准**: X/Open + - **作用**: 返回数据库中特殊列的信息,如唯一键或索引列 + +- API: SQLStatistics + - **是否支持**: 不支持 + - **标准**: ISO 92 + - **作用**: 返回关于表的统计信息,如行数、列数、平均行宽等 + +- API: SQLTablePrivileges + - **是否支持**: 不支持 + - **标准**: ODBC + - **作用**: 返回用户在特定表上的权限,如SELECT、INSERT、UPDATE等 + +- API: SQLTables + - **是否支持**: 支持 + - **标准**: X/Open + - **作用**: 返回存储在数据源的当前数据库中的表信息 + +- API: SQLProcedures + - **是否支持**: 不支持 + - **标准**: ODBC + - **作用**: 返回数据库中可用的存储过程信息,包括名称和类型 + +- API: SQLProcedureColumns + - **是否支持**: 不支持 + - **标准**: ODBC + - **作用**: 返回存储过程的列信息,包括输入输出参数的详细信息 + + +### 执行事务 + +- API: SQLTransact + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.x 函数 SQLTransact 已替换为 SQLEndTran + +- API: SQLEndTran + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 用于提交或回滚事务,TDengine 不支持事务,因此不支持回滚操作 + + +### 终止连接 + +- API: SQLDisconnect + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 断开数据库连接 + +- API: SQLFreeHandle + - **是否支持**: 支持 + - **标准**: ISO 92 + - **作用**: 释放与特定环境、连接、语句或描述符句柄关联的资源 + +- API: SQLFreeConnect + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.0 函数 SQLFreeConnect 已替换为 SQLFreeHandle + +- API: SQLFreeEnv + - **是否支持**: 不支持 + - **标准**: 弃用 + - **作用**: 在 ODBC 3.x 中,ODBC 2.0 函数 SQLFreeEnv 已替换为 SQLFreeHandle + +- API: SQLFreeStmt + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 结束语句处理,丢弃挂起的结果,并且可以选择释放与语句句柄关联的所有资源 + +- API: SQLCloseCursor + - **是否支持**: 支持 + - **标准**: ODBC + - **作用**: 关闭与当前语句句柄关联的游标,并释放游标所使用的所有资源 + + ## 与第三方集成 作为使用 TDengine ODBC driver 的一个示例,你可以使用 Power BI 与 TDengine 分析时序数据。更多细节请参考 [Power BI](../../../third-party/bi/powerbi) diff --git a/docs/zh/14-reference/05-connector/assets/odbc-native-config-zh.webp b/docs/zh/14-reference/05-connector/assets/odbc-native-config-zh.webp index 5589bc6cf7..ee8c48b4ef 100644 Binary files a/docs/zh/14-reference/05-connector/assets/odbc-native-config-zh.webp and b/docs/zh/14-reference/05-connector/assets/odbc-native-config-zh.webp differ diff --git a/docs/zh/14-reference/05-connector/assets/odbc-ws-config-zh.webp b/docs/zh/14-reference/05-connector/assets/odbc-ws-config-zh.webp index 6a9cece9d9..70c0fc8b8d 100644 Binary files a/docs/zh/14-reference/05-connector/assets/odbc-ws-config-zh.webp and b/docs/zh/14-reference/05-connector/assets/odbc-ws-config-zh.webp differ diff --git a/include/common/tmsg.h b/include/common/tmsg.h index b9a2eed7a1..f0c8b78dda 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1685,6 +1685,7 @@ typedef struct { typedef struct { int32_t openVnodes; + int32_t dropVnodes; int32_t totalVnodes; int32_t masterNum; int64_t numOfSelectReqs; diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h index 7a845e43c3..8e88a1a278 100644 --- a/include/libs/executor/storageapi.h +++ b/include/libs/executor/storageapi.h @@ -382,7 +382,7 @@ typedef struct SStateStore { int32_t (*streamStateCountWinAddIfNotExist)(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** ppVal, int32_t* pVLen, int32_t* pWinCode); - int32_t (*streamStateCountWinAdd)(SStreamState* pState, SSessionKey* pKey, void** pVal, int32_t* pVLen); + int32_t (*streamStateCountWinAdd)(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen); int32_t (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark, bool igUp, int8_t pkType, int32_t pkLen, SUpdateInfo** ppInfo); diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h index 46874b7c65..f9469a449d 100644 --- 
a/include/libs/stream/streamState.h +++ b/include/libs/stream/streamState.h @@ -87,7 +87,7 @@ void streamStateFreeVal(void* val); // count window int32_t streamStateCountWinAddIfNotExist(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** ppVal, int32_t* pVLen, int32_t* pWinCode); -int32_t streamStateCountWinAdd(SStreamState* pState, SSessionKey* pKey, void** pVal, int32_t* pVLen); +int32_t streamStateCountWinAdd(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen); SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key); SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key); diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index cb10aeb6a0..e6d750468e 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -70,6 +70,8 @@ typedef struct SActiveCheckpointInfo SActiveCheckpointInfo; #define SSTREAM_TASK_NEED_CONVERT_VER 2 #define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 +extern int32_t streamMetaId; + enum { STREAM_STATUS__NORMAL = 0, STREAM_STATUS__STOP, @@ -135,11 +137,6 @@ enum { STREAM_QUEUE__PROCESSING, }; -enum { - STREAM_META_WILL_STOP = 1, - STREAM_META_OK_TO_STOP = 2, -}; - typedef enum EStreamTaskEvent { TASK_EVENT_INIT = 0x1, TASK_EVENT_INIT_SCANHIST = 0x2, @@ -282,7 +279,6 @@ typedef enum { } EConsenChkptStatus; typedef struct SConsenChkptInfo { -// bool alreadySendChkptId; EConsenChkptStatus status; int64_t statusTs; int32_t consenChkptTransId; diff --git a/include/libs/stream/tstreamFileState.h b/include/libs/stream/tstreamFileState.h index 6f1a1b3b98..a265ae7e60 100644 --- a/include/libs/stream/tstreamFileState.h +++ b/include/libs/stream/tstreamFileState.h @@ -110,7 +110,7 @@ int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, ch // count window int32_t getCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen, int32_t* pWinCode); -int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen); +int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen); // function int32_t getSessionRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen, void** pVal, int32_t* pVLen, diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index 07d56f9b07..50c096258e 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -47,7 +47,7 @@ extern "C" { #define SYNC_HEARTBEAT_SLOW_MS 1500 #define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500 #define SYNC_SNAP_RESEND_MS 1000 * 60 -#define SYNC_SNAP_TIMEOUT_MS 1000 * 300 +#define SYNC_SNAP_TIMEOUT_MS 1000 * 180 #define SYNC_VND_COMMIT_MIN_MS 3000 diff --git a/include/os/osString.h b/include/os/osString.h index 5f211ad2ee..30bfd61b62 100644 --- a/include/os/osString.h +++ b/include/os/osString.h @@ -51,7 +51,12 @@ typedef enum { M2C = 0, C2M } ConvType; #define strtod STR_TO_LD_FUNC_TAOS_FORBID #define strtold STR_TO_D_FUNC_TAOS_FORBID #define strtof STR_TO_F_FUNC_TAOS_FORBID + +#ifdef strndup +#undef strndup +#endif #define strndup STR_TO_F_FUNC_TAOS_FORBID + #endif #define tstrncpy(dst, src, size) \ diff --git a/include/util/tlrucache.h b/include/util/tlrucache.h index e5e59d0876..97c51b5b88 100644 --- a/include/util/tlrucache.h +++ b/include/util/tlrucache.h @@ -25,6 +25,7 @@ extern "C" { typedef struct SLRUCache SLRUCache; typedef void (*_taos_lru_deleter_t)(const 
void *key, size_t keyLen, void *value, void *ud); +typedef void (*_taos_lru_overwriter_t)(const void *key, size_t keyLen, void *value, void *ud); typedef int (*_taos_lru_functor_t)(const void *key, size_t keyLen, void *value, void *ud); typedef struct LRUHandle LRUHandle; @@ -42,7 +43,8 @@ SLRUCache *taosLRUCacheInit(size_t capacity, int numShardBits, double highPriPoo void taosLRUCacheCleanup(SLRUCache *cache); LRUStatus taosLRUCacheInsert(SLRUCache *cache, const void *key, size_t keyLen, void *value, size_t charge, - _taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority, void *ud); + _taos_lru_deleter_t deleter, _taos_lru_overwriter_t overwriter, LRUHandle **handle, + LRUPriority priority, void *ud); LRUHandle *taosLRUCacheLookup(SLRUCache *cache, const void *key, size_t keyLen); void taosLRUCacheErase(SLRUCache *cache, const void *key, size_t keyLen); diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index ea19125bf5..13447bd5e4 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -616,8 +616,8 @@ function update_TDengine() { [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}" else - echo -e "${GREEN_DARK}To start service ${NC}: launchctl start com.tdengine.taosd${NC}" - echo -e "${GREEN_DARK}To start Adapter ${NC}: launchctl start com.tdengine.taosadapter${NC}" + echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" + echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" fi fi @@ -668,8 +668,8 @@ function install_TDengine() { [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}" else - echo -e "${GREEN_DARK}To start service ${NC}: launchctl start com.tdengine.taosd${NC}" - echo -e "${GREEN_DARK}To start Adapter ${NC}: launchctl start com.tdengine.taosadapter${NC}" + echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" + echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" fi fi diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index d0ea7055de..8ce4685716 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -23,12 +23,12 @@ #include "tglobal.h" #include "tmsgtype.h" -#define RAW_NULL_CHECK(c) \ - do { \ - if (c == NULL) { \ - code = terrno; \ - goto end; \ - } \ +#define RAW_NULL_CHECK(c) \ + do { \ + if (c == NULL) { \ + code = terrno; \ + goto end; \ + } \ } while (0) #define RAW_FALSE_CHECK(c) \ @@ -47,7 +47,7 @@ } \ } while (0) -#define LOG_ID_TAG "connId:0x%" PRIx64 ",qid:0x%" PRIx64 +#define LOG_ID_TAG "connId:0x%" PRIx64 ",QID:0x%" PRIx64 #define LOG_ID_VALUE *(int64_t*)taos, pRequest->requestId #define TMQ_META_VERSION "1.0" @@ -1188,7 +1188,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { pCreateReq->ctb.suid = pTableMeta->uid; SArray* pTagVals = NULL; - code = tTagToValArray((STag *)pCreateReq->ctb.pTag, &pTagVals); + code = tTagToValArray((STag*)pCreateReq->ctb.pTag, &pTagVals); if (code != TSDB_CODE_SUCCESS) { taosMemoryFreeClear(pTableMeta); goto end; @@ -1206,18 +1206,19 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { if (strcmp(tag->name, tName) == 0 && tag->type != TSDB_DATA_TYPE_JSON) { STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i); if (pTagVal) { - if 
(pTagVal->cid != tag->colId){ + if (pTagVal->cid != tag->colId) { pTagVal->cid = tag->colId; rebuildTag = true; } } else { - uError("create tb invalid data %s, size:%d index:%d cid:%d", pCreateReq->name, (int)taosArrayGetSize(pTagVals), i, tag->colId); + uError("create tb invalid data %s, size:%d index:%d cid:%d", pCreateReq->name, + (int)taosArrayGetSize(pTagVals), i, tag->colId); } } } } taosMemoryFreeClear(pTableMeta); - if (rebuildTag){ + if (rebuildTag) { STag* ppTag = NULL; code = tTagNew(pTagVals, 1, false, &ppTag); taosArrayDestroy(pTagVals); @@ -1815,7 +1816,7 @@ end: static int32_t buildCreateTbMap(STaosxRsp* rsp, SHashObj* pHashObj) { // find schema data info - int32_t code = 0; + int32_t code = 0; SVCreateTbReq pCreateReq = {0}; SDecoder decoderTmp = {0}; @@ -1826,15 +1827,16 @@ static int32_t buildCreateTbMap(STaosxRsp* rsp, SHashObj* pHashObj) { RAW_NULL_CHECK(lenTmp); tDecoderInit(&decoderTmp, *dataTmp, *lenTmp); - RAW_RETURN_CHECK (tDecodeSVCreateTbReq(&decoderTmp, &pCreateReq)); + RAW_RETURN_CHECK(tDecodeSVCreateTbReq(&decoderTmp, &pCreateReq)); if (pCreateReq.type != TSDB_CHILD_TABLE) { code = TSDB_CODE_INVALID_MSG; goto end; } - if (taosHashGet(pHashObj, pCreateReq.name, strlen(pCreateReq.name)) == NULL){ - RAW_RETURN_CHECK(taosHashPut(pHashObj, pCreateReq.name, strlen(pCreateReq.name), &pCreateReq, sizeof(SVCreateTbReq))); - } else{ + if (taosHashGet(pHashObj, pCreateReq.name, strlen(pCreateReq.name)) == NULL) { + RAW_RETURN_CHECK( + taosHashPut(pHashObj, pCreateReq.name, strlen(pCreateReq.name), &pCreateReq, sizeof(SVCreateTbReq))); + } else { tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); pCreateReq = (SVCreateTbReq){0}; } @@ -1927,7 +1929,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) // find schema data info SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); - SVgroupInfo vg = {0}; + SVgroupInfo vg = {0}; RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg)); if (pCreateReqDst) { // change stable name to get meta (void)strcpy(pName.tname, pCreateReqDst->ctb.stbName); @@ -1957,10 +1959,10 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) fields[i].bytes = pSW->pSchema[i].bytes; tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name)); } - void* rawData = getRawDataFromRes(pRetrieve); - char err[ERR_MSG_LEN] = {0}; + void* rawData = getRawDataFromRes(pRetrieve); + char err[ERR_MSG_LEN] = {0}; SVCreateTbReq* pCreateReqTmp = NULL; - if (pCreateReqDst){ + if (pCreateReqDst) { RAW_RETURN_CHECK(cloneSVreateTbReq(pCreateReqDst, &pCreateReqTmp)); } code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqTmp, fields, pSW->nCols, true, err, ERR_MSG_LEN); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index d8a66f82bf..7a67522231 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -867,6 +867,7 @@ int32_t blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int32_t r code = blockDataEnsureCapacity(pDst, rowCount); if (code) { + blockDataDestroy(pDst); return code; } diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 6b01b92445..0e1a4bc98e 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -77,6 +77,7 @@ typedef struct { typedef struct { int32_t vnodeNum; int32_t opened; + int32_t dropped; int32_t failed; bool updateVnodesList; 
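    // index of this worker thread within the vnode-open thread pool (reported in the per-thread progress logs)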
int32_t threadIndex; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index d081e70ff0..3cf0382eba 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -311,6 +311,8 @@ static void *vmOpenVnodeInThread(void *param) { snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, pCfg->vgId); vnodeDestroy(pCfg->vgId, path, pMgmt->pTfs, 0); pThread->updateVnodesList = true; + pThread->dropped++; + (void)atomic_add_fetch_32(&pMgmt->state.dropVnodes, 1); continue; } @@ -352,8 +354,8 @@ static void *vmOpenVnodeInThread(void *param) { (void)atomic_add_fetch_32(&pMgmt->state.openVnodes, 1); } - dInfo("thread:%d, numOfVnodes:%d, opened:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened, - pThread->failed); + dInfo("thread:%d, numOfVnodes:%d, opened:%d dropped:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, + pThread->opened, pThread->dropped, pThread->failed); return NULL; } @@ -427,7 +429,7 @@ static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) { taosMemoryFree(threads); taosMemoryFree(pCfgs); - if (pMgmt->state.openVnodes != pMgmt->state.totalVnodes) { + if ((pMgmt->state.openVnodes + pMgmt->state.dropVnodes) != pMgmt->state.totalVnodes) { dError("there are total vnodes:%d, opened:%d", pMgmt->state.totalVnodes, pMgmt->state.openVnodes); terrno = TSDB_CODE_VND_INIT_FAILED; return -1; @@ -774,6 +776,7 @@ static int32_t vmStartVnodes(SVnodeMgmt *pMgmt) { } pMgmt->state.openVnodes = 0; + pMgmt->state.dropVnodes = 0; dInfo("restore %d vnodes with %d threads", numOfVnodes, threadNum); for (int32_t t = 0; t < threadNum; ++t) { diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 3ec99f6e44..69d3de25fc 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -165,68 +165,62 @@ void mndCleanupStream(SMnode *pMnode) { } SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw) { - int32_t code = 0; - int32_t lino = 0; - terrno = TSDB_CODE_OUT_OF_MEMORY; - - SSdbRow *pRow = NULL; + int32_t code = 0; + int32_t lino = 0; + SSdbRow * pRow = NULL; SStreamObj *pStream = NULL; - void *buf = NULL; + void * buf = NULL; int8_t sver = 0; + int32_t tlen; + int32_t dataPos = 0; - if (sdbGetRawSoftVer(pRaw, &sver) != 0) { - goto STREAM_DECODE_OVER; - } + code = sdbGetRawSoftVer(pRaw, &sver); + TSDB_CHECK_CODE(code, lino, _over); if (sver < 1 || sver > MND_STREAM_VER_NUMBER) { - terrno = 0; mError("stream read invalid ver, data ver: %d, curr ver: %d", sver, MND_STREAM_VER_NUMBER); - goto STREAM_DECODE_OVER; + goto _over; } pRow = sdbAllocRow(sizeof(SStreamObj)); - if (pRow == NULL) { - goto STREAM_DECODE_OVER; - } + TSDB_CHECK_NULL(pRow, code, lino, _over, terrno); pStream = sdbGetRowObj(pRow); - if (pStream == NULL) { - goto STREAM_DECODE_OVER; - } + TSDB_CHECK_NULL(pStream, code, lino, _over, terrno); - int32_t tlen; - int32_t dataPos = 0; - SDB_GET_INT32(pRaw, dataPos, &tlen, STREAM_DECODE_OVER); + SDB_GET_INT32(pRaw, dataPos, &tlen, _over); buf = taosMemoryMalloc(tlen + 1); - if (buf == NULL) { - goto STREAM_DECODE_OVER; - } + TSDB_CHECK_NULL(buf, code, lino, _over, terrno); - SDB_GET_BINARY(pRaw, dataPos, buf, tlen, STREAM_DECODE_OVER); + SDB_GET_BINARY(pRaw, dataPos, buf, tlen, _over); SDecoder decoder; tDecoderInit(&decoder, buf, tlen + 1); - if (tDecodeSStreamObj(&decoder, pStream, sver) < 0) { - tDecoderClear(&decoder); - goto STREAM_DECODE_OVER; - } + code = tDecodeSStreamObj(&decoder, pStream, sver); 
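  // the decoder is released unconditionally below; when decoding fails, the partially
  // populated stream object is freed before the error is reported at _over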
tDecoderClear(&decoder); - terrno = TSDB_CODE_SUCCESS; - -STREAM_DECODE_OVER: - taosMemoryFreeClear(buf); - if (terrno != TSDB_CODE_SUCCESS) { - char *p = (pStream == NULL) ? "null" : pStream->name; - mError("stream:%s, failed to decode from raw:%p since %s", p, pRaw, tstrerror(terrno)); - taosMemoryFreeClear(pRow); - return NULL; + if (code < 0) { + tFreeStreamObj(pStream); } - mTrace("stream:%s, decode from raw:%p, row:%p, checkpoint:%" PRId64, pStream->name, pRaw, pStream, - pStream->checkpointId); - return pRow; +_over: + taosMemoryFreeClear(buf); + + if (code != TSDB_CODE_SUCCESS) { + char *p = (pStream == NULL) ? "null" : pStream->name; + mError("stream:%s, failed to decode from raw:%p since %s at:%d", p, pRaw, tstrerror(code), lino); + taosMemoryFreeClear(pRow); + + terrno = code; + return NULL; + } else { + mTrace("stream:%s, decode from raw:%p, row:%p, checkpoint:%" PRId64, pStream->name, pRaw, pStream, + pStream->checkpointId); + + terrno = 0; + return pRow; + } } static int32_t mndStreamActionInsert(SSdb *pSdb, SStreamObj *pStream) { diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 6f7b24eab2..40bb99d6b5 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -1637,6 +1637,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool pTrans->code = code; bool continueExec = true; if (code != 0 && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) { + taosMsleep(100); continueExec = true; } else { continueExec = false; diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c index 36068d1447..93347c810f 100644 --- a/source/dnode/vnode/src/meta/metaCache.c +++ b/source/dnode/vnode/src/meta/metaCache.c @@ -646,7 +646,7 @@ int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int } // add to cache. - (void)taosLRUCacheInsert(pCache, key, TAG_FILTER_RES_KEY_LEN, pPayload, payloadLen, freeUidCachePayload, NULL, + (void)taosLRUCacheInsert(pCache, key, TAG_FILTER_RES_KEY_LEN, pPayload, payloadLen, freeUidCachePayload, NULL, NULL, TAOS_LRU_PRIORITY_LOW, NULL); _end: (void)taosThreadMutexUnlock(pLock); @@ -804,7 +804,7 @@ int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int } // add to cache. 
- (void)taosLRUCacheInsert(pCache, key, TAG_FILTER_RES_KEY_LEN, pPayload, payloadLen, freeTbGroupCachePayload, NULL, + (void)taosLRUCacheInsert(pCache, key, TAG_FILTER_RES_KEY_LEN, pPayload, payloadLen, freeTbGroupCachePayload, NULL, NULL, TAOS_LRU_PRIORITY_LOW, NULL); _end: (void)taosThreadMutexUnlock(pLock); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 14207e7fb3..484c5c0a16 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1523,6 +1523,7 @@ int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) { } memcpy(info.pTagVal, pCur->pVal, pCur->vLen); if (taosArrayPush(pUidTagInfo, &info) == NULL) { + taosMemoryFreeClear(info.pTagVal); metaCloseCtbCursor(pCur); taosHashCleanup(pSepecifiedUidMap); return terrno; diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index b0bf89029e..3ec269ec22 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -16,8 +16,13 @@ #include "tq.h" #include "vnd.h" -#define MAX_REPEAT_SCAN_THRESHOLD 3 -#define SCAN_WAL_IDLE_DURATION 100 +#define MAX_REPEAT_SCAN_THRESHOLD 3 +#define SCAN_WAL_IDLE_DURATION 100 + +typedef struct SBuildScanWalMsgParam { + int64_t metaId; + int32_t numOfTasks; +} SBuildScanWalMsgParam; static int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle); static int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId); @@ -31,13 +36,12 @@ int32_t tqScanWal(STQ* pTq) { SStreamMeta* pMeta = pTq->pStreamMeta; int32_t vgId = pMeta->vgId; int64_t st = taosGetTimestampMs(); + int32_t numOfTasks = 0; + bool shouldIdle = true; tqDebug("vgId:%d continue to check if data in wal are available, scanCounter:%d", vgId, pMeta->scanInfo.scanCounter); // check all tasks - int32_t numOfTasks = 0; - bool shouldIdle = true; - int32_t code = doScanWalForAllTasks(pMeta, &shouldIdle); if (code) { tqError("vgId:%d failed to start all tasks, try next time, code:%s", vgId, tstrerror(code)); @@ -68,54 +72,61 @@ int32_t tqScanWal(STQ* pTq) { return code; } -typedef struct SBuildScanWalMsgParam { - STQ* pTq; - int32_t numOfTasks; -} SBuildScanWalMsgParam; - static void doStartScanWal(void* param, void* tmrId) { + int32_t vgId = 0; + STQ* pTq = NULL; + int32_t code = 0; + SBuildScanWalMsgParam* pParam = (SBuildScanWalMsgParam*)param; - STQ* pTq = pParam->pTq; - int32_t vgId = pTq->pStreamMeta->vgId; + SStreamMeta* pMeta = taosAcquireRef(streamMetaId, pParam->metaId); + if (pMeta == NULL) { + tqError("metaRid:%" PRId64 " not valid now, stream meta has been freed", pParam->metaId); + taosMemoryFree(pParam); + return; + } + + vgId = pMeta->vgId; + pTq = pMeta->ahandle; + tqDebug("vgId:%d create msg to start wal scan, numOfTasks:%d, vnd restored:%d", vgId, pParam->numOfTasks, pTq->pVnode->restored); - int32_t code = streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); - taosMemoryFree(pParam); - + code = streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA); if (code) { tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code)); } + + code = taosReleaseRef(streamMetaId, pParam->metaId); + if (code) { + tqError("vgId:%d failed to release ref for streamMeta, rid:%" PRId64 " code:%s", vgId, pParam->metaId, + tstrerror(code)); + } + + taosMemoryFree(pParam); } int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration) { - SStreamMeta* 
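/*
 * Sketch of the timer-callback ownership pattern used in doStartScanWal above
 * (hypothetical names, plain C): the parameter carries only an integer id, the
 * callback re-acquires the object from a ref-counted registry, bails out if it
 * is already gone, and frees the parameter on every path. The registry stubs
 * below stand in for taosAcquireRef/taosReleaseRef and are not real APIs.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int64_t metaId;      /* registry id, never a raw pointer */
  int32_t numOfTasks;
} scan_param_t;

static void *registry_acquire(int64_t id) { (void)id; return NULL; }  /* stub */
static void  registry_release(int64_t id) { (void)id; }               /* stub */

static void on_timer(void *arg) {
  scan_param_t *p    = arg;
  void         *meta = registry_acquire(p->metaId);
  if (meta == NULL) {                 /* object freed before the timer fired */
    fprintf(stderr, "meta %" PRId64 " gone, skip scan\n", p->metaId);
    free(p);
    return;
  }

  /* ... schedule the wal-scan task using `meta` ... */

  registry_release(p->metaId);        /* drop the ref taken for this callback */
  free(p);                            /* the callback owns the parameter */
}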
pMeta = pTq->pStreamMeta; - int32_t code = 0; - int32_t vgId = TD_VID(pTq->pVnode); + SStreamMeta* pMeta = pTq->pStreamMeta; + int32_t code = 0; + int32_t vgId = TD_VID(pTq->pVnode); + tmr_h pTimer = NULL; + SBuildScanWalMsgParam* pParam = NULL; - SBuildScanWalMsgParam* pParam = taosMemoryMalloc(sizeof(SBuildScanWalMsgParam)); + pParam = taosMemoryMalloc(sizeof(SBuildScanWalMsgParam)); if (pParam == NULL) { return terrno; } - pParam->pTq = pTq; + pParam->metaId = pMeta->rid; pParam->numOfTasks = numOfTasks; - tmr_h pTimer = NULL; code = streamTimerGetInstance(&pTimer); if (code) { tqError("vgId:%d failed to get tmr ctrl during sched scan wal", vgId); - return code; - } - - if (pMeta->scanInfo.scanTimer == NULL) { - pMeta->scanInfo.scanTimer = taosTmrStart(doStartScanWal, idleDuration, pParam, pTimer); + taosMemoryFree(pParam); } else { - bool ret = taosTmrReset(doStartScanWal, idleDuration, pParam, pTimer, &pMeta->scanInfo.scanTimer); - if (!ret) { -// tqError("vgId:%d failed to start scan wal in:%dms", vgId, idleDuration); - } + streamTmrStart(doStartScanWal, idleDuration, pParam, pTimer, &pMeta->scanInfo.scanTimer, vgId, "scan-wal-fut"); } return code; @@ -124,8 +135,8 @@ int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration) { int32_t tqScanWalAsync(STQ* pTq, bool ckPause) { int32_t vgId = TD_VID(pTq->pVnode); SStreamMeta* pMeta = pTq->pStreamMeta; - - bool alreadyRestored = pTq->pVnode->restored; + bool alreadyRestored = pTq->pVnode->restored; + int32_t numOfTasks = 0; // do not launch the stream tasks, if it is a follower or not restored vnode. if (!(vnodeIsRoleLeader(pTq->pVnode) && alreadyRestored)) { @@ -134,7 +145,7 @@ int32_t tqScanWalAsync(STQ* pTq, bool ckPause) { streamMetaWLock(pMeta); - int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); + numOfTasks = taosArrayGetSize(pMeta->pTaskList); if (numOfTasks == 0) { tqDebug("vgId:%d no stream tasks existed to run", vgId); streamMetaWUnLock(pMeta); @@ -378,13 +389,13 @@ int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta, bool* pScanIdle) { numOfTasks = taosArrayGetSize(pTaskList); for (int32_t i = 0; i < numOfTasks; ++i) { - STaskId* pTaskId = taosArrayGet(pTaskList, i); + STaskId* pTaskId = taosArrayGet(pTaskList, i); if (pTaskId == NULL) { continue; } SStreamTask* pTask = NULL; - int32_t code = streamMetaAcquireTask(pStreamMeta, pTaskId->streamId, pTaskId->taskId, &pTask); + int32_t code = streamMetaAcquireTask(pStreamMeta, pTaskId->streamId, pTaskId->taskId, &pTask); if (pTask == NULL || code != 0) { continue; } diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index 30be253b65..6b7e857120 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -84,7 +84,6 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) { code = qSetTaskId(pTask->exec.pExecutor, pTask->id.taskId, pTask->id.streamId); if (code) { - return code; } } @@ -363,7 +362,7 @@ int32_t tqStreamTaskProcessDispatchReq(SStreamMeta* pMeta, SRpcMsg* pMsg) { } pRspHead->vgId = htonl(req.upstreamNodeId); - if(pRspHead->vgId == 0) { + if (pRspHead->vgId == 0) { tqError("vgId:%d invalid dispatch msg from upstream to task:0x%x", pMeta->vgId, req.taskId); return TSDB_CODE_INVALID_MSG; } @@ -460,7 +459,7 @@ int32_t tqStreamTaskProcessRetrieveReq(SStreamMeta* pMeta, SRpcMsg* pMsg) { if (code != TSDB_CODE_SUCCESS) { // return error not send rsp manually tqError("s-task:0x%x vgId:%d failed to process retrieve request from 0x%x, code:%s", req.dstTaskId, 
req.dstNodeId, req.srcTaskId, tstrerror(code)); - } else { // send rsp manually only on success. + } else { // send rsp manually only on success. SRpcMsg rsp = {.info = pMsg->info, .code = 0}; streamTaskSendRetrieveRsp(&req, &rsp); } @@ -515,7 +514,7 @@ int32_t tqStreamTaskProcessCheckRsp(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLe } tDecoderClear(&decoder); - tqDebug("tq task:0x%x (vgId:%d) recv check rsp(qid:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d", rsp.upstreamTaskId, + tqDebug("tq task:0x%x (vgId:%d) recv check rsp(QID:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d", rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status); if (!isLeader) { @@ -1272,7 +1271,7 @@ int32_t tqStreamTaskProcessConsenChkptIdReq(SStreamMeta* pMeta, SRpcMsg* pMsg) { streamMutexLock(&pTask->lock); if (pTask->chkInfo.checkpointId < req.checkpointId) { - tqFatal("s-task:%s vgId:%d invalid consensus-checkpointId:%" PRId64 ", greater than existed checkpointId:%"PRId64, + tqFatal("s-task:%s vgId:%d invalid consensus-checkpointId:%" PRId64 ", greater than existed checkpointId:%" PRId64, pTask->id.idStr, vgId, req.checkpointId, pTask->chkInfo.checkpointId); streamMutexUnlock(&pTask->lock); diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index e3382cde32..85f74b1672 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -22,6 +22,12 @@ #define ROCKS_BATCH_SIZE (4096) +void tsdbLRUCacheRelease(SLRUCache *cache, LRUHandle *handle, bool eraseIfLastRef) { + if (!taosLRUCacheRelease(cache, handle, eraseIfLastRef)) { + tsdbTrace(" release lru cache failed"); + } +} + static int32_t tsdbOpenBCache(STsdb *pTsdb) { int32_t code = 0, lino = 0; int32_t szPage = pTsdb->pVnode->config.tsdbPageSize; @@ -579,7 +585,7 @@ static void tsdbCacheDeleter(const void *key, size_t klen, void *value, void *ud if (pLastCol->dirty) { if (tsdbCacheFlushDirty(key, klen, pLastCol, ud) != 0) { STsdb *pTsdb = (STsdb *)ud; - tsdbError("tsdb/cache: vgId:%d, flush cache %s failed at line %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__); + tsdbTrace("tsdb/cache: vgId:%d, flush cache %s failed at line %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__); } } @@ -597,6 +603,13 @@ static void tsdbCacheDeleter(const void *key, size_t klen, void *value, void *ud taosMemoryFree(value); } +static void tsdbCacheOverWriter(const void *key, size_t klen, void *value, void *ud) { + SLastCol *pLastCol = (SLastCol *)value; + pLastCol->dirty = 0; +} + +static int32_t tsdbCachePutToLRU(STsdb *pTsdb, SLastKey *pLastKey, SLastCol *pLastCol, int8_t dirty); + static int32_t tsdbCacheNewTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, int8_t col_type, int8_t lflag) { int32_t code = 0, lino = 0; @@ -606,27 +619,10 @@ static int32_t tsdbCacheNewTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, i SLastCol emptyCol = { .rowKey = emptyRowKey, .colVal = COL_VAL_NONE(cid, col_type), .dirty = 1, .cacheStatus = TSDB_LAST_CACHE_VALID}; - SLastCol *pLastCol = taosMemoryCalloc(1, sizeof(SLastCol)); - if (!pLastCol) { - return terrno; - } - - size_t charge = 0; - *pLastCol = emptyCol; - TAOS_CHECK_EXIT(tsdbCacheReallocSLastCol(pLastCol, &charge)); - SLastKey *pLastKey = &(SLastKey){.lflag = lflag, .uid = uid, .cid = cid}; - LRUStatus status = taosLRUCacheInsert(pCache, pLastKey, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, NULL, - TAOS_LRU_PRIORITY_LOW, pTsdb); - if (status != TAOS_LRU_STATUS_OK) { - tsdbError("vgId:%d, %s failed at 
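/*
 * Sketch (hypothetical cache hooks) of why an overwrite callback is added next
 * to the delete callback: when a newer value replaces an LRU entry, the old
 * value's dirty flag is cleared first, so the eviction path never flushes a
 * value that has already been superseded. This mirrors the intent of
 * tsdbCacheOverWriter above; the names below are illustrative only.
 */
#include <stdlib.h>

typedef struct {
  int dirty;
  /* ... payload ... */
} last_col_t;

/* delete callback: flush to the backing store only if the value is still dirty */
static void on_evict(void *value) {
  last_col_t *c = value;
  if (c->dirty) {
    /* flush c here */
  }
  free(c);
}

/* overwrite callback: the incoming copy wins, never flush the old one */
static void on_overwrite(void *value) {
  ((last_col_t *)value)->dirty = 0;
}

/* a cache insert would then take both hooks, which is the extra argument the
 * updated taosLRUCacheInsert call sites pass (NULL where no hook is needed). */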
line %d status %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__, status); - code = TSDB_CODE_FAILED; - pLastCol = NULL; - } - -_exit: - if (TSDB_CODE_SUCCESS != code) { - taosMemoryFree(pLastCol); + code = tsdbCachePutToLRU(pTsdb, pLastKey, &emptyCol, 1); + if (code) { + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); } TAOS_RETURN(code); @@ -723,9 +719,13 @@ static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, { SLastCol *pLastCol = NULL; code = tsdbCacheDeserialize(values_list[0], values_list_sizes[0], &pLastCol); - if (code != TSDB_CODE_SUCCESS) { - tsdbWarn("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); + if (code == TSDB_CODE_INVALID_PARA) { + tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + } else if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; } if (NULL != pLastCol) { rocksdb_writebatch_delete(wb, keys_list[0], klen); @@ -734,9 +734,13 @@ static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, pLastCol = NULL; code = tsdbCacheDeserialize(values_list[1], values_list_sizes[1], &pLastCol); - if (code != TSDB_CODE_SUCCESS) { - tsdbWarn("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); + if (code == TSDB_CODE_INVALID_PARA) { + tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + } else if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; } if (NULL != pLastCol) { rocksdb_writebatch_delete(wb, keys_list[1], klen); @@ -749,9 +753,7 @@ static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, for (int i = 0; i < 2; i++) { LRUHandle *h = taosLRUCacheLookup(pTsdb->lruCache, keys_list[i], klen); if (h) { - if (taosLRUCacheRelease(pTsdb->lruCache, h, true)) { - tsdbInfo("vgId:%d, %s release lru cache failed at line %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__); - } + tsdbLRUCacheRelease(pTsdb->lruCache, h, true); taosLRUCacheErase(pTsdb->lruCache, keys_list[i], klen); } } @@ -780,17 +782,13 @@ int32_t tsdbCacheNewTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWrap code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST_ROW); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } } } else { @@ -808,17 +806,13 @@ int32_t tsdbCacheNewTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWrap code = 
tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST_ROW); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } } @@ -837,10 +831,8 @@ int32_t tsdbCacheDropTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWra code = tsdbCacheCommitNoLock(pTsdb); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } if (pSchemaRow != NULL) { @@ -855,10 +847,8 @@ int32_t tsdbCacheDropTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWra code = tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } } } else { @@ -881,10 +871,8 @@ int32_t tsdbCacheDropTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWra code = tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } } @@ -905,10 +893,8 @@ int32_t tsdbCacheDropSubTables(STsdb *pTsdb, SArray *uids, tb_uid_t suid) { code = tsdbCacheCommitNoLock(pTsdb); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } STSchema *pTSchema = NULL; @@ -934,11 +920,8 @@ int32_t tsdbCacheDropSubTables(STsdb *pTsdb, SArray *uids, tb_uid_t suid) { code = tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - taosMemoryFree(pTSchema); - TAOS_RETURN(code); } } } @@ -959,17 +942,13 @@ int32_t tsdbCacheNewNTableColumn(STsdb *pTsdb, 
int64_t uid, int16_t cid, int8_t code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 0); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 1); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } // rocksMayWrite(pTsdb, true, false, false); (void)taosThreadMutexUnlock(&pTsdb->lruMutex); @@ -984,18 +963,14 @@ int32_t tsdbCacheDropNTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, bool h code = tsdbCacheCommitNoLock(pTsdb); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } code = tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } rocksMayWrite(pTsdb, false); @@ -1015,17 +990,13 @@ int32_t tsdbCacheNewSTableColumn(STsdb *pTsdb, SArray *uids, int16_t cid, int8_t code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 0); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } code = tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 1); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s new table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } } @@ -1041,10 +1012,8 @@ int32_t tsdbCacheDropSTableColumn(STsdb *pTsdb, SArray *uids, int16_t cid, bool code = tsdbCacheCommitNoLock(pTsdb); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s commit with no lock failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } for (int i = 0; i < TARRAY_SIZE(uids); ++i) { @@ -1052,10 +1021,8 @@ int32_t tsdbCacheDropSTableColumn(STsdb *pTsdb, SArray *uids, int16_t cid, bool code = tsdbCacheDropTableColumn(pTsdb, uid, cid, hasPrimayKey); if (code != 
TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s drop table column failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } } @@ -1071,40 +1038,6 @@ typedef struct { SLastKey key; } SIdxKey; -static int32_t tsdbCacheUpdateValue(SValue *pOld, SValue *pNew) { - uint8_t *pFree = NULL; - int nData = 0; - - if (IS_VAR_DATA_TYPE(pOld->type)) { - pFree = pOld->pData; - nData = pOld->nData; - } - - *pOld = *pNew; - if (IS_VAR_DATA_TYPE(pNew->type)) { - if (nData < pNew->nData) { - pOld->pData = taosMemoryCalloc(1, pNew->nData); - if (!pOld->pData) { - return terrno; - } - } else { - pOld->pData = pFree; - pFree = NULL; - } - - if (pNew->nData) { - memcpy(pOld->pData, pNew->pData, pNew->nData); - } else { - pFree = pOld->pData; - pOld->pData = NULL; - } - } - - taosMemoryFreeClear(pFree); - - TAOS_RETURN(TSDB_CODE_SUCCESS); -} - static void tsdbCacheUpdateLastColToNone(SLastCol *pLastCol, ELastCacheStatus cacheStatus) { // update rowkey pLastCol->rowKey.ts = TSKEY_MIN; @@ -1128,11 +1061,7 @@ static void tsdbCacheUpdateLastColToNone(SLastCol *pLastCol, ELastCacheStatus ca } pLastCol->colVal = COL_VAL_NONE(pLastCol->colVal.cid, pLastCol->colVal.value.type); - - if (!pLastCol->dirty) { - pLastCol->dirty = 1; - } - + pLastCol->dirty = 1; pLastCol->cacheStatus = cacheStatus; } @@ -1155,7 +1084,7 @@ static int32_t tsdbCachePutToRocksdb(STsdb *pTsdb, SLastKey *pLastKey, SLastCol TAOS_RETURN(code); } -static int32_t tsdbCachePutToLRU(STsdb *pTsdb, SLastKey *pLastKey, SLastCol *pLastCol) { +static int32_t tsdbCachePutToLRU(STsdb *pTsdb, SLastKey *pLastKey, SLastCol *pLastCol, int8_t dirty) { int32_t code = 0, lino = 0; SLastCol *pLRULastCol = taosMemoryCalloc(1, sizeof(SLastCol)); @@ -1165,11 +1094,11 @@ static int32_t tsdbCachePutToLRU(STsdb *pTsdb, SLastKey *pLastKey, SLastCol *pLa size_t charge = 0; *pLRULastCol = *pLastCol; - pLRULastCol->dirty = 1; + pLRULastCol->dirty = dirty; TAOS_CHECK_EXIT(tsdbCacheReallocSLastCol(pLRULastCol, &charge)); LRUStatus status = taosLRUCacheInsert(pTsdb->lruCache, pLastKey, ROCKS_KEY_LEN, pLRULastCol, charge, tsdbCacheDeleter, - NULL, TAOS_LRU_PRIORITY_LOW, pTsdb); + tsdbCacheOverWriter, NULL, TAOS_LRU_PRIORITY_LOW, pTsdb); if (TAOS_LRU_STATUS_OK != status && TAOS_LRU_STATUS_OK_OVERWRITTEN != status) { tsdbError("vgId:%d, %s failed at line %d status %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__, status); code = TSDB_CODE_FAILED; @@ -1216,14 +1145,13 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray if (pLastCol->cacheStatus != TSDB_LAST_CACHE_NO_CACHE) { int32_t cmp_res = tRowKeyCompare(&pLastCol->rowKey, pRowKey); if (cmp_res < 0 || (cmp_res == 0 && !COL_VAL_IS_NONE(pColVal))) { - SLastCol newLastCol = {.rowKey = *pRowKey, .colVal = *pColVal, .cacheStatus = TSDB_LAST_CACHE_VALID}; - code = tsdbCachePutToLRU(pTsdb, key, &newLastCol); + SLastCol newLastCol = { + .rowKey = *pRowKey, .colVal = *pColVal, .dirty = 1, .cacheStatus = TSDB_LAST_CACHE_VALID}; + code = tsdbCachePutToLRU(pTsdb, key, &newLastCol, 1); } } - if (!taosLRUCacheRelease(pCache, h, false)) { - tsdbInfo("vgId:%d, %s release lru cache failed at line %d", TD_VID(pTsdb->pVnode), __func__, __LINE__); - } + tsdbLRUCacheRelease(pCache, h, false); TAOS_CHECK_EXIT(code); } else { if (!remainCols) { @@ -1284,9 +1212,13 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, 
tb_uid_t suid, tb_uid_t uid, SArray SLastCol *pLastCol = NULL; code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code != TSDB_CODE_SUCCESS) { - tsdbWarn("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); + if (code == TSDB_CODE_INVALID_PARA) { + tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + } else if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; } /* if (code) { @@ -1296,7 +1228,7 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray SLastCol *pToFree = pLastCol; if (pLastCol && pLastCol->cacheStatus == TSDB_LAST_CACHE_NO_CACHE) { - if ((code = tsdbCachePutToLRU(pTsdb, &idxKey->key, pLastCol)) != TSDB_CODE_SUCCESS) { + if ((code = tsdbCachePutToLRU(pTsdb, &idxKey->key, pLastCol, 0)) != TSDB_CODE_SUCCESS) { tsdbError("tsdb/cache: vgId:%d, put lru failed at line %d since %s.", TD_VID(pTsdb->pVnode), lino, tstrerror(code)); taosMemoryFreeClear(pToFree); @@ -1319,14 +1251,14 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray } if (NULL == pLastCol || cmp_res < 0 || (cmp_res == 0 && !COL_VAL_IS_NONE(pColVal))) { - SLastCol lastColTmp = {.rowKey = *pRowKey, .colVal = *pColVal, .cacheStatus = TSDB_LAST_CACHE_VALID}; + SLastCol lastColTmp = {.rowKey = *pRowKey, .colVal = *pColVal, .dirty = 0, .cacheStatus = TSDB_LAST_CACHE_VALID}; if ((code = tsdbCachePutToRocksdb(pTsdb, &idxKey->key, &lastColTmp)) != TSDB_CODE_SUCCESS) { tsdbError("tsdb/cache: vgId:%d, put rocks failed at line %d since %s.", TD_VID(pTsdb->pVnode), lino, tstrerror(code)); taosMemoryFreeClear(pToFree); break; } - if ((code = tsdbCachePutToLRU(pTsdb, &idxKey->key, &lastColTmp)) != TSDB_CODE_SUCCESS) { + if ((code = tsdbCachePutToLRU(pTsdb, &idxKey->key, &lastColTmp, 0)) != TSDB_CODE_SUCCESS) { tsdbError("tsdb/cache: vgId:%d, put lru failed at line %d since %s.", TD_VID(pTsdb->pVnode), lino, tstrerror(code)); taosMemoryFreeClear(pToFree); @@ -1438,9 +1370,8 @@ int32_t tsdbCacheRowFormatUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, int6 } code = tSimpleHashIterateRemove(iColHash, &iCol, sizeof(iCol), &pIte, &iter); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s tSimpleHashIterateRemove failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, + tsdbTrace("vgId:%d, %s tSimpleHashIterateRemove failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - TAOS_CHECK_GOTO(code, &lino, _exit); } } } @@ -1449,9 +1380,8 @@ int32_t tsdbCacheRowFormatUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, int6 // 3. do update code = tsdbCacheUpdate(pTsdb, suid, uid, ctxArray); if (code < TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s tsdbCacheUpdate failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s tsdbCacheUpdate failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - TAOS_CHECK_GOTO(code, &lino, _exit); } _exit: @@ -1538,9 +1468,8 @@ int32_t tsdbCacheColFormatUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SBlo // 3. 
do update code = tsdbCacheUpdate(pTsdb, suid, uid, ctxArray); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s tsdbCacheUpdate failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s tsdbCacheUpdate failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - TAOS_CHECK_GOTO(code, &lino, _exit); } _exit: @@ -1681,30 +1610,14 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr continue; } - SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol)); - if (!pTmpLastCol) { - TAOS_CHECK_EXIT(terrno); - } - - size_t charge = 0; - *pTmpLastCol = *pLastCol; - pLastCol = pTmpLastCol; - code = tsdbCacheReallocSLastCol(pLastCol, &charge); - if (TSDB_CODE_SUCCESS != code) { - taosMemoryFree(pLastCol); + // store result back to rocks cache + code = tsdbCachePutToRocksdb(pTsdb, &idxKey->key, pLastCol); + if (code) { + tsdbError("vgId:%d, %s failed at line %d since %s.", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); TAOS_CHECK_EXIT(code); } - LRUStatus status = taosLRUCacheInsert(pCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, NULL, - TAOS_LRU_PRIORITY_LOW, pTsdb); - if (TAOS_LRU_STATUS_OK != status && TAOS_LRU_STATUS_OK_OVERWRITTEN != status) { - tsdbError("vgId:%d, %s failed at line %d status %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__, status); - pLastCol = NULL; - TAOS_CHECK_EXIT(TSDB_CODE_FAILED); - } - - // store result back to rocks cache - code = tsdbCachePutToRocksdb(pTsdb, &idxKey->key, pLastCol); + code = tsdbCachePutToLRU(pTsdb, &idxKey->key, pLastCol, 0); if (code) { tsdbError("vgId:%d, %s failed at line %d since %s.", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); TAOS_CHECK_EXIT(code); @@ -1772,25 +1685,21 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA } code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code != TSDB_CODE_SUCCESS) { - tsdbWarn("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); + if (code == TSDB_CODE_INVALID_PARA) { + tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + } else if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; } SLastCol *pToFree = pLastCol; SIdxKey *idxKey = &((SIdxKey *)TARRAY_DATA(remainCols))[j]; if (pLastCol && pLastCol->cacheStatus != TSDB_LAST_CACHE_NO_CACHE) { - SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol)); - if (!pTmpLastCol) { - taosMemoryFreeClear(pToFree); - TAOS_CHECK_EXIT(terrno); - } - - size_t charge = 0; - *pTmpLastCol = *pLastCol; - pLastCol = pTmpLastCol; - code = tsdbCacheReallocSLastCol(pLastCol, &charge); - if (TSDB_CODE_SUCCESS != code) { - taosMemoryFreeClear(pLastCol); + code = tsdbCachePutToLRU(pTsdb, &idxKey->key, pLastCol, 0); + if (code) { + tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); taosMemoryFreeClear(pToFree); TAOS_CHECK_EXIT(code); } @@ -1798,20 +1707,10 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA SLastCol lastCol = *pLastCol; code = tsdbCacheReallocSLastCol(&lastCol, NULL); if (TSDB_CODE_SUCCESS != code) { - tsdbCacheFreeSLastColItem(pLastCol); - taosMemoryFreeClear(pLastCol); 
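/*
 * Sketch of the dirty-flag convention the refactor above settles on
 * (store_put/lru_put are hypothetical stand-ins for tsdbCachePutToRocksdb and
 * tsdbCachePutToLRU): a value just written to the persistent store enters the
 * LRU with dirty = 0, while a value updated only in memory enters with
 * dirty = 1 so a later commit or eviction knows it still has to be flushed.
 */
#include <stdint.h>

int store_put(const void *key, int klen, const void *val);              /* persistent layer */
int lru_put(const void *key, int klen, const void *val, int8_t dirty);  /* in-memory layer */

/* load path: the backing store already holds the value, the cached copy is clean */
static int cache_seed(const void *key, int klen, const void *val) {
  int code = store_put(key, klen, val);
  if (code != 0) return code;
  return lru_put(key, klen, val, /*dirty*/ 0);
}

/* update path: only memory is touched, so the cached copy must be marked dirty */
static int cache_update(const void *key, int klen, const void *val) {
  return lru_put(key, klen, val, /*dirty*/ 1);
}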
taosMemoryFreeClear(pToFree); TAOS_CHECK_EXIT(code); } - LRUStatus status = taosLRUCacheInsert(pCache, &idxKey->key, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, - NULL, TAOS_LRU_PRIORITY_LOW, pTsdb); - if (TAOS_LRU_STATUS_OK != status && TAOS_LRU_STATUS_OK_OVERWRITTEN != status) { - tsdbError("vgId:%d, %s failed at line %d status %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__, status); - taosMemoryFreeClear(pToFree); - TAOS_CHECK_EXIT(TSDB_CODE_FAILED); - } - taosArraySet(pLastArray, idxKey->idx, &lastCol); taosArrayRemove(remainCols, j); taosArrayRemove(ignoreFromRocks, j); @@ -1908,11 +1807,7 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache } if (h) { - code = taosLRUCacheRelease(pCache, h, false); - if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s release lru cache failed at line %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__); - goto _exit; - } + tsdbLRUCacheRelease(pCache, h, false); } } @@ -1939,13 +1834,8 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache // no cache or cache is invalid ++i; } - if (h) { - code = taosLRUCacheRelease(pCache, h, false); - if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s release lru cache failed at line %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__); - goto _exit; - } + tsdbLRUCacheRelease(pCache, h, false); } } @@ -1981,10 +1871,8 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE code = tsdbCacheCommit(pTsdb); if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s commit failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tsdbTrace("vgId:%d, %s commit failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, tstrerror(code)); - (void)taosThreadMutexUnlock(&pTsdb->lruMutex); - TAOS_RETURN(code); } (void)taosThreadMutexLock(&pTsdb->lruMutex); @@ -1999,12 +1887,11 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE if (pLastCol->rowKey.ts <= eKey && pLastCol->rowKey.ts >= sKey) { SLastCol noneCol = {.rowKey.ts = TSKEY_MIN, .colVal = COL_VAL_NONE(cid, pTSchema->columns[i].type), + .dirty = 1, .cacheStatus = TSDB_LAST_CACHE_NO_CACHE}; - code = tsdbCachePutToLRU(pTsdb, &lastKey, &noneCol); - } - if (taosLRUCacheRelease(pTsdb->lruCache, h, false) != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s release lru cache failed at line %d.", TD_VID(pTsdb->pVnode), __func__, __LINE__); + code = tsdbCachePutToLRU(pTsdb, &lastKey, &noneCol, 1); } + tsdbLRUCacheRelease(pTsdb->lruCache, h, false); TAOS_CHECK_EXIT(code); } else { if (!remainCols) { @@ -2056,15 +1943,20 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE for (int i = 0; i < numKeys; ++i) { SLastCol *pLastCol = NULL; code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code != TSDB_CODE_SUCCESS) { - tsdbWarn("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); + if (code == TSDB_CODE_INVALID_PARA) { + tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + } else if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; } SIdxKey *idxKey = taosArrayGet(remainCols, i); SLastKey *pLastKey = &idxKey->key; if (NULL != pLastCol && (pLastCol->rowKey.ts <= eKey && pLastCol->rowKey.ts >= sKey)) { SLastCol 
noCacheCol = {.rowKey.ts = TSKEY_MIN, .colVal = COL_VAL_NONE(pLastKey->cid, pTSchema->columns[idxKey->idx].type), + .dirty = 0, .cacheStatus = TSDB_LAST_CACHE_NO_CACHE}; if ((code = tsdbCachePutToRocksdb(pTsdb, pLastKey, &noCacheCol)) != TSDB_CODE_SUCCESS) { @@ -2072,7 +1964,7 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE tsdbError("tsdb/cache/del: vgId:%d, put to rocks failed since %s.", TD_VID(pTsdb->pVnode), tstrerror(code)); goto _exit; } - if ((code = tsdbCachePutToLRU(pTsdb, pLastKey, &noCacheCol)) != TSDB_CODE_SUCCESS) { + if ((code = tsdbCachePutToLRU(pTsdb, pLastKey, &noCacheCol, 0)) != TSDB_CODE_SUCCESS) { taosMemoryFreeClear(pLastCol); tsdbError("tsdb/cache/del: vgId:%d, put to lru failed since %s.", TD_VID(pTsdb->pVnode), tstrerror(code)); goto _exit; @@ -3562,11 +3454,7 @@ _err: TAOS_RETURN(code); } -void tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h) { - if (taosLRUCacheRelease(pCache, h, false)) { - tsdbError("%s release lru cache failed at line %d.", __func__, __LINE__); - } -} +void tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h) { tsdbLRUCacheRelease(pCache, h, false); } void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity) { taosLRUCacheSetCapacity(pVnode->pTsdb->lruCache, capacity); @@ -3660,7 +3548,7 @@ int32_t tsdbCacheGetBlockS3(SLRUCache *pCache, STsdbFD *pFD, LRUHandle **handle) size_t charge = tsS3BlockSize * pFD->szPage; _taos_lru_deleter_t deleter = deleteBCache; LRUStatus status = - taosLRUCacheInsert(pCache, key, keyLen, pBlock, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW, NULL); + taosLRUCacheInsert(pCache, key, keyLen, pBlock, charge, deleter, NULL, &h, TAOS_LRU_PRIORITY_LOW, NULL); if (status != TAOS_LRU_STATUS_OK) { // code = -1; } @@ -3703,7 +3591,7 @@ void tsdbCacheSetPageS3(SLRUCache *pCache, STsdbFD *pFD, int64_t pgno, uint8_t * memcpy(pPg, pPage, charge); LRUStatus status = - taosLRUCacheInsert(pCache, key, keyLen, pPg, charge, deleter, &handle, TAOS_LRU_PRIORITY_LOW, NULL); + taosLRUCacheInsert(pCache, key, keyLen, pPg, charge, deleter, NULL, &handle, TAOS_LRU_PRIORITY_LOW, NULL); if (status != TAOS_LRU_STATUS_OK) { // ignore cache updating if not ok // code = TSDB_CODE_OUT_OF_MEMORY; @@ -3712,4 +3600,4 @@ void tsdbCacheSetPageS3(SLRUCache *pCache, STsdbFD *pFD, int64_t pgno, uint8_t * (void)taosThreadMutexUnlock(&pFD->pTsdb->pgMutex); tsdbCacheRelease(pFD->pTsdb->pgCache, handle); -} +} \ No newline at end of file diff --git a/source/dnode/vnode/src/tsdb/tsdbFSet2.c b/source/dnode/vnode/src/tsdb/tsdbFSet2.c index fc78fec2ea..fc681f9753 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFSet2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFSet2.c @@ -71,6 +71,9 @@ static int32_t tsdbSttLvlInitRef(STsdb *pTsdb, const SSttLvl *lvl1, SSttLvl **lv } code = TARRAY2_APPEND(lvl[0]->fobjArr, fobj1); if (code) { + if (tsdbTFileObjUnref(fobj1) != 0) { + tsdbError("failed to unref file obj, fobj:%p", fobj1); + } tsdbSttLvlClear(lvl); return code; } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 5b6511a38e..36bfb56120 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -704,7 +704,7 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFileReader* pFileRead pReader->cost.headFileLoadTime += (et1 - st) / 1000.0; -_end: +//_end: // tsdbBICacheRelease(pFileReader->pTsdb->biCache, handle); return code; } @@ -1872,9 +1872,9 @@ static void doPinSttBlock(SSttBlockReader* pSttBlockReader) { tMergeTreePinSttBl static void 
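/*
 * Sketch of the leak fix in tsdbSttLvlInitRef above (hypothetical names): a
 * reference is taken for the copy the array is meant to own; if the append
 * fails, that reference has to be dropped before returning, otherwise the file
 * object stays pinned forever.
 */
typedef struct file_obj file_obj_t;

int obj_ref(file_obj_t *f);                  /* assumed ref-count helpers */
int obj_unref(file_obj_t *f);
int array_append(void *arr, file_obj_t *f);  /* may fail, e.g. out of memory */

static int add_to_level(void *arr, file_obj_t *f) {
  int code = obj_ref(f);        /* the array will own this reference */
  if (code != 0) return code;

  code = array_append(arr, f);
  if (code != 0) {
    (void)obj_unref(f);         /* append failed: give the reference back */
    return code;
  }
  return 0;
}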
doUnpinSttBlock(SSttBlockReader* pSttBlockReader) { tMergeTreeUnpinSttBlock(&pSttBlockReader->mergeTree); } -static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SSttBlockReader* pSttBlockReader, - STableBlockScanInfo* pScanInfo, SRowKey* pSttKey, STsdbReader* pReader, - bool* copied) { +static int32_t tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SSttBlockReader* pSttBlockReader, + STableBlockScanInfo* pScanInfo, SRowKey* pSttKey, STsdbReader* pReader, + bool* copied) { int32_t code = TSDB_CODE_SUCCESS; *copied = false; diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index 00806885ef..f807ecf2d6 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -812,6 +812,7 @@ int32_t tsdbRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) if (!COL_VAL_IS_NONE(pColVal)) { if (IS_VAR_DATA_TYPE(pColVal->value.type)) { SColVal *pTColVal = taosArrayGet(pMerger->pArray, iCol); + if (!pTColVal) return terrno; if (!COL_VAL_IS_NULL(pColVal)) { code = tRealloc(&pTColVal->value.pData, pColVal->value.nData); if (code) return code; diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 5f4628eb87..50bedba75d 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -73,7 +73,7 @@ void vnodeRedirectRpcMsg(SVnode *pVnode, SRpcMsg *pMsg, int32_t code) { if (rsp.pCont == NULL) { pMsg->code = TSDB_CODE_OUT_OF_MEMORY; } else { - if (tSerializeSEpSet(rsp.pCont, contLen, &newEpSet) != 0) { + if (tSerializeSEpSet(rsp.pCont, contLen, &newEpSet) < 0) { vError("vgId:%d, failed to serialize ep set", pVnode->config.vgId); } rsp.contLen = contLen; diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index 46f4f86484..525573ee01 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -72,7 +72,7 @@ int32_t ctgInitGetTbMetaTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name->tname); return TSDB_CODE_SUCCESS; @@ -94,7 +94,7 @@ int32_t ctgInitGetTbMetasTask(SCtgJob* pJob, int32_t taskIdx, void* param) { ctx->pNames = param; ctx->pResList = taosArrayInit(pJob->tbMetaNum, sizeof(SMetaRes)); if (NULL == ctx->pResList) { - qError("qid:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbMetaNum, + qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbMetaNum, (int32_t)sizeof(SMetaRes)); ctgFreeTask(&task, true); CTG_ERR_RET(terrno); @@ -105,7 +105,7 @@ int32_t ctgInitGetTbMetasTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%lu, tbNum:%d", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%lu, tbNum:%d", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbMetaNum); return TSDB_CODE_SUCCESS; @@ -133,7 +133,7 @@ int32_t ctgInitGetDbVgTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, 
taskIdx, ctgTaskTypeStr(task.type), dbFName); return TSDB_CODE_SUCCESS; @@ -161,7 +161,7 @@ int32_t ctgInitGetDbCfgTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), dbFName); return TSDB_CODE_SUCCESS; @@ -189,7 +189,7 @@ int32_t ctgInitGetDbInfoTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), dbFName); return TSDB_CODE_SUCCESS; @@ -223,7 +223,7 @@ int32_t ctgInitGetTbHashTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, tableName:%s", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tableName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name->tname); return TSDB_CODE_SUCCESS; @@ -245,7 +245,7 @@ int32_t ctgInitGetTbHashsTask(SCtgJob* pJob, int32_t taskIdx, void* param) { ctx->pNames = param; ctx->pResList = taosArrayInit(pJob->tbHashNum, sizeof(SMetaRes)); if (NULL == ctx->pResList) { - qError("qid:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbHashNum, + qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbHashNum, (int32_t)sizeof(SMetaRes)); ctgFreeTask(&task, true); CTG_ERR_RET(terrno); @@ -256,7 +256,7 @@ int32_t ctgInitGetTbHashsTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%lu, tbNum:%d", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%lu, tbNum:%d", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbHashNum); return TSDB_CODE_SUCCESS; @@ -275,7 +275,7 @@ int32_t ctgInitGetQnodeTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type)); + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type)); return TSDB_CODE_SUCCESS; } @@ -293,7 +293,7 @@ int32_t ctgInitGetDnodeTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type)); + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type)); return TSDB_CODE_SUCCESS; } @@ -320,7 +320,7 @@ int32_t ctgInitGetIndexTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, indexFName:%s", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, indexFName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name); return TSDB_CODE_SUCCESS; @@ -348,7 +348,7 @@ int32_t ctgInitGetUdfTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, udfName:%s", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s 
initialized, udfName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name); return TSDB_CODE_SUCCESS; @@ -376,7 +376,7 @@ int32_t ctgInitGetUserTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, user:%s", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, user:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), user->user); return TSDB_CODE_SUCCESS; @@ -394,7 +394,7 @@ int32_t ctgInitGetSvrVerTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type)); + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type)); return TSDB_CODE_SUCCESS; } @@ -426,7 +426,7 @@ int32_t ctgInitGetTbIndexTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name->tname); return TSDB_CODE_SUCCESS; @@ -459,7 +459,7 @@ int32_t ctgInitGetTbCfgTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name->tname); return TSDB_CODE_SUCCESS; @@ -492,7 +492,7 @@ int32_t ctgInitGetTbTagTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name->tname); return TSDB_CODE_SUCCESS; @@ -514,7 +514,7 @@ int32_t ctgInitGetViewsTask(SCtgJob* pJob, int32_t taskIdx, void* param) { ctx->pNames = param; ctx->pResList = taosArrayInit(pJob->viewNum, sizeof(SMetaRes)); if (NULL == ctx->pResList) { - qError("qid:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->viewNum, + qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->viewNum, (int32_t)sizeof(SMetaRes)); ctgFreeTask(&task, true); CTG_ERR_RET(terrno); @@ -525,7 +525,7 @@ int32_t ctgInitGetViewsTask(SCtgJob* pJob, int32_t taskIdx, void* param) { CTG_ERR_RET(terrno); } - qDebug("qid:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%lu, viewNum:%d", pJob->queryId, taskIdx, + qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%lu, viewNum:%d", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->viewNum); return TSDB_CODE_SUCCESS; @@ -546,7 +546,7 @@ int32_t ctgInitGetTbTSMATask(SCtgJob* pJob, int32_t taskId, void* param) { pTaskCtx->pNames = param; pTaskCtx->pResList = taosArrayInit(pJob->tbTsmaNum, sizeof(SMetaRes)); if (NULL == pTaskCtx->pResList) { - qError("qid:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbTsmaNum, + qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbTsmaNum, (int32_t)sizeof(SMetaRes)); ctgFreeTask(&task, true); CTG_ERR_RET(terrno); @@ -574,7 +574,7 @@ int32_t ctgInitGetTSMATask(SCtgJob* pJob, int32_t taskId, void* param) { pTaskCtx->pNames = param; 
pTaskCtx->pResList = taosArrayInit(pJob->tsmaNum, sizeof(SMetaRes)); if (NULL == pTaskCtx->pResList) { - qError("qid:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tsmaNum, + qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tsmaNum, (int32_t)sizeof(SMetaRes)); ctgFreeTask(&task, true); CTG_ERR_RET(terrno); @@ -603,7 +603,7 @@ static int32_t ctgInitGetTbNamesTask(SCtgJob* pJob, int32_t taskId, void* param) pTaskCtx->pNames = param; pTaskCtx->pResList = taosArrayInit(pJob->tbNameNum, sizeof(SMetaRes)); if (NULL == pTaskCtx->pResList) { - qError("qid:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbNameNum, + qError("QID:0x%" PRIx64 " taosArrayInit %d SMetaRes %d failed", pJob->queryId, pJob->tbNameNum, (int32_t)sizeof(SMetaRes)); ctgFreeTask(&task, true); CTG_ERR_RET(terrno); @@ -1048,7 +1048,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const } double el = (taosGetTimestampUs() - st) / 1000.0; - qDebug("qid:0x%" PRIx64 ", jobId: 0x%" PRIx64 " initialized, task num %d, forceUpdate %d, elapsed time:%.2f ms", + qDebug("QID:0x%" PRIx64 ", jobId: 0x%" PRIx64 " initialized, task num %d, forceUpdate %d, elapsed time:%.2f ms", pJob->queryId, pJob->refId, taskNum, pReq->forceUpdate, el); return TSDB_CODE_SUCCESS; @@ -1450,16 +1450,17 @@ _return: int32_t ctgCallUserCb(void* param) { SCtgJob* pJob = (SCtgJob*)param; - qDebug("qid:0x%" PRIx64 " ctg start to call user cb with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode)); + qDebug("QID:0x%" PRIx64 " ctg start to call user cb with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode)); (*pJob->userFp)(&pJob->jobRes, pJob->userParam, pJob->jobResCode); - qDebug("qid:0x%" PRIx64 " ctg end to call user cb", pJob->queryId); + qDebug("QID:0x%" PRIx64 " ctg end to call user cb", pJob->queryId); int64_t refId = pJob->refId; int32_t code = taosRemoveRef(gCtgMgmt.jobPool, refId); if (code) { - qError("qid:0x%" PRIx64 " remove ctg job %" PRId64 " from jobPool failed, error:%s", pJob->queryId, refId, tstrerror(code)); + qError("QID:0x%" PRIx64 " remove ctg job %" PRId64 " from jobPool failed, error:%s", pJob->queryId, refId, + tstrerror(code)); } return TSDB_CODE_SUCCESS; @@ -1469,7 +1470,7 @@ void ctgUpdateJobErrCode(SCtgJob* pJob, int32_t errCode) { if (!NEED_CLIENT_REFRESH_VG_ERROR(errCode) || errCode == TSDB_CODE_SUCCESS) return; atomic_store_32(&pJob->jobResCode, errCode); - qDebug("qid:0x%" PRIx64 " ctg job errCode updated to %s", pJob->queryId, tstrerror(errCode)); + qDebug("QID:0x%" PRIx64 " ctg job errCode updated to %s", pJob->queryId, tstrerror(errCode)); return; } @@ -1481,7 +1482,7 @@ int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) { return TSDB_CODE_SUCCESS; } - qDebug("qid:0x%" PRIx64 " task %d end with res %s", pJob->queryId, pTask->taskId, tstrerror(rspCode)); + qDebug("QID:0x%" PRIx64 " task %d end with res %s", pJob->queryId, pTask->taskId, tstrerror(rspCode)); pTask->code = rspCode; pTask->status = CTG_TASK_DONE; @@ -1490,7 +1491,7 @@ int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) { int32_t taskDone = atomic_add_fetch_32(&pJob->taskDone, 1); if (taskDone < taosArrayGetSize(pJob->pTasks)) { - qDebug("qid:0x%" PRIx64 " task done: %d, total: %d", pJob->queryId, taskDone, + qDebug("QID:0x%" PRIx64 " task done: %d, total: %d", pJob->queryId, taskDone, (int32_t)taosArrayGetSize(pJob->pTasks)); ctgUpdateJobErrCode(pJob, rspCode); @@ -4347,7 +4348,7 @@ int32_t ctgLaunchJob(SCtgJob* pJob) { 
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR); } - qDebug("qid:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId); + qDebug("QID:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId); CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask)); pTask = taosArrayGet(pJob->pTasks, i); @@ -4360,7 +4361,7 @@ int32_t ctgLaunchJob(SCtgJob* pJob) { } if (taskNum <= 0) { - qDebug("qid:0x%" PRIx64 " ctg call user callback with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode)); + qDebug("QID:0x%" PRIx64 " ctg call user callback with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode)); CTG_ERR_RET(taosAsyncExec(ctgCallUserCb, pJob, NULL)); #if CTG_BATCH_FETCH diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c index f95c76c1cb..8cbb5c226a 100644 --- a/source/libs/catalog/src/ctgCache.c +++ b/source/libs/catalog/src/ctgCache.c @@ -1608,12 +1608,14 @@ int32_t ctgDropTSMAForTbEnqueue(SCatalog *pCtg, SName *pName, bool syncOp) { code = createDropAllTbTsmaCtgCacheOp(pCtg, pCache, syncOp, &pOp); } CTG_UNLOCK(CTG_READ, &pCtgCache->tsmaLock); + taosHashRelease(pDbCache->tsmaCache, pCtgCache); + pCtgCache = NULL; + ctgReleaseDBCache(pCtg, pDbCache); + pDbCache = NULL; CTG_ERR_JRET(code); CTG_ERR_JRET(ctgEnqueue(pCtg, pOp)); - taosHashRelease(pDbCache->tsmaCache, pCtgCache); - ctgReleaseDBCache(pCtg, pDbCache); return TSDB_CODE_SUCCESS; diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c index d6e941c819..ed9dc81dd7 100644 --- a/source/libs/catalog/src/ctgRemote.c +++ b/source/libs/catalog/src/ctgRemote.c @@ -47,7 +47,7 @@ int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBu msgNum = 0; } - ctgDebug("qid:0x%" PRIx64 " ctg got batch %d rsp %s", pJob->queryId, cbParam->batchId, + ctgDebug("QID:0x%" PRIx64 " ctg got batch %d rsp %s", pJob->queryId, cbParam->batchId, TMSG_INFO(cbParam->reqType + 1)); SHashObj* pBatchs = taosHashInit(taskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); @@ -114,7 +114,7 @@ int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBu pMsgCtx->pBatchs = pBatchs; - ctgDebug("qid:0x%" PRIx64 " ctg task %d idx %d start to handle rsp %s, pBatchs: %p", pJob->queryId, pTask->taskId, + ctgDebug("QID:0x%" PRIx64 " ctg task %d idx %d start to handle rsp %s, pBatchs: %p", pJob->queryId, pTask->taskId, pRsp->msgIdx, TMSG_INFO(taskMsg.msgType + 1), pBatchs); (void)(*gCtgAsyncFps[pTask->type].handleRspFp)( @@ -454,7 +454,7 @@ int32_t ctgHandleMsgCallback(void* param, SDataBuf* pMsg, int32_t rspCode) { CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR); } - qDebug("qid:0x%" PRIx64 " ctg task %d start to handle rsp %s", pJob->queryId, pTask->taskId, + qDebug("QID:0x%" PRIx64 " ctg task %d start to handle rsp %s", pJob->queryId, pTask->taskId, TMSG_INFO(cbParam->reqType + 1)); #if CTG_BATCH_FETCH @@ -702,7 +702,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT if (TDMT_VND_TABLE_CFG == msgType) { SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx; pName = ctx->pName; - } else if (TDMT_VND_TABLE_META == msgType || TDMT_VND_TABLE_NAME == msgType) { + } else if (TDMT_VND_TABLE_META == msgType || TDMT_VND_TABLE_NAME == msgType) { if (CTG_TASK_GET_TB_META_BATCH == pTask->type) { SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx; SCtgFetch* fetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx); @@ -808,7 +808,7 @@ int32_t ctgLaunchBatchs(SCatalog* pCtg, SCtgJob* pJob, SHashObj* pBatchs) { SCtgBatch* pBatch = 
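/*
 * Sketch of the release-ordering fix in ctgDropTSMAForTbEnqueue above
 * (hypothetical names): references on shared caches are released, and the
 * local pointers cleared, before any step that can jump to the shared error
 * label, so the error path never releases them a second time or leaks them.
 */
#include <stddef.h>

typedef struct res res_t;

int  enqueue_op(void);      /* may fail and trigger the error path */
void release(res_t *r);

static int drop_with_cleanup(res_t *a, res_t *b) {
  int code = 0;

  /* ... work that still needs a and b ... */

  release(a); a = NULL;     /* give the references back first */
  release(b); b = NULL;

  code = enqueue_op();      /* from here on, failures jump to _error */
  if (code != 0) goto _error;

  return 0;

_error:
  if (a != NULL) release(a);  /* already NULL on this path: nothing to double-free */
  if (b != NULL) release(b);
  return code;
}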
(SCtgBatch*)p; int32_t msgSize = 0; - ctgDebug("qid:0x%" PRIx64 " ctg start to launch batch %d", pJob->queryId, pBatch->batchId); + ctgDebug("QID:0x%" PRIx64 " ctg start to launch batch %d", pJob->queryId, pBatch->batchId); CTG_ERR_JRET(ctgBuildBatchReqMsg(pBatch, *vgId, &msg, &msgSize)); code = ctgAsyncSendMsg(pCtg, &pBatch->conn, pJob, pBatch->pTaskIds, pBatch->batchId, pBatch->pMsgIdxs, @@ -1124,10 +1124,11 @@ int32_t ctgGetTbIndexFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, SName* n int32_t code = tNameExtractFullName(name, tbFName); if (code) { - ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), name->type, name->dbname, name->tname); + ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), name->type, + name->dbname, name->tname); CTG_ERR_RET(code); } - + code = queryBuildMsg[TMSG_INDEX(reqType)]((void*)tbFName, &msg, 0, &msgLen, mallocFp); if (code) { ctgError("Build get index msg failed, code:%s, tbFName:%s", tstrerror(code), tbFName); @@ -1450,7 +1451,8 @@ int32_t ctgGetTableCfgFromVnode(SCatalog* pCtg, SRequestConnInfo* pConn, const S int32_t code = tNameExtractFullName(pTableName, tbFName); if (code) { - ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), pTableName->type, pTableName->dbname, pTableName->tname); + ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), pTableName->type, + pTableName->dbname, pTableName->tname); CTG_ERR_RET(code); } @@ -1523,7 +1525,8 @@ int32_t ctgGetTableCfgFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, const S int32_t code = tNameExtractFullName(pTableName, tbFName); if (code) { - ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), pTableName->type, pTableName->dbname, pTableName->tname); + ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), pTableName->type, + pTableName->dbname, pTableName->tname); CTG_ERR_RET(code); } @@ -1632,10 +1635,11 @@ int32_t ctgGetViewInfoFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, SName* int32_t reqType = TDMT_MND_VIEW_META; SCtgTask* pTask = tReq ? tReq->pTask : NULL; void* (*mallocFp)(int64_t) = pTask ? (MallocType)taosMemoryMalloc : (MallocType)rpcMallocCont; - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pName, fullName); if (code) { - ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), pName->type, pName->dbname, pName->tname); + ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), pName->type, + pName->dbname, pName->tname); CTG_ERR_RET(code); } @@ -1693,10 +1697,11 @@ int32_t ctgGetTbTSMAFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, const SNa int32_t msgLen = 0; SCtgTask* pTask = tReq ? tReq->pTask : NULL; void* (*mallocFp)(int64_t) = pTask ? 
(MallocType)taosMemoryMalloc : (MallocType)rpcMallocCont; - char tbFName[TSDB_TABLE_FNAME_LEN]; + char tbFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(name, tbFName); if (code) { - ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), name->type, name->dbname, name->tname); + ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), name->type, + name->dbname, name->tname); CTG_ERR_RET(code); } @@ -1757,10 +1762,11 @@ int32_t ctgGetStreamProgressFromVnode(SCatalog* pCtg, SRequestConnInfo* pConn, c char tbFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pTbName, tbFName); if (code) { - ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), pTbName->type, pTbName->dbname, pTbName->tname); + ctgError("tNameExtractFullName failed, code:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), pTbName->type, + pTbName->dbname, pTbName->tname); CTG_ERR_RET(code); } - + SCtgTask* pTask = tReq ? tReq->pTask : NULL; void* (*mallocFp)(int64_t) = pTask ? (MallocType)taosMemoryMalloc : (MallocType)rpcMallocCont; diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index 96cd783d2f..86a38017bd 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -452,7 +452,8 @@ void ctgClearHandleMeta(SCatalog* pCtg, int64_t* pClearedSize, int64_t* pCleardN code = taosHashRemove(dbCache->tbCache, key, len); if (code) { - qError("taosHashRemove table cache failed, key:%s, len:%d, error:%s", (char*)key, (int32_t)len, tstrerror(code)); + qError("taosHashRemove table cache failed, key:%s, len:%d, error:%s", (char*)key, (int32_t)len, + tstrerror(code)); } cacheSize = @@ -1096,7 +1097,7 @@ void ctgFreeJob(void* job) { taosMemoryFree(job); - qDebug("qid:0x%" PRIx64 ", ctg job 0x%" PRIx64 " freed", qid, rid); + qDebug("QID:0x%" PRIx64 ", ctg job 0x%" PRIx64 " freed", qid, rid); } int32_t ctgUpdateMsgCtx(SCtgMsgCtx* pCtx, int32_t reqType, void* out, char* target) { @@ -1241,10 +1242,11 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog* pCtg, SEpSet* pMgmtEps, SDBVgInfo* d char tbFullName[TSDB_TABLE_FNAME_LEN]; code = tNameExtractFullName(pTableName, tbFullName); if (code) { - ctgError("tNameExtractFullName failed, error:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), pTableName->type, pTableName->dbname, pTableName->tname); + ctgError("tNameExtractFullName failed, error:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), pTableName->type, + pTableName->dbname, pTableName->tname); CTG_ERR_RET(code); } - + uint32_t hashValue = taosGetTbHashVal(tbFullName, (uint32_t)strlen(tbFullName), dbInfo->hashMethod, dbInfo->hashPrefix, dbInfo->hashSuffix); @@ -1704,7 +1706,8 @@ int32_t ctgCloneTableIndex(SArray* pIndex, SArray** pRes) { } int32_t ctgUpdateSendTargetInfo(SMsgSendInfo* pMsgSendInfo, int32_t msgType, char* dbFName, int32_t vgId) { - if (msgType == TDMT_VND_TABLE_META || msgType == TDMT_VND_TABLE_CFG || msgType == TDMT_VND_BATCH_META || msgType == TDMT_VND_TABLE_NAME) { + if (msgType == TDMT_VND_TABLE_META || msgType == TDMT_VND_TABLE_CFG || msgType == TDMT_VND_BATCH_META || + msgType == TDMT_VND_TABLE_NAME) { pMsgSendInfo->target.type = TARGET_TYPE_VNODE; pMsgSendInfo->target.vgId = vgId; pMsgSendInfo->target.dbFName = taosStrdup(dbFName); @@ -2010,7 +2013,8 @@ int32_t ctgChkSetTbAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) { char dbFName[TSDB_DB_FNAME_LEN]; code = 
tNameExtractFullName(&req->pRawReq->tbName, tbFName); if (code) { - ctgError("tNameExtractFullName failed, error:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), req->pRawReq->tbName.type, req->pRawReq->tbName.dbname, req->pRawReq->tbName.tname); + ctgError("tNameExtractFullName failed, error:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), + req->pRawReq->tbName.type, req->pRawReq->tbName.dbname, req->pRawReq->tbName.tname); CTG_ERR_RET(code); } @@ -2201,7 +2205,8 @@ int32_t ctgChkSetViewAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) } else { code = tNameExtractFullName(&req->pRawReq->tbName, viewFName); if (code) { - ctgError("tNameExtractFullName failed, error:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), req->pRawReq->tbName.type, req->pRawReq->tbName.dbname, req->pRawReq->tbName.tname); + ctgError("tNameExtractFullName failed, error:%s, type:%d, dbName:%s, tname:%s", tstrerror(code), + req->pRawReq->tbName.type, req->pRawReq->tbName.dbname, req->pRawReq->tbName.tname); CTG_ERR_RET(code); } } diff --git a/source/libs/executor/inc/streamexecutorInt.h b/source/libs/executor/inc/streamexecutorInt.h index c6053cc96e..ab00dceb20 100644 --- a/source/libs/executor/inc/streamexecutorInt.h +++ b/source/libs/executor/inc/streamexecutorInt.h @@ -26,6 +26,8 @@ void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type); bool needSaveStreamOperatorInfo(SSteamOpBasicInfo* pBasicInfo); void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo); +void reuseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI); + #ifdef __cplusplus } #endif diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 04b3a83264..c74aef3992 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1143,11 +1143,11 @@ SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, S varDataSetLen(tmp, tagVal.nData); memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); code = colDataSetVal(pColInfo, i, tmp, false); - QUERY_CHECK_CODE(code, lino, _end); #if TAG_FILTER_DEBUG qDebug("tagfilter varch:%s", tmp + 2); #endif taosMemoryFree(tmp); + QUERY_CHECK_CODE(code, lino, _end); } else { code = colDataSetVal(pColInfo, i, (const char*)&tagVal.i64, false); QUERY_CHECK_CODE(code, lino, _end); diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index c3228f59bf..cbf392f67e 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -798,7 +798,6 @@ void qCleanExecTaskBlockBuf(qTaskInfo_t tinfo) { int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; int64_t threadId = taosGetSelfPthreadId(); - int32_t lino = 0; int64_t curOwner = 0; *pRes = NULL; @@ -846,7 +845,7 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) { int32_t code = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot, pRes); if (code) { pTaskInfo->code = code; - qError("%s failed at line %d, code:%s %s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo)); + qError("%s failed at line %d, code:%s %s", __func__, __LINE__, tstrerror(code), GET_TASKID(pTaskInfo)); } blockDataCheck(*pRes, false); diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c index 5fc483087a..64a07c4653 100644 --- a/source/libs/executor/src/executorInt.c +++ b/source/libs/executor/src/executorInt.c @@ -687,10 +687,10 @@ int32_t 
copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResu code = blockDataEnsureCapacity(pBlock, pBlock->info.rows + pCtx[j].resultInfo->numOfRes); QUERY_CHECK_CODE(code, lino, _end); - int32_t winCode = pCtx[j].fpSet.finalize(&pCtx[j], pBlock); - if (TAOS_FAILED(winCode)) { - qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(winCode)); - QUERY_CHECK_CODE(winCode, lino, _end); + code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock); + if (TSDB_CODE_SUCCESS != code) { + qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); + QUERY_CHECK_CODE(code, lino, _end); } } else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) { // do nothing @@ -1301,10 +1301,17 @@ FORCE_INLINE int32_t getNextBlockFromDownstreamImpl(struct SOperatorInfo* pOpera freeOperatorParam(pOperator->pDownstreamGetParams[idx], OP_GET_PARAM); pOperator->pDownstreamGetParams[idx] = NULL; } + + if (code) { + qError("failed to get next data block from upstream at %s, line:%d code:%s", __func__, __LINE__, tstrerror(code)); + } return code; } code = pOperator->pDownstream[idx]->fpSet.getNextFn(pOperator->pDownstream[idx], pResBlock); + if (code) { + qError("failed to get next data block from upstream at %s, %d code:%s", __func__, __LINE__, tstrerror(code)); + } return code; } diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 1c9279b84f..9cf2a3ea17 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -86,11 +86,13 @@ static void destroyGroupOperatorInfo(void* param) { taosArrayDestroy(pInfo->pGroupCols); taosArrayDestroyEx(pInfo->pGroupColVals, freeGroupKey); cleanupExprSupp(&pInfo->scalarSup); - if (pInfo->pOperator) { + + if (pInfo->pOperator != NULL) { cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); pInfo->pOperator = NULL; } + cleanupGroupResInfo(&pInfo->groupResInfo); cleanupAggSup(&pInfo->aggSup); taosMemoryFreeClear(param); diff --git a/source/libs/executor/src/mergeoperator.c b/source/libs/executor/src/mergeoperator.c index 45cd755f78..7fd6b91e52 100644 --- a/source/libs/executor/src/mergeoperator.c +++ b/source/libs/executor/src/mergeoperator.c @@ -67,6 +67,9 @@ int32_t sortMergeloadNextDataBlock(void* param, SSDataBlock** ppBlock) { SOperatorInfo* pOperator = (SOperatorInfo*)param; int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock); blockDataCheck(*ppBlock, false); + if (code) { + qError("failed to get next data block from upstream, %s code:%s", __func__, tstrerror(code)); + } return code; } diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 324eaa3bb5..bae9926f63 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -737,7 +737,7 @@ _end: if (NULL != pVal) { insertRet = taosLRUCacheInsert(pCache->pTableMetaEntryCache, &pBlock->info.id.uid, sizeof(uint64_t), pVal, - sizeof(STableCachedVal), freeCachedMetaItem, NULL, TAOS_LRU_PRIORITY_LOW, NULL); + sizeof(STableCachedVal), freeCachedMetaItem, NULL, NULL, TAOS_LRU_PRIORITY_LOW, NULL); if (insertRet != TAOS_LRU_STATUS_OK) { qWarn("failed to put meta into lru cache, code:%d, %s", insertRet, idStr); } @@ -1380,8 +1380,7 @@ static int32_t doTableScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { code = 
tableListGetSize(pInfo->base.pTableListInfo, &numOfTables); if (code != TSDB_CODE_SUCCESS) { taosRUnLockLatch(&pTaskInfo->lock); - lino = __LINE__; - goto _end; + TSDB_CHECK_CODE(code, lino, _end); } if (pInfo->currentTable >= numOfTables) { @@ -1393,11 +1392,11 @@ static int32_t doTableScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { STableKeyInfo* tmp = (STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->currentTable); if (!tmp) { - qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno)); taosRUnLockLatch(&pTaskInfo->lock); (*ppRes) = NULL; - return terrno; + QUERY_CHECK_NULL(tmp, code, lino, _end, terrno); } + tInfo = *tmp; taosRUnLockLatch(&pTaskInfo->lock); @@ -1412,11 +1411,12 @@ static int32_t doTableScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) { } } else { // scan table group by group sequentially code = groupSeqTableScan(pOperator, ppRes); + QUERY_CHECK_CODE(code, lino, _end); } _end: if (code != TSDB_CODE_SUCCESS) { - qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + qError("%s %s failed at line %d since %s", GET_TASKID(pTaskInfo), __func__, lino, tstrerror(code)); pTaskInfo->code = code; T_LONG_JMP(pTaskInfo->env, code); } @@ -5834,9 +5834,10 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock* SOperatorInfo* pOperator) { STableMergeScanInfo* pInfo = pOperator->info; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + STupleHandle* pTupleHandle = NULL; blockDataCleanup(pResBlock); - STupleHandle* pTupleHandle = NULL; + while (1) { while (1) { pTupleHandle = NULL; diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 11b3fa8c70..1c241dffec 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -204,15 +204,18 @@ int32_t appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle) * @brief get next tuple with group id attached, here assume that all tuples are sorted by group keys * @param [in, out] pBlock the output block, the group id will be saved in it * @retval NULL if next group tuple arrived and this new group tuple will be saved in pInfo.pSavedTuple - * @retval NULL if no more tuples */ -static STupleHandle* nextTupleWithGroupId(SSortHandle* pHandle, SSortOperatorInfo* pInfo, SSDataBlock* pBlock) { - int32_t code = 0; +static int32_t nextTupleWithGroupId(SSortHandle* pHandle, SSortOperatorInfo* pInfo, SSDataBlock* pBlock, + STupleHandle** pTupleHandle) { + QRY_PARAM_CHECK(pTupleHandle); + + int32_t code = 0; STupleHandle* retTuple = pInfo->pGroupIdCalc->pSavedTuple; if (!retTuple) { code = tsortNextTuple(pHandle, &retTuple); if (code) { - return NULL; + qError("failed to get next tuple, code:%s", tstrerror(code)); + return code; } } @@ -225,7 +228,8 @@ static STupleHandle* nextTupleWithGroupId(SSortHandle* pHandle, SSortOperatorInf newGroup = tsortCompAndBuildKeys(pInfo->pGroupIdCalc->pSortColsArr, pInfo->pGroupIdCalc->keyBuf, &pInfo->pGroupIdCalc->lastKeysLen, retTuple); } - bool emptyBlock = pBlock->info.rows == 0; + + bool emptyBlock = (pBlock->info.rows == 0); if (newGroup) { if (!emptyBlock) { // new group arrived, and we have already copied some tuples for cur group, save the new group tuple, return @@ -247,17 +251,20 @@ static STupleHandle* nextTupleWithGroupId(SSortHandle* pHandle, SSortOperatorInf } } - return retTuple; + *pTupleHandle = retTuple; + return code; } static int32_t getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, 
int32_t capacity, SArray* pColMatchInfo, SSortOperatorInfo* pInfo, SSDataBlock** pResBlock) { QRY_PARAM_CHECK(pResBlock); blockDataCleanup(pDataBlock); - int32_t lino = 0; - int32_t code = 0; - SSDataBlock* p = NULL; + int32_t lino = 0; + int32_t code = 0; + STupleHandle* pTupleHandle = NULL; + SSDataBlock* p = NULL; + code = tsortGetSortedDataBlock(pHandle, &p); if (p == NULL || (code != 0)) { return code; @@ -266,16 +273,15 @@ static int32_t getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, code = blockDataEnsureCapacity(p, capacity); QUERY_CHECK_CODE(code, lino, _error); - STupleHandle* pTupleHandle; while (1) { if (pInfo->pGroupIdCalc) { - pTupleHandle = nextTupleWithGroupId(pHandle, pInfo, p); + code = nextTupleWithGroupId(pHandle, pInfo, p, &pTupleHandle); } else { code = tsortNextTuple(pHandle, &pTupleHandle); } - if (pTupleHandle == NULL || code != 0) { - lino = __LINE__; + TSDB_CHECK_CODE(code, lino, _error); + if (pTupleHandle == NULL) { break; } @@ -320,7 +326,7 @@ static int32_t getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, return code; _error: - qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); blockDataDestroy(p); return code; @@ -330,6 +336,9 @@ int32_t loadNextDataBlock(void* param, SSDataBlock** ppBlock) { SOperatorInfo* pOperator = (SOperatorInfo*)param; int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock); blockDataCheck(*ppBlock, false); + if (code) { + qError("failed to get next data block from upstream, %s code:%s", __func__, tstrerror(code)); + } return code; } diff --git a/source/libs/executor/src/streamcountwindowoperator.c b/source/libs/executor/src/streamcountwindowoperator.c index 8b2a1b7c71..33b3e7748c 100644 --- a/source/libs/executor/src/streamcountwindowoperator.c +++ b/source/libs/executor/src/streamcountwindowoperator.c @@ -90,7 +90,7 @@ int32_t setCountOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t group if (isSlidingCountWindow(pAggSup)) { if (pBuffInfo->winBuffOp == CREATE_NEW_WINDOW) { - code = pAggSup->stateStore.streamStateCountWinAdd(pAggSup->pState, &pCurWin->winInfo.sessionWin, + code = pAggSup->stateStore.streamStateCountWinAdd(pAggSup->pState, &pCurWin->winInfo.sessionWin, pAggSup->windowCount, (void**)&pCurWin->winInfo.pStatePos, &size); QUERY_CHECK_CODE(code, lino, _end); @@ -101,9 +101,11 @@ int32_t setCountOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t group winCode = pAggSup->stateStore.streamStateSessionGetKVByCur(pBuffInfo->pCur, &pCurWin->winInfo.sessionWin, (void**)&pCurWin->winInfo.pStatePos, &size); if (winCode == TSDB_CODE_FAILED) { - code = pAggSup->stateStore.streamStateCountWinAdd(pAggSup->pState, &pCurWin->winInfo.sessionWin, + code = pAggSup->stateStore.streamStateCountWinAdd(pAggSup->pState, &pCurWin->winInfo.sessionWin, pAggSup->windowCount, (void**)&pCurWin->winInfo.pStatePos, &size); QUERY_CHECK_CODE(code, lino, _end); + } else { + reuseOutputBuf(pAggSup->pState, pCurWin->winInfo.pStatePos, &pAggSup->stateStore); } } else { pBuffInfo->pCur = pAggSup->stateStore.streamStateCountSeekKeyPrev(pAggSup->pState, &pCurWin->winInfo.sessionWin, @@ -111,9 +113,11 @@ int32_t setCountOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t group winCode = pAggSup->stateStore.streamStateSessionGetKVByCur(pBuffInfo->pCur, &pCurWin->winInfo.sessionWin, (void**)&pCurWin->winInfo.pStatePos, &size); if (winCode == TSDB_CODE_FAILED) { - code = 
pAggSup->stateStore.streamStateCountWinAdd(pAggSup->pState, &pCurWin->winInfo.sessionWin, + code = pAggSup->stateStore.streamStateCountWinAdd(pAggSup->pState, &pCurWin->winInfo.sessionWin, pAggSup->windowCount, (void**)&pCurWin->winInfo.pStatePos, &size); QUERY_CHECK_CODE(code, lino, _end); + } else { + reuseOutputBuf(pAggSup->pState, pCurWin->winInfo.pStatePos, &pAggSup->stateStore); } } if (ts < pCurWin->winInfo.sessionWin.win.ekey) { diff --git a/source/libs/executor/src/streamfilloperator.c b/source/libs/executor/src/streamfilloperator.c index 291cc3b67b..826220581a 100644 --- a/source/libs/executor/src/streamfilloperator.c +++ b/source/libs/executor/src/streamfilloperator.c @@ -1165,12 +1165,12 @@ _end: return code; } -static int32_t initResultBuf(SStreamFillSupporter* pFillSup) { - pFillSup->rowSize = sizeof(SResultCellData) * pFillSup->numOfAllCols; - for (int i = 0; i < pFillSup->numOfAllCols; i++) { - SFillColInfo* pCol = &pFillSup->pAllColInfo[i]; - SResSchema* pSchema = &pCol->pExpr->base.resSchema; - pFillSup->rowSize += pSchema->bytes; +static int32_t initResultBuf(SSDataBlock* pInputRes, SStreamFillSupporter* pFillSup) { + int32_t numOfCols = taosArrayGetSize(pInputRes->pDataBlock); + pFillSup->rowSize = sizeof(SResultCellData) * numOfCols; + for (int i = 0; i < numOfCols; i++) { + SColumnInfoData* pCol = taosArrayGet(pInputRes->pDataBlock, i); + pFillSup->rowSize += pCol->info.bytes; } pFillSup->next.key = INT64_MIN; pFillSup->nextNext.key = INT64_MIN; @@ -1185,7 +1185,7 @@ static int32_t initResultBuf(SStreamFillSupporter* pFillSup) { } static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNode, SInterval* pInterval, - SExprInfo* pFillExprInfo, int32_t numOfFillCols, SStorageAPI* pAPI) { + SExprInfo* pFillExprInfo, int32_t numOfFillCols, SStorageAPI* pAPI, SSDataBlock* pInputRes) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; SStreamFillSupporter* pFillSup = taosMemoryCalloc(1, sizeof(SStreamFillSupporter)); @@ -1214,7 +1214,7 @@ static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNod pFillSup->interval = *pInterval; pFillSup->pAPI = pAPI; - code = initResultBuf(pFillSup); + code = initResultBuf(pInputRes, pFillSup); QUERY_CHECK_CODE(code, lino, _end); SExprInfo* noFillExpr = NULL; @@ -1371,7 +1371,11 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi code = initExprSupp(&pOperator->exprSupp, pFillExprInfo, numOfFillCols, &pTaskInfo->storageAPI.functionStore); QUERY_CHECK_CODE(code, lino, _error); - pInfo->pFillSup = initStreamFillSup(pPhyFillNode, pInterval, pFillExprInfo, numOfFillCols, &pTaskInfo->storageAPI); + pInfo->pSrcBlock = createDataBlockFromDescNode(pPhyFillNode->node.pOutputDataBlockDesc); + QUERY_CHECK_NULL(pInfo->pSrcBlock, code, lino, _error, terrno); + + pInfo->pFillSup = initStreamFillSup(pPhyFillNode, pInterval, pFillExprInfo, numOfFillCols, &pTaskInfo->storageAPI, + pInfo->pSrcBlock); if (!pInfo->pFillSup) { code = TSDB_CODE_FAILED; QUERY_CHECK_CODE(code, lino, _error); @@ -1380,8 +1384,7 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); pInfo->pRes = createDataBlockFromDescNode(pPhyFillNode->node.pOutputDataBlockDesc); QUERY_CHECK_NULL(pInfo->pRes, code, lino, _error, terrno); - pInfo->pSrcBlock = createDataBlockFromDescNode(pPhyFillNode->node.pOutputDataBlockDesc); - QUERY_CHECK_NULL(pInfo->pSrcBlock, code, lino, _error, terrno); + code = blockDataEnsureCapacity(pInfo->pRes, 
pOperator->resultInfo.capacity); QUERY_CHECK_CODE(code, lino, _error); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index cacc4f4cee..8164281871 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1229,11 +1229,13 @@ static void destroyStateWindowOperatorInfo(void* param) { SStateWindowOperatorInfo* pInfo = (SStateWindowOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); taosMemoryFreeClear(pInfo->stateKey.pData); - if (pInfo->pOperator) { + + if (pInfo->pOperator != NULL) { cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); pInfo->pOperator = NULL; } + cleanupExprSupp(&pInfo->scalarSup); colDataDestroy(&pInfo->twAggSup.timeWindowData); cleanupAggSup(&pInfo->aggSup); @@ -1251,13 +1253,17 @@ void destroyIntervalOperatorInfo(void* param) { if (param == NULL) { return; } + SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)param; + cleanupBasicInfo(&pInfo->binfo); - if (pInfo->pOperator) { + + if (pInfo->pOperator != NULL) { cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); pInfo->pOperator = NULL; } + cleanupAggSup(&pInfo->aggSup); cleanupExprSupp(&pInfo->scalarSupp); @@ -1265,6 +1271,7 @@ void destroyIntervalOperatorInfo(void* param) { taosArrayDestroy(pInfo->pInterpCols); pInfo->pInterpCols = NULL; + taosArrayDestroyEx(pInfo->pPrevValues, freeItem); pInfo->pPrevValues = NULL; @@ -1358,6 +1365,7 @@ int32_t createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { code = terrno; + lino = __LINE__; goto _error; } @@ -1465,8 +1473,10 @@ _error: if (pInfo != NULL) { destroyIntervalOperatorInfo(pInfo); } + destroyOperatorAndDownstreams(pOperator, &downstream, 1); pTaskInfo->code = code; + qError("error happens at %s %d, code:%s", __func__, lino, tstrerror(code)); return code; } @@ -1754,11 +1764,13 @@ void destroySWindowOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); colDataDestroy(&pInfo->twAggSup.timeWindowData); - if (pInfo->pOperator) { + + if (pInfo->pOperator != NULL) { cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); pInfo->pOperator = NULL; } + cleanupAggSup(&pInfo->aggSup); cleanupExprSupp(&pInfo->scalarSupp); diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 6fef9a5e10..17c390e239 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -771,7 +771,7 @@ static int32_t getSortedBlockDataInner(SSortHandle* pHandle, SMsortComparParam* code = adjustMergeTreeForNextTuple(pSource, pHandle->pMergeTree, pHandle, &pHandle->numOfCompletedSources); if (code != TSDB_CODE_SUCCESS) { - return terrno = code; + return code; } if (pHandle->pDataBlock->info.rows >= capacity) { @@ -2391,25 +2391,31 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) { return code; } -static void freeSortSource(SSortSource* pSource) { - if (NULL == pSource) { +static void freeSortSource(void* p) { + SSortSource** pSource = (SSortSource**)p; + if (NULL == pSource || NULL == *pSource) { return; } - if (!pSource->onlyRef && 
pSource->param) { - taosMemoryFree(pSource->param); + if ((*pSource)->pageIdList) { + taosArrayDestroy((*pSource)->pageIdList); } - if (!pSource->onlyRef && pSource->src.pBlock) { - blockDataDestroy(pSource->src.pBlock); - pSource->src.pBlock = NULL; + if (!(*pSource)->onlyRef) { + if ((*pSource)->param) { + taosMemoryFree((*pSource)->param); + } + if ((*pSource)->src.pBlock) { + blockDataDestroy((*pSource)->src.pBlock); + } } - taosMemoryFree(pSource); + taosMemoryFreeClear(*pSource); } static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) { int32_t code = 0; + int32_t lino = 0; size_t sortBufSize = pHandle->numOfPages * pHandle->pageSize; SSortSource** p = taosArrayGet(pHandle->pOrderedSource, 0); if (p == NULL) { @@ -2417,17 +2423,12 @@ static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) { } SSortSource* pSource = *p; - - taosArrayRemove(pHandle->pOrderedSource, 0); - tsortClearOrderedSource(pHandle->pOrderedSource, NULL, NULL); + size_t origSourceCount = taosArrayGetSize(pHandle->pOrderedSource); while (1) { SSDataBlock* pBlock = NULL; code = pHandle->fetchfp(pSource->param, &pBlock); - if (code != 0) { - freeSortSource(pSource); - return code; - } + QUERY_CHECK_CODE(code, lino, _end); if (pBlock == NULL) { break; @@ -2441,10 +2442,7 @@ static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) { pHandle->numOfPages = 1024; sortBufSize = pHandle->numOfPages * pHandle->pageSize; code = createOneDataBlock(pBlock, false, &pHandle->pDataBlock); - if (code) { - freeSortSource(pSource); - return code; - } + QUERY_CHECK_CODE(code, lino, _end); } if (pHandle->beforeFp != NULL) { @@ -2452,43 +2450,30 @@ static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) { } code = blockDataMerge(pHandle->pDataBlock, pBlock); - if (code != TSDB_CODE_SUCCESS) { - freeSortSource(pSource); - return code; - } + QUERY_CHECK_CODE(code, lino, _end); size_t size = blockDataGetSize(pHandle->pDataBlock); if (size > sortBufSize) { // Perform the in-memory sort and then flush data in the buffer into disk. int64_t st = taosGetTimestampUs(); code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo); - if (code != 0) { - freeSortSource(pSource); - return code; - } + QUERY_CHECK_CODE(code, lino, _end); pHandle->sortElapsed += (taosGetTimestampUs() - st); if (pHandle->pqMaxRows > 0) blockDataKeepFirstNRows(pHandle->pDataBlock, pHandle->pqMaxRows); code = doAddToBuf(pHandle->pDataBlock, pHandle); - if (code != TSDB_CODE_SUCCESS) { - freeSortSource(pSource); - return code; - } + QUERY_CHECK_CODE(code, lino, _end); } } - freeSortSource(pSource); - if (pHandle->pDataBlock != NULL && pHandle->pDataBlock->info.rows > 0) { size_t size = blockDataGetSize(pHandle->pDataBlock); // Perform the in-memory sort and then flush data in the buffer into disk. 
int64_t st = taosGetTimestampUs(); code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo); - if (code != 0) { - return code; - } + QUERY_CHECK_CODE(code, lino, _end); if (pHandle->pqMaxRows > 0) blockDataKeepFirstNRows(pHandle->pDataBlock, pHandle->pqMaxRows); pHandle->sortElapsed += (taosGetTimestampUs() - st); @@ -2501,12 +2486,16 @@ static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) { pHandle->loops = 1; pHandle->tupleHandle.rowIndex = -1; pHandle->tupleHandle.pBlock = pHandle->pDataBlock; - return 0; } else { code = doAddToBuf(pHandle->pDataBlock, pHandle); } } +_end: + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } + taosArrayRemoveBatch(pHandle->pOrderedSource, 0, origSourceCount, freeSortSource); return code; } @@ -2880,6 +2869,7 @@ static int32_t tsortSingleTableMergeNextTuple(SSortHandle* pHandle, STupleHandle pHandle->tupleHandle.pBlock = NULL; return code; } + pHandle->tupleHandle.pBlock = pBlock; pHandle->tupleHandle.rowIndex = 0; } @@ -2895,8 +2885,7 @@ int32_t tsortOpen(SSortHandle* pHandle) { } if (pHandle == NULL || pHandle->fetchfp == NULL || pHandle->comparFn == NULL) { - code = TSDB_CODE_INVALID_PARA; - return code; + return TSDB_CODE_INVALID_PARA; } pHandle->opened = true; diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c index 793aaa810e..2351273975 100644 --- a/source/libs/index/src/indexFstFile.c +++ b/source/libs/index/src/indexFstFile.c @@ -148,7 +148,7 @@ static int64_t idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_ memcpy(buf + total, blk->buf + blkOffset, nread); LRUStatus s = taosLRUCacheInsert(ctx->lru, key, strlen(key), blk, cacheMemSize, deleteDataBlockFromLRU, NULL, - TAOS_LRU_PRIORITY_LOW, NULL); + NULL, TAOS_LRU_PRIORITY_LOW, NULL); if (s != TAOS_LRU_STATUS_OK) { return -1; } diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c index 43159bce20..c3aa95f5b7 100644 --- a/source/libs/planner/src/planner.c +++ b/source/libs/planner/src/planner.c @@ -37,7 +37,7 @@ static int32_t dumpQueryPlan(SQueryPlan* pPlan) { char* pStr = NULL; code = nodesNodeToString((SNode*)pPlan, false, &pStr, NULL); if (TSDB_CODE_SUCCESS == code) { - planDebugL("qid:0x%" PRIx64 " Query Plan, JsonPlan: %s", pPlan->queryId, pStr); + planDebugL("QID:0x%" PRIx64 " Query Plan, JsonPlan: %s", pPlan->queryId, pStr); taosMemoryFree(pStr); } return code; @@ -123,7 +123,7 @@ int32_t qContinuePlanPostQuery(void* pPostPlan) { } int32_t qSetSubplanExecutionNode(SSubplan* subplan, int32_t groupId, SDownstreamSourceNode* pSource) { - planDebug("qid:0x%" PRIx64 " set subplan execution node, groupId:%d", subplan->id.queryId, groupId); + planDebug("QID:0x%" PRIx64 " set subplan execution node, groupId:%d", subplan->id.queryId, groupId); return setSubplanExecutionNode(subplan->pNode, groupId, pSource); } @@ -143,7 +143,7 @@ static void clearSubplanExecutionNode(SPhysiNode* pNode) { } void qClearSubplanExecutionNode(SSubplan* pSubplan) { - planDebug("qid:0x%" PRIx64 " clear subplan execution node, groupId:%d", pSubplan->id.queryId, pSubplan->id.groupId); + planDebug("QID:0x%" PRIx64 " clear subplan execution node, groupId:%d", pSubplan->id.queryId, pSubplan->id.groupId); clearSubplanExecutionNode(pSubplan->pNode); } diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index 4f08c93c1e..2a4951d237 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -628,8 +628,8 @@ 
int32_t sclWalkCaseWhenList(SScalarCtx *ctx, SNodeList *pList, struct SListCell cell = cell->pNext) { pWhenThen = (SWhenThenNode *)node; - SCL_ERR_RET(sclGetNodeRes(pWhenThen->pWhen, ctx, &pWhen)); - SCL_ERR_RET(sclGetNodeRes(pWhenThen->pThen, ctx, &pThen)); + SCL_ERR_JRET(sclGetNodeRes(pWhenThen->pWhen, ctx, &pWhen)); + SCL_ERR_JRET(sclGetNodeRes(pWhenThen->pThen, ctx, &pThen)); SCL_ERR_JRET(vectorCompareImpl(pCase, pWhen, pComp, rowIdx, 1, TSDB_ORDER_ASC, OP_TYPE_EQUAL)); @@ -646,6 +646,10 @@ int32_t sclWalkCaseWhenList(SScalarCtx *ctx, SNodeList *pList, struct SListCell goto _return; } + sclFreeParam(pWhen); + sclFreeParam(pThen); + taosMemoryFreeClear(pWhen); + taosMemoryFreeClear(pThen); } if (pElse) { @@ -672,8 +676,8 @@ _return: sclFreeParam(pWhen); sclFreeParam(pThen); - taosMemoryFree(pWhen); - taosMemoryFree(pThen); + taosMemoryFreeClear(pWhen); + taosMemoryFreeClear(pThen); SCL_RET(code); } diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c index f475c974cc..03145da939 100644 --- a/source/libs/scheduler/src/schJob.c +++ b/source/libs/scheduler/src/schJob.c @@ -728,7 +728,7 @@ void schFreeJobImpl(void *job) { uint64_t queryId = pJob->queryId; int64_t refId = pJob->refId; - qDebug("qid:0x%" PRIx64 " begin to free sch job, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob); + qDebug("QID:0x%" PRIx64 " begin to free sch job, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob); schDropJobAllTasks(pJob); @@ -775,7 +775,7 @@ void schFreeJobImpl(void *job) { taosMemoryFreeClear(pJob->userRes.execRes); taosMemoryFreeClear(pJob->fetchRes); taosMemoryFreeClear(pJob->sql); - int32_t code = tsem_destroy(&pJob->rspSem); + int32_t code = tsem_destroy(&pJob->rspSem); if (code) { qError("tsem_destroy failed, error:%s", tstrerror(code)); } @@ -786,7 +786,7 @@ void schFreeJobImpl(void *job) { schCloseJobRef(); } - qDebug("qid:0x%" PRIx64 " sch job freed, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob); + qDebug("QID:0x%" PRIx64 " sch job freed, refId:0x%" PRIx64 ", pointer:%p", queryId, refId, pJob); } int32_t schJobFetchRows(SSchJob *pJob) { @@ -797,7 +797,7 @@ int32_t schJobFetchRows(SSchJob *pJob) { if (schChkCurrentOp(pJob, SCH_OP_FETCH, true)) { SCH_JOB_DLOG("sync wait for rsp now, job status:%s", SCH_GET_JOB_STATUS_STR(pJob)); - code = tsem_wait(&pJob->rspSem); + code = tsem_wait(&pJob->rspSem); if (code) { qError("tsem_wait for fetch rspSem failed, error:%s", tstrerror(code)); SCH_RET(code); @@ -821,7 +821,7 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) { int64_t refId = -1; SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob)); if (NULL == pJob) { - qError("qid:0x%" PRIx64 " calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob)); + qError("QID:0x%" PRIx64 " calloc %d failed", pReq->pDag->queryId, (int32_t)sizeof(SSchJob)); SCH_ERR_JRET(terrno); } @@ -831,7 +831,7 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) { if (pReq->sql) { pJob->sql = taosStrdup(pReq->sql); if (NULL == pJob->sql) { - qError("qid:0x%" PRIx64 " strdup sql %s failed", pReq->pDag->queryId, pReq->sql); + qError("QID:0x%" PRIx64 " strdup sql %s failed", pReq->pDag->queryId, pReq->sql); SCH_ERR_JRET(terrno); } } @@ -839,7 +839,7 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) { if (pReq->allocatorRefId > 0) { pJob->allocatorRefId = nodesMakeAllocatorWeakRef(pReq->allocatorRefId); if (pJob->allocatorRefId <= 0) { - qError("qid:0x%" PRIx64 " nodesMakeAllocatorWeakRef failed", pReq->pDag->queryId); + qError("QID:0x%" PRIx64 " 
nodesMakeAllocatorWeakRef failed", pReq->pDag->queryId); SCH_ERR_JRET(terrno); } } @@ -851,11 +851,11 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) { pJob->pWorkerCb = pReq->pWorkerCb; if (pReq->pNodeList == NULL || taosArrayGetSize(pReq->pNodeList) <= 0) { - qDebug("qid:0x%" PRIx64 " input exec nodeList is empty", pReq->pDag->queryId); + qDebug("QID:0x%" PRIx64 " input exec nodeList is empty", pReq->pDag->queryId); } else { pJob->nodeList = taosArrayDup(pReq->pNodeList, NULL); if (NULL == pJob->nodeList) { - qError("qid:0x%" PRIx64 " taosArrayDup failed, origNum:%d", pReq->pDag->queryId, + qError("QID:0x%" PRIx64 " taosArrayDup failed, origNum:%d", pReq->pDag->queryId, (int32_t)taosArrayGetSize(pReq->pNodeList)); SCH_ERR_JRET(terrno); } @@ -918,7 +918,7 @@ _return: int32_t schExecJob(SSchJob *pJob, SSchedulerReq *pReq) { int32_t code = 0; - qDebug("qid:0x%" PRIx64 " sch job refId 0x%" PRIx64 " started", pReq->pDag->queryId, pJob->refId); + qDebug("QID:0x%" PRIx64 " sch job refId 0x%" PRIx64 " started", pReq->pDag->queryId, pJob->refId); SCH_ERR_RET(schLaunchJob(pJob)); @@ -926,7 +926,7 @@ int32_t schExecJob(SSchJob *pJob, SSchedulerReq *pReq) { SCH_JOB_DLOG("sync wait for rsp now, job status:%s", SCH_GET_JOB_STATUS_STR(pJob)); code = tsem_wait(&pJob->rspSem); if (code) { - qError("qid:0x%" PRIx64 " tsem_wait sync rspSem failed, error:%s", pReq->pDag->queryId, tstrerror(code)); + qError("QID:0x%" PRIx64 " tsem_wait sync rspSem failed, error:%s", pReq->pDag->queryId, tstrerror(code)); SCH_ERR_RET(code); } } @@ -1191,7 +1191,7 @@ int32_t schProcessOnCbBegin(SSchJob **job, SSchTask **task, uint64_t qId, int64_ (void)schAcquireJob(rId, &pJob); if (NULL == pJob) { - qWarn("qid:0x%" PRIx64 ",TID:0x%" PRIx64 "job no exist, may be dropped, refId:0x%" PRIx64, qId, tId, rId); + qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 "job no exist, may be dropped, refId:0x%" PRIx64, qId, tId, rId); SCH_ERR_RET(TSDB_CODE_QRY_JOB_NOT_EXIST); } diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index 0c1329d632..d1fd62a5b8 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -500,7 +500,7 @@ _return: int32_t schHandleDropCallback(void *param, SDataBuf *pMsg, int32_t code) { SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param; - qDebug("qid:0x%" PRIx64 ",TID:0x%" PRIx64 " drop task rsp received, code:0x%x", pParam->queryId, pParam->taskId, + qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 " drop task rsp received, code:0x%x", pParam->queryId, pParam->taskId, code); // called if drop task rsp received code (void)rpcReleaseHandle(pMsg->handle, TAOS_CONN_CLIENT); // ignore error @@ -517,7 +517,7 @@ int32_t schHandleDropCallback(void *param, SDataBuf *pMsg, int32_t code) { int32_t schHandleNotifyCallback(void *param, SDataBuf *pMsg, int32_t code) { SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param; - qDebug("qid:0x%" PRIx64 ",TID:0x%" PRIx64 " task notify rsp received, code:0x%x", pParam->queryId, pParam->taskId, + qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 " task notify rsp received, code:0x%x", pParam->queryId, pParam->taskId, code); if (pMsg) { taosMemoryFree(pMsg->pData); diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 375ad5fa37..4c609fa5e2 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -996,7 +996,7 @@ int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId *pEpId, SArray *pStatusList) { int32_t code = 0; - 
qDebug("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d task status in server: %s", pStatus->queryId, pStatus->taskId, + qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ",EID:%d task status in server: %s", pStatus->queryId, pStatus->taskId, pStatus->execId, jobTaskStatusStr(pStatus->status)); if (schProcessOnCbBegin(&pJob, &pTask, pStatus->queryId, pStatus->refId, pStatus->taskId)) { @@ -1043,12 +1043,12 @@ int32_t schHandleExplainRes(SArray *pExplainRes) { continue; } - qDebug("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ", begin to handle LOCAL explain rsp msg", localRsp->qId, localRsp->tId); + qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ", begin to handle LOCAL explain rsp msg", localRsp->qId, localRsp->tId); pJob = NULL; (void)schAcquireJob(localRsp->rId, &pJob); if (NULL == pJob) { - qWarn("qid:0x%" PRIx64 ",TID:0x%" PRIx64 "job no exist, may be dropped, refId:0x%" PRIx64, localRsp->qId, + qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 "job no exist, may be dropped, refId:0x%" PRIx64, localRsp->qId, localRsp->tId, localRsp->rId); SCH_ERR_JRET(TSDB_CODE_QRY_JOB_NOT_EXIST); } @@ -1068,7 +1068,7 @@ int32_t schHandleExplainRes(SArray *pExplainRes) { (void)schReleaseJob(pJob->refId); - qDebug("qid:0x%" PRIx64 ",TID:0x%" PRIx64 ", end to handle LOCAL explain rsp msg, code:%x", localRsp->qId, + qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ", end to handle LOCAL explain rsp msg, code:%x", localRsp->qId, localRsp->tId, code); SCH_ERR_JRET(code); diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index a5c5c1b775..94c196d280 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -164,7 +164,6 @@ extern void* streamTimer; extern int32_t streamBackendId; extern int32_t streamBackendCfWrapperId; extern int32_t taskDbWrapperId; -extern int32_t streamMetaId; int32_t streamTimerInit(); void streamTimerCleanUp(); diff --git a/source/libs/stream/src/streamCheckStatus.c b/source/libs/stream/src/streamCheckStatus.c index 2688617823..75bcc326b3 100644 --- a/source/libs/stream/src/streamCheckStatus.c +++ b/source/libs/stream/src/streamCheckStatus.c @@ -108,7 +108,7 @@ void streamTaskSendCheckMsg(SStreamTask* pTask) { pRange->range.maxVer, pWindow->skey, pWindow->ekey, req.reqId); code = streamSendCheckMsg(pTask, &req, pTask->outputInfo.fixedDispatcher.nodeId, - &pTask->outputInfo.fixedDispatcher.epSet); + &pTask->outputInfo.fixedDispatcher.epSet); } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { streamTaskStartMonitorCheckRsp(pTask); @@ -171,14 +171,14 @@ void streamTaskProcessCheckMsg(SStreamMeta* pMeta, SStreamTaskCheckReq* pReq, SS streamTaskCheckStatus(pTask, pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->stage, &pRsp->oldStage); SStreamTaskState pState = streamTaskGetStatus(pTask); - stDebug("s-task:%s status:%s, stage:%" PRId64 " recv task check req(qid:0x%" PRIx64 + stDebug("s-task:%s status:%s, stage:%" PRId64 " recv task check req(QID:0x%" PRIx64 ") task:0x%x (vgId:%d), check_status:%d", pTask->id.idStr, pState.name, pRsp->oldStage, pRsp->reqId, pRsp->upstreamTaskId, pRsp->upstreamNodeId, pRsp->status); streamMetaReleaseTask(pMeta, pTask); } else { pRsp->status = TASK_DOWNSTREAM_NOT_READY; - stDebug("tq recv task check(taskId:0x%" PRIx64 "-0x%x not built yet) req(qid:0x%" PRIx64 + stDebug("tq recv task check(taskId:0x%" PRIx64 "-0x%x not built yet) req(QID:0x%" PRIx64 ") from task:0x%x (vgId:%d), rsp check_status %d", pReq->streamId, taskId, pRsp->reqId, pRsp->upstreamTaskId, pRsp->upstreamNodeId, pRsp->status); } @@ -259,7 +259,8 @@ 
int32_t streamTaskSendCheckRsp(const SStreamMeta* pMeta, int32_t vgId, SStreamTa void* buf = rpcMallocCont(sizeof(SMsgHead) + len); if (buf == NULL) { - stError("s-task:0x%x vgId:%d failed prepare msg, %s at line:%d code:%s", taskId, pMeta->vgId, __func__, __LINE__, tstrerror(code)); + stError("s-task:0x%x vgId:%d failed prepare msg, %s at line:%d code:%s", taskId, pMeta->vgId, __func__, __LINE__, + tstrerror(code)); return terrno; } @@ -332,7 +333,7 @@ void streamTaskCleanupCheckInfo(STaskCheckInfo* pInfo) { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void processDownstreamReadyRsp(SStreamTask* pTask) { EStreamTaskEvent event = (pTask->info.fillHistory == 0) ? TASK_EVENT_INIT : TASK_EVENT_INIT_SCANHIST; - int32_t code = streamTaskOnHandleEventSuccess(pTask->status.pSM, event, NULL, NULL); + int32_t code = streamTaskOnHandleEventSuccess(pTask->status.pSM, event, NULL, NULL); if (code) { stError("s-task:%s failed to set event succ, code:%s", pTask->id.idStr, tstrerror(code)); } @@ -354,7 +355,7 @@ void processDownstreamReadyRsp(SStreamTask* pTask) { stDebug("s-task:%s level:%d initial status is %s from mnode, set it to be halt", pTask->id.idStr, pTask->info.taskLevel, streamTaskGetStatusStr(pTask->status.taskStatus)); code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_HALT); - if (code != 0) { // todo: handle error + if (code != 0) { // todo: handle error stError("s-task:%s failed to handle halt event, code:%s", pTask->id.idStr, tstrerror(code)); } } @@ -373,8 +374,9 @@ void processDownstreamReadyRsp(SStreamTask* pTask) { int32_t addIntoNodeUpdateList(SStreamTask* pTask, int32_t nodeId) { int32_t vgId = pTask->pMeta->vgId; - int32_t code = 0;; - bool existed = false; + int32_t code = 0; + ; + bool existed = false; streamMutexLock(&pTask->lock); diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index 699774ed52..e44bca123b 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -120,38 +120,39 @@ int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSo } int32_t streamTaskProcessCheckpointTriggerRsp(SStreamTask* pTask, SCheckpointTriggerRsp* pRsp) { + SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo; + bool unQualified = false; + const char* id = pTask->id.idStr; + if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { - stError("s-task:%s invalid msg recv, checkpoint-trigger rsp not handled", pTask->id.idStr); + stError("s-task:%s invalid msg recv, checkpoint-trigger rsp not handled", id); return TSDB_CODE_INVALID_MSG; } if (pRsp->rspCode != TSDB_CODE_SUCCESS) { - stDebug("s-task:%s retrieve checkpoint-trgger rsp from upstream:0x%x invalid, code:%s", pTask->id.idStr, - pRsp->upstreamTaskId, tstrerror(pRsp->rspCode)); + stDebug("s-task:%s retrieve checkpoint-trgger rsp from upstream:0x%x invalid, code:%s", id, pRsp->upstreamTaskId, + tstrerror(pRsp->rspCode)); return TSDB_CODE_SUCCESS; } streamMutexLock(&pTask->lock); SStreamTaskState status = streamTaskGetStatus(pTask); - if (status.state != TASK_STATUS__CK) { - stError("s-task:%s status:%s not in checkpoint status, discard the checkpoint-trigger msg", pTask->id.idStr, status.name); - streamMutexUnlock(&pTask->lock); - return TSDB_CODE_STREAM_TASK_IVLD_STATUS; - } - streamMutexUnlock(&pTask->lock); - SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo; - streamMutexLock(&pInfo->lock); - if (pInfo->activeId != 
pRsp->checkpointId || pInfo->transId != pRsp->transId) { - stError("s-task:%s status:%s not in checkpoint status, discard the checkpoint-trigger msg", pTask->id.idStr, status.name); - - streamMutexUnlock(&pInfo->lock); + if (status.state != TASK_STATUS__CK) { + stError("s-task:%s status:%s not in checkpoint status, discard the checkpoint-trigger msg", id, status.name); return TSDB_CODE_STREAM_TASK_IVLD_STATUS; } + streamMutexLock(&pInfo->lock); + unQualified = (pInfo->activeId != pRsp->checkpointId || pInfo->transId != pRsp->transId); streamMutexUnlock(&pInfo->lock); + if (unQualified) { + stError("s-task:%s status:%s not in checkpoint status, discard the checkpoint-trigger msg", id, status.name); + return TSDB_CODE_STREAM_TASK_IVLD_STATUS; + } + // NOTE: here we do not do the duplicated checkpoint-trigger msg check, since it will be done by following functions. int32_t code = appendCheckpointIntoInputQ(pTask, STREAM_INPUT__CHECKPOINT_TRIGGER, pRsp->checkpointId, pRsp->transId, pRsp->upstreamTaskId); @@ -903,7 +904,7 @@ static int32_t doChkptStatusCheck(SStreamTask* pTask) { return -1; } - if ((pTmrInfo->launchChkptId != pActiveInfo->activeId) || (pActiveInfo->activeId == 0)) { + if (pTmrInfo->launchChkptId != pActiveInfo->activeId) { int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); stWarn("s-task:%s vgId:%d checkpoint-trigger retrieve by previous checkpoint procedure, checkpointId:%" PRId64 ", quit, ref:%d", @@ -963,11 +964,44 @@ static int32_t doFindNotSendUpstream(SStreamTask* pTask, SArray* pList, SArray** return 0; } +static int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, SArray* pNotSendList) { + const char* id = pTask->id.idStr; + SArray* pList = pTask->upstreamInfo.pList; // send msg to retrieve checkpoint trigger msg + SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; + SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr; + int32_t vgId = pTask->pMeta->vgId; + + int32_t code = doChkptStatusCheck(pTask); + if (code) { + return code; + } + + code = doFindNotSendUpstream(pTask, pList, &pNotSendList); + if (code) { + int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); + stDebug("s-task:%s failed to find not send upstream, code:%s, out of tmr, ref:%d", id, tstrerror(code), ref); + return code; + } + + // do send retrieve checkpoint trigger msg to upstream + code = doSendRetrieveTriggerMsg(pTask, pNotSendList); + if (code) { + stError("s-task:%s vgId:%d failed to retrieve trigger msg, code:%s", pTask->id.idStr, vgId, tstrerror(code)); + code = 0; + } + + return code; +} + void checkpointTriggerMonitorFn(void* param, void* tmrId) { SStreamTask* pTask = param; int32_t vgId = pTask->pMeta->vgId; int64_t now = taosGetTimestampMs(); const char* id = pTask->id.idStr; + SArray* pNotSendList = NULL; + SArray* pList = pTask->upstreamInfo.pList; // send msg to retrieve checkpoint trigger msg + int32_t code = 0; + int32_t numOfNotSend = 0; SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr; @@ -1008,42 +1042,21 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { } streamMutexLock(&pActiveInfo->lock); + code = chkptTriggerRecvMonitorHelper(pTask, pNotSendList); + streamMutexUnlock(&pActiveInfo->lock); - int32_t code = doChkptStatusCheck(pTask); - if (code) { - streamMutexUnlock(&pActiveInfo->lock); + if (code != TSDB_CODE_SUCCESS) { streamMetaReleaseTask(pTask->pMeta, pTask); - return; - } - - // send msg to retrieve checkpoint trigger msg - SArray* pList = 
pTask->upstreamInfo.pList; - SArray* pNotSendList = NULL; - - code = doFindNotSendUpstream(pTask, pList, &pNotSendList); - if (code) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s failed to find not send upstream, code:%s, out of tmr, ref:%d", id, tstrerror(code), ref); - streamMutexUnlock(&pActiveInfo->lock); - streamMetaReleaseTask(pTask->pMeta, pTask); - taosArrayDestroy(pNotSendList); return; } - // do send retrieve checkpoint trigger msg to upstream - int32_t size = taosArrayGetSize(pNotSendList); - code = doSendRetrieveTriggerMsg(pTask, pNotSendList); - if (code) { - stError("s-task:%s vgId:%d failed to retrieve trigger msg, code:%s", pTask->id.idStr, vgId, tstrerror(code)); - } - - streamMutexUnlock(&pActiveInfo->lock); - // check every 100ms - if (size > 0) { + numOfNotSend = taosArrayGetSize(pNotSendList); + if (numOfNotSend > 0) { stDebug("s-task:%s start to monitor checkpoint-trigger in 10s", id); - streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor"); + streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + "trigger-recv-monitor"); } else { int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); stDebug("s-task:%s all checkpoint-trigger recved, quit from monitor checkpoint-trigger tmr, ref:%d", id, ref); @@ -1106,19 +1119,13 @@ int32_t doSendRetrieveTriggerMsg(SStreamTask* pTask, SArray* pNotSendList) { return code; } -bool streamTaskAlreadySendTrigger(SStreamTask* pTask, int32_t downstreamNodeId) { +static int32_t isAlreadySendTriggerNoLock(SStreamTask* pTask, int32_t downstreamNodeId) { int64_t now = taosGetTimestampMs(); const char* id = pTask->id.idStr; SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo; SStreamTaskState pStatus = streamTaskGetStatus(pTask); - if (pStatus.state != TASK_STATUS__CK) { - return false; - } - - streamMutexLock(&pInfo->lock); if (!pInfo->dispatchTrigger) { - streamMutexUnlock(&pInfo->lock); return false; } @@ -1146,14 +1153,29 @@ bool streamTaskAlreadySendTrigger(SStreamTask* pTask, int32_t downstreamNodeId) id, pSendInfo->sendTs, before, pInfo->activeId, pInfo->transId); } - streamMutexUnlock(&pInfo->lock); return true; } - streamMutexUnlock(&pInfo->lock); return false; } +bool streamTaskAlreadySendTrigger(SStreamTask* pTask, int32_t downstreamNodeId) { + int64_t now = taosGetTimestampMs(); + const char* id = pTask->id.idStr; + SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo; + SStreamTaskState pStatus = streamTaskGetStatus(pTask); + + if (pStatus.state != TASK_STATUS__CK) { + return false; + } + + streamMutexLock(&pInfo->lock); + bool send = isAlreadySendTriggerNoLock(pTask, downstreamNodeId); + streamMutexUnlock(&pInfo->lock); + + return send; +} + void streamTaskGetTriggerRecvStatus(SStreamTask* pTask, int32_t* pRecved, int32_t* pTotal) { *pRecved = taosArrayGetSize(pTask->chkInfo.pActiveInfo->pReadyMsgList); @@ -1169,8 +1191,10 @@ void streamTaskGetTriggerRecvStatus(SStreamTask* pTask, int32_t* pRecved, int32_ int32_t streamTaskInitTriggerDispatchInfo(SStreamTask* pTask) { SActiveCheckpointInfo* pInfo = pTask->chkInfo.pActiveInfo; int64_t now = taosGetTimestampMs(); + int32_t code = 0; streamMutexLock(&pInfo->lock); + pInfo->dispatchTrigger = true; if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) { STaskDispatcherFixed* pDispatch = &pTask->outputInfo.fixedDispatcher; @@ -1178,8 +1202,7 @@ int32_t streamTaskInitTriggerDispatchInfo(SStreamTask* pTask) { STaskTriggerSendInfo 
p = {.sendTs = now, .recved = false, .nodeId = pDispatch->nodeId, .taskId = pDispatch->taskId}; void* px = taosArrayPush(pInfo->pDispatchTriggerList, &p); if (px == NULL) { // pause the stream task, if memory not enough - streamMutexUnlock(&pInfo->lock); - return terrno; + code = terrno; } } else { for (int32_t i = 0; i < streamTaskGetNumOfDownstream(pTask); ++i) { @@ -1191,14 +1214,15 @@ int32_t streamTaskInitTriggerDispatchInfo(SStreamTask* pTask) { STaskTriggerSendInfo p = {.sendTs = now, .recved = false, .nodeId = pVgInfo->vgId, .taskId = pVgInfo->taskId}; void* px = taosArrayPush(pInfo->pDispatchTriggerList, &p); if (px == NULL) { // pause the stream task, if memory not enough - streamMutexUnlock(&pInfo->lock); - return terrno; + code = terrno; + break; } } } streamMutexUnlock(&pInfo->lock); - return 0; + + return code; } int32_t streamTaskGetNumOfConfirmed(SActiveCheckpointInfo* pInfo) { diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index a3146ae9d4..78cbd844a0 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -526,6 +526,7 @@ static void doMonitorDispatchData(void* param, void* tmrId) { int32_t msgId = pMsgInfo->msgId; int32_t code = 0; int64_t now = taosGetTimestampMs(); + bool inDispatch = true; stDebug("s-task:%s start monitor dispatch data", id); @@ -550,12 +551,15 @@ static void doMonitorDispatchData(void* param, void* tmrId) { int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); stDebug("s-task:%s not in dispatch procedure, abort from timer, ref:%d", pTask->id.idStr, ref); - pTask->msgInfo.inMonitor = 0; - streamMutexUnlock(&pMsgInfo->lock); - return; + pMsgInfo->inMonitor = 0; + inDispatch = false; } streamMutexUnlock(&pMsgInfo->lock); + if (!inDispatch) { + return; + } + int32_t numOfFailed = getFailedDispatchInfo(pMsgInfo, now); if (numOfFailed == 0) { stDebug("s-task:%s no error occurs, check again in %dms", id, DISPATCH_RETRY_INTERVAL_MS); @@ -638,15 +642,54 @@ void streamStartMonitorDispatchData(SStreamTask* pTask, int64_t waitDuration) { "dispatch-monitor"); } +static int32_t doAddDispatchBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, + SArray* vgInfo, uint32_t hashValue, int64_t now, bool* pFound) { + size_t numOfVgroups = taosArrayGetSize(vgInfo); + int32_t code = 0; + + *pFound = false; + + for (int32_t j = 0; j < numOfVgroups; j++) { + SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j); + if (pVgInfo == NULL) { + continue; + } + + if (hashValue >= pVgInfo->hashBegin && hashValue <= pVgInfo->hashEnd) { + if ((code = streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j])) < 0) { + stError("s-task:%s failed to add dispatch block, code:%s", pTask->id.idStr, tstrerror(terrno)); + return code; + } + + if (pReqs[j].blockNum == 0) { + SVgroupInfo* pDstVgroupInfo = taosArrayGet(vgInfo, j); + if (pDstVgroupInfo != NULL) { + addDispatchEntry(&pTask->msgInfo, pDstVgroupInfo->vgId, now, false); + } + } + + pReqs[j].blockNum++; + *pFound = true; + break; + } + } + + return code; +} + int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int64_t groupId, int64_t now) { bool found = false; uint32_t hashValue = 0; - int32_t numOfVgroups = 0; + int32_t code = 0; + SArray* vgInfo = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos; - SArray* vgInfo = pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos; if (pTask->pNameMap == NULL) { pTask->pNameMap = tSimpleHashInit(1024, 
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT)); + if (pTask->pNameMap == NULL) { + stError("s-task:%s failed to init the name map, code:%s", pTask->id.idStr, tstrerror(terrno)); + return terrno; + } } void* pVal = tSimpleHashGet(pTask->pNameMap, &groupId, sizeof(int64_t)); @@ -669,11 +712,11 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S } } } else { - int32_t code = buildCtbNameByGroupIdImpl(pTask->outputInfo.shuffleDispatcher.stbFullName, groupId, - pDataBlock->info.parTbName); + code = buildCtbNameByGroupIdImpl(pTask->outputInfo.shuffleDispatcher.stbFullName, groupId, + pDataBlock->info.parTbName); if (code) { - stError("s-task:%s failed to build child table name for group:%" PRId64 ", code:%s", pTask->id.idStr, - groupId, tstrerror(code)); + stError("s-task:%s failed to build child table name for group:%" PRId64 ", code:%s", pTask->id.idStr, groupId, + tstrerror(code)); } } @@ -688,44 +731,21 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S memcpy(bln.parTbName, pDataBlock->info.parTbName, strlen(pDataBlock->info.parTbName)); // failed to put into name buffer, no need to do anything - if (tSimpleHashGetSize(pTask->pNameMap) < MAX_BLOCK_NAME_NUM) { // allow error, and do nothing - int32_t code = tSimpleHashPut(pTask->pNameMap, &groupId, sizeof(int64_t), &bln, sizeof(SBlockName)); + if (tSimpleHashGetSize(pTask->pNameMap) < MAX_BLOCK_NAME_NUM) { // allow error, and do nothing + code = tSimpleHashPut(pTask->pNameMap, &groupId, sizeof(int64_t), &bln, sizeof(SBlockName)); } } - numOfVgroups = taosArrayGetSize(vgInfo); - - // TODO: optimize search streamMutexLock(&pTask->msgInfo.lock); + code = doAddDispatchBlock(pTask, pReqs, pDataBlock, vgInfo, hashValue, now, &found); + streamMutexUnlock(&pTask->msgInfo.lock); - for (int32_t j = 0; j < numOfVgroups; j++) { - SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j); - if (pVgInfo == NULL) { - continue; - } - - if (hashValue >= pVgInfo->hashBegin && hashValue <= pVgInfo->hashEnd) { - if (streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j]) < 0) { - streamMutexUnlock(&pTask->msgInfo.lock); - return -1; - } - - if (pReqs[j].blockNum == 0) { - SVgroupInfo* pDstVgroupInfo = taosArrayGet(vgInfo, j); - if (pDstVgroupInfo != NULL) { - addDispatchEntry(&pTask->msgInfo, pDstVgroupInfo->vgId, now, false); - } - } - - pReqs[j].blockNum++; - found = true; - break; - } + if (code) { + return code; } - streamMutexUnlock(&pTask->msgInfo.lock); if (!found) { - stError("s-task:%s not found req hash value:%u", pTask->id.idStr, hashValue); + stError("s-task:%s not found req hash value:%u, failed to add dispatch block", pTask->id.idStr, hashValue); return TSDB_CODE_STREAM_INTERNAL_ERROR; } else { return 0; @@ -919,7 +939,7 @@ static int32_t doTaskChkptStatusCheck(SStreamTask* pTask, int32_t num) { } static int32_t doFindNotConfirmUpstream(SArray** ppNotRspList, SArray* pList, int32_t num, int32_t vgId, int32_t level, - const char* id) { + const char* id) { SArray* pTmp = taosArrayInit(4, sizeof(int32_t)); if (pTmp == NULL) { return terrno; @@ -940,8 +960,8 @@ static int32_t doFindNotConfirmUpstream(SArray** ppNotRspList, SArray* pList, in stError("s-task:%s vgId:%d failed to record not rsp task, code: out of memory", id, vgId); return terrno; } else { - stDebug("s-task:%s vgId:%d level:%d checkpoint-ready rsp from upstream:0x%x not confirmed yet", id, vgId, - level, pInfo->upstreamTaskId); + stDebug("s-task:%s vgId:%d level:%d checkpoint-ready rsp from upstream:0x%x not confirmed yet", id, vgId, 
level, + pInfo->upstreamTaskId); } } @@ -987,13 +1007,48 @@ static void doSendChkptReadyMsg(SStreamTask* pTask, SArray* pNotRspList, int64_t } } -static void checkpointReadyMsgSendMonitorFn(void* param, void* tmrId) { +static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, SArray* pNotRspList) { + SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; + SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr; + SArray* pList = pActiveInfo->pReadyMsgList; + int32_t num = taosArrayGetSize(pList); + int32_t vgId = pTask->pMeta->vgId; + int32_t checkpointId = pActiveInfo->activeId; + const char* id = pTask->id.idStr; + int32_t notRsp = 0; + + int32_t code = doTaskChkptStatusCheck(pTask, num); + if (code) { + return code; + } + + code = doFindNotConfirmUpstream(&pNotRspList, pList, num, vgId, pTask->info.taskLevel, id); + if (code) { + int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); + stError("s-task:%s failed to find not rsp checkpoint-ready downstream, code:%s, out of tmr, ref:%d", id, + tstrerror(code), ref); + return code; + } + + notRsp = taosArrayGetSize(pNotRspList); + if (notRsp == 0) { + streamClearChkptReadyMsg(pActiveInfo); + } else { + doSendChkptReadyMsg(pTask, pNotRspList, checkpointId, pList); + } + + return code; +} + +static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) { SStreamTask* pTask = param; int32_t vgId = pTask->pMeta->vgId; const char* id = pTask->id.idStr; SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr; SArray* pNotRspList = NULL; + int32_t code = 0; + int32_t notRsp = 0; // check the status every 100ms if (streamTaskShouldStop(pTask)) { @@ -1004,7 +1059,7 @@ static void checkpointReadyMsgSendMonitorFn(void* param, void* tmrId) { } if (++pTmrInfo->activeCounter < 50) { - streamTmrStart(checkpointReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor"); return; } @@ -1027,45 +1082,26 @@ static void checkpointReadyMsgSendMonitorFn(void* param, void* tmrId) { } streamMutexLock(&pActiveInfo->lock); + code = chkptReadyMsgSendHelper(pTask, pNotRspList); + streamMutexUnlock(&pActiveInfo->lock); - SArray* pList = pActiveInfo->pReadyMsgList; - int32_t num = taosArrayGetSize(pList); - int32_t code = doTaskChkptStatusCheck(pTask, num); - if (code) { - streamMutexUnlock(&pActiveInfo->lock); + if (code != TSDB_CODE_SUCCESS) { streamMetaReleaseTask(pTask->pMeta, pTask); - return; - } - - code = doFindNotConfirmUpstream(&pNotRspList, pList, num, vgId, pTask->info.taskLevel, id); - if (code) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stError("s-task:%s failed to find not rsp checkpoint-ready downstream, code:%s, out of tmr, ref:%d", id, - tstrerror(code), ref); - streamMutexUnlock(&pActiveInfo->lock); - streamMetaReleaseTask(pTask->pMeta, pTask); - taosArrayDestroy(pNotRspList); return; } - int32_t checkpointId = pActiveInfo->activeId; - int32_t notRsp = taosArrayGetSize(pNotRspList); - doSendChkptReadyMsg(pTask, pNotRspList, checkpointId, pList); - + notRsp = taosArrayGetSize(pNotRspList); if (notRsp > 0) { // send checkpoint-ready msg again - streamTmrStart(checkpointReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + stDebug("s-task:%s start to monitor checkpoint-ready msg recv status in 10s", id); + streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, 
&pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor"); - streamMutexUnlock(&pActiveInfo->lock); } else { int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); stDebug( "s-task:%s vgId:%d checkpoint-ready msg confirmed by all upstream task(s), clear checkpoint-ready msg and quit " "from timer, ref:%d", id, vgId, ref); - - streamClearChkptReadyMsg(pActiveInfo); - streamMutexUnlock(&pActiveInfo->lock); // release should be the last execution, since pTask may be destroy after it immidiately. streamMetaReleaseTask(pTask->pMeta, pTask); } @@ -1124,7 +1160,7 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) { stDebug("s-task:%s start checkpoint-ready monitor in 10s, ref:%d ", pTask->id.idStr, ref); streamMetaAcquireOneTask(pTask); - streamTmrStart(checkpointReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor"); // mark the timer monitor checkpointId @@ -1190,6 +1226,7 @@ int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatch taosMemoryFree(buf); return terrno; } + SET_PAYLOAD_LEN(pRetrieve->data, actualLen, actualLen); int32_t payloadLen = actualLen + PAYLOAD_PREFIX_LEN; @@ -1359,29 +1396,11 @@ void initCheckpointReadyInfo(STaskCheckpointReadyInfo* pReadyInfo, int32_t upstr pReadyInfo->childId = childId; } -int32_t streamAddCheckpointReadyMsg(SStreamTask* pTask, int32_t upstreamTaskId, int32_t index, int64_t checkpointId) { - if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { - return TSDB_CODE_SUCCESS; - } - - SStreamUpstreamEpInfo* pInfo = NULL; - streamTaskGetUpstreamTaskEpInfo(pTask, upstreamTaskId, &pInfo); - if (pInfo == NULL) { - return TSDB_CODE_STREAM_TASK_NOT_EXIST; - } - - STaskCheckpointReadyInfo info = {0}; - initCheckpointReadyInfo(&info, pInfo->nodeId, pInfo->taskId, pInfo->childId, &pInfo->epSet, checkpointId); - - stDebug("s-task:%s (level:%d) prepare checkpoint-ready msg to upstream s-task:0x%" PRIx64 "-0x%x (vgId:%d) idx:%d", - pTask->id.idStr, pTask->info.taskLevel, pTask->id.streamId, pInfo->taskId, pInfo->nodeId, index); - +static int32_t doAddChkptReadyMsg(SStreamTask* pTask, STaskCheckpointReadyInfo* pInfo) { SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; - streamMutexLock(&pActiveInfo->lock); - void* px = taosArrayPush(pActiveInfo->pReadyMsgList, &info); + void* px = taosArrayPush(pActiveInfo->pReadyMsgList, pInfo); if (px == NULL) { - streamMutexUnlock(&pActiveInfo->lock); stError("s-task:%s failed to add readyMsg info, code: out of memory", pTask->id.idStr); return terrno; } @@ -1395,10 +1414,36 @@ int32_t streamAddCheckpointReadyMsg(SStreamTask* pTask, int32_t upstreamTaskId, stDebug("s-task:%s %d/%d checkpoint-trigger recv", pTask->id.idStr, numOfRecv, total); } - streamMutexUnlock(&pActiveInfo->lock); return 0; } +int32_t streamAddCheckpointReadyMsg(SStreamTask* pTask, int32_t upstreamTaskId, int32_t index, int64_t checkpointId) { + int32_t code = 0; + STaskCheckpointReadyInfo info = {0}; + + if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { + return TSDB_CODE_SUCCESS; + } + + SStreamUpstreamEpInfo* pInfo = NULL; + streamTaskGetUpstreamTaskEpInfo(pTask, upstreamTaskId, &pInfo); + if (pInfo == NULL) { + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + initCheckpointReadyInfo(&info, pInfo->nodeId, pInfo->taskId, pInfo->childId, &pInfo->epSet, checkpointId); + + stDebug("s-task:%s (level:%d) prepare checkpoint-ready msg to upstream s-task:0x%" PRIx64 "-0x%x (vgId:%d) 
idx:%d", + pTask->id.idStr, pTask->info.taskLevel, pTask->id.streamId, pInfo->taskId, pInfo->nodeId, index); + + SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; + + streamMutexLock(&pActiveInfo->lock); + code = doAddChkptReadyMsg(pTask, &info); + streamMutexUnlock(&pActiveInfo->lock); + return code; +} + void streamClearChkptReadyMsg(SActiveCheckpointInfo* pActiveInfo) { if (pActiveInfo == NULL) { return; diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 0eb87df9b0..2e06813071 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -794,7 +794,7 @@ static int32_t doStreamExecTask(SStreamTask* pTask) { // dispatch checkpoint msg to all downstream tasks int32_t type = pInput->type; if (type == STREAM_INPUT__CHECKPOINT_TRIGGER) { - int32_t code = streamProcessCheckpointTriggerBlock(pTask, (SStreamDataBlock*)pInput); + code = streamProcessCheckpointTriggerBlock(pTask, (SStreamDataBlock*)pInput); if (code != 0) { stError("s-task:%s failed to process checkpoint-trigger block, code:%s", pTask->id.idStr, tstrerror(code)); } diff --git a/source/libs/stream/src/streamHb.c b/source/libs/stream/src/streamHb.c index dde3595eb3..19391bf7a0 100644 --- a/source/libs/stream/src/streamHb.c +++ b/source/libs/stream/src/streamHb.c @@ -42,7 +42,7 @@ static bool waitForEnoughDuration(SMetaHbInfo* pInfo) { static bool existInHbMsg(SStreamHbMsg* pMsg, SDownstreamTaskEpset* pTaskEpset) { int32_t numOfExisted = taosArrayGetSize(pMsg->pUpdateNodes); - for (int k = 0; k < numOfExisted; ++k) { + for (int32_t k = 0; k < numOfExisted; ++k) { if (pTaskEpset->nodeId == *(int32_t*)taosArrayGet(pMsg->pUpdateNodes, k)) { return true; } @@ -56,7 +56,7 @@ static void addUpdateNodeIntoHbMsg(SStreamTask* pTask, SStreamHbMsg* pMsg) { streamMutexLock(&pTask->lock); int32_t num = taosArrayGetSize(pTask->outputInfo.pNodeEpsetUpdateList); - for (int j = 0; j < num; ++j) { + for (int32_t j = 0; j < num; ++j) { SDownstreamTaskEpset* pTaskEpset = taosArrayGet(pTask->outputInfo.pNodeEpsetUpdateList, j); bool exist = existInHbMsg(pMsg, pTaskEpset); diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 63e24b0975..98920e6f70 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -83,12 +83,14 @@ void streamTaskResumeInFuture(SStreamTask* pTask) { ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void streamTaskResumeHelper(void* param, void* tmrId) { - SStreamTask* pTask = (SStreamTask*)param; - SStreamTaskId* pId = &pTask->id; - SStreamTaskState p = streamTaskGetStatus(pTask); + SStreamTask* pTask = (SStreamTask*)param; + SStreamTaskId* pId = &pTask->id; + SStreamTaskState p = streamTaskGetStatus(pTask); + int32_t code = 0; if (p.state == TASK_STATUS__DROPPING || p.state == TASK_STATUS__STOP) { int8_t status = streamTaskSetSchedStatusInactive(pTask); + TAOS_UNUSED(status); int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); stDebug("s-task:%s status:%s not resume task, ref:%d", pId->idStr, p.name, ref); @@ -97,13 +99,12 @@ void streamTaskResumeHelper(void* param, void* tmrId) { return; } - int32_t code = streamTaskSchedTask(pTask->pMsgCb, pTask->info.nodeId, pId->streamId, pId->taskId, STREAM_EXEC_T_RESUME_TASK); + code = streamTaskSchedTask(pTask->pMsgCb, pTask->info.nodeId, pId->streamId, pId->taskId, STREAM_EXEC_T_RESUME_TASK); int32_t ref = 
atomic_sub_fetch_32(&pTask->status.timerActive, 1); if (code) { stError("s-task:%s sched task failed, code:%s, ref:%d", pId->idStr, tstrerror(code), ref); } else { - stDebug("trigger to resume s-task:%s after being idled for %dms, ref:%d", pId->idStr, pTask->status.schedIdleTime, - ref); + stDebug("trigger to resume s-task:%s after idled for %dms, ref:%d", pId->idStr, pTask->status.schedIdleTime, ref); // release the task ref count streamTaskClearSchedIdleInfo(pTask); diff --git a/source/libs/stream/src/streamSessionState.c b/source/libs/stream/src/streamSessionState.c index 23598cf717..7e3d8d59f9 100644 --- a/source/libs/stream/src/streamSessionState.c +++ b/source/libs/stream/src/streamSessionState.c @@ -1060,7 +1060,7 @@ _end: return code; } -int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen) { +int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen) { SSessionKey* pWinKey = pKey; const TSKEY gap = 0; int32_t code = TSDB_CODE_SUCCESS; @@ -1082,21 +1082,27 @@ int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey int32_t size = taosArrayGetSize(pWinStates); if (size == 0) { void* pFileStore = getStateFileStore(pFileState); - void* p = NULL; + void* pRockVal = NULL; - int32_t code_file = getCountWinStateFromDisc(pFileStore, pWinKey, &p, pVLen); + int32_t code_file = getCountWinStateFromDisc(pFileStore, pWinKey, &pRockVal, pVLen); if (code_file == TSDB_CODE_SUCCESS && isFlushedState(pFileState, endTs, 0)) { - (*pVal) = createSessionWinBuff(pFileState, pWinKey, p, pVLen); - if (!(*pVal)) { - code = TSDB_CODE_OUT_OF_MEMORY; + int32_t valSize = *pVLen; + COUNT_TYPE* pWinStateCount = (COUNT_TYPE*)((char*)(pRockVal) + (valSize - sizeof(COUNT_TYPE))); + if ((*pWinStateCount) == winCount) { + code = addNewSessionWindow(pFileState, pWinStates, pWinKey, (SRowBuffPos**)pVal); QUERY_CHECK_CODE(code, lino, _end); - } - - qDebug("===stream===0 get state win:%" PRId64 ",%" PRId64 " from disc, res %d", pWinKey->win.skey, + } else { + (*pVal) = createSessionWinBuff(pFileState, pWinKey, pRockVal, pVLen); + if (!(*pVal)) { + code = TSDB_CODE_OUT_OF_MEMORY; + QUERY_CHECK_CODE(code, lino, _end); + } + qDebug("===stream===0 get state win:%" PRId64 ",%" PRId64 " from disc, res %d", pWinKey->win.skey, pWinKey->win.ekey, code_file); + } } else { code = addNewSessionWindow(pFileState, pWinStates, pWinKey, (SRowBuffPos**)pVal); - taosMemoryFree(p); + taosMemoryFree(pRockVal); QUERY_CHECK_CODE(code, lino, _end); } } else { diff --git a/source/libs/stream/src/streamStartHistory.c b/source/libs/stream/src/streamStartHistory.c index b376dbd16b..4d7bf2ba87 100644 --- a/source/libs/stream/src/streamStartHistory.c +++ b/source/libs/stream/src/streamStartHistory.c @@ -64,7 +64,6 @@ static int32_t streamTaskSetReady(SStreamTask* pTask) { int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated) { SStreamScanHistoryReq req; - int32_t code = 0; initScanHistoryReq(pTask, &req, igUntreated); int32_t len = sizeof(SStreamScanHistoryReq); @@ -173,7 +172,7 @@ int32_t streamTaskOnScanHistoryTaskReady(SStreamTask* pTask) { code = streamTaskStartScanHistory(pTask); } - // NOTE: there will be an deadlock if launch fill history here. + // NOTE: there will be a deadlock if launch fill history here. 
// start the related fill-history task, when current task is ready // if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { // streamLaunchFillHistoryTask(pTask); @@ -219,7 +218,7 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) { stDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", idStr, hStreamId, hTaskId); - // Set the execute conditions, including the query time window and the version range + // Set the execution conditions, including the query time window and the version range streamMetaRLock(pMeta); SStreamTask** pHTask = taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id)); streamMetaRUnLock(pMeta); diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 1801c6e029..1994c882aa 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -545,6 +545,6 @@ int32_t streamStateCountWinAddIfNotExist(SStreamState* pState, SSessionKey* pKey return getCountWinResultBuff(pState->pFileState, pKey, winCount, ppVal, pVLen, pWinCode); } -int32_t streamStateCountWinAdd(SStreamState* pState, SSessionKey* pKey, void** pVal, int32_t* pVLen) { - return createCountWinResultBuff(pState->pFileState, pKey, pVal, pVLen); +int32_t streamStateCountWinAdd(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen) { + return createCountWinResultBuff(pState->pFileState, pKey, winCount, pVal, pVLen); } diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index f7a88746ec..cf5f1b2b91 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -445,7 +445,9 @@ _end: } int32_t clearRowBuff(SStreamFileState* pFileState) { - clearExpiredRowBuff(pFileState, pFileState->maxTs - pFileState->deleteMark, false); + if (pFileState->deleteMark != INT64_MAX) { + clearExpiredRowBuff(pFileState, pFileState->maxTs - pFileState->deleteMark, false); + } if (isListEmpty(pFileState->freeBuffs)) { return flushRowBuff(pFileState); } diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c index a4cfa69459..7d7868f3cd 100644 --- a/source/libs/transport/src/thttp.c +++ b/source/libs/transport/src/thttp.c @@ -677,7 +677,7 @@ static void httpHandleReq(SHttpMsg* msg) { tError("http-report failed to connect to http-server,dst:%s:%d, chanId:%" PRId64 ", seq:%" PRId64 ", reson:%s", cli->addr, cli->port, chanId, cli->seq, uv_strerror(ret)); httpFailFastMayUpdate(http->connStatusTable, cli->addr, cli->port, 0); - destroyHttpClient(cli); + uv_close((uv_handle_t*)&cli->tcp, httpDestroyClientCb); } TAOS_UNUSED(taosReleaseRef(httpRefMgt, chanId)); return; diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 9943fd1701..9ade5e5638 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -796,7 +796,7 @@ int32_t walMetaSerialize(SWal* pWal, char** serialized) { TAOS_RETURN(TSDB_CODE_OUT_OF_MEMORY); } - if (cJSON_AddItemToObject(pRoot, "meta", pMeta) != 0) { + if (!cJSON_AddItemToObject(pRoot, "meta", pMeta)) { wInfo("vgId:%d, failed to add meta to root", pWal->cfg.vgId); } (void)sprintf(buf, "%" PRId64, pWal->vers.firstVer); @@ -816,13 +816,13 @@ int32_t walMetaSerialize(SWal* pWal, char** serialized) { wInfo("vgId:%d, failed to add lastVer to meta", pWal->cfg.vgId); } - if (cJSON_AddItemToObject(pRoot, "files", pFiles) != 0) { + if (!cJSON_AddItemToObject(pRoot, "files", pFiles)) { wInfo("vgId:%d, failed to add files to 
root", pWal->cfg.vgId); } SWalFileInfo* pData = pWal->fileInfoSet->pData; for (int i = 0; i < sz; i++) { SWalFileInfo* pInfo = &pData[i]; - if (cJSON_AddItemToArray(pFiles, pField = cJSON_CreateObject()) != 0) { + if (!cJSON_AddItemToArray(pFiles, pField = cJSON_CreateObject())) { wInfo("vgId:%d, failed to add field to files", pWal->cfg.vgId); } if (pField == NULL) { diff --git a/source/util/src/tlrucache.c b/source/util/src/tlrucache.c index fbd17dd023..69832cd46c 100644 --- a/source/util/src/tlrucache.c +++ b/source/util/src/tlrucache.c @@ -38,18 +38,19 @@ enum { }; struct SLRUEntry { - void *value; - _taos_lru_deleter_t deleter; - void *ud; - SLRUEntry *nextHash; - SLRUEntry *next; - SLRUEntry *prev; - size_t totalCharge; - size_t keyLength; - uint32_t hash; - uint32_t refs; - uint8_t flags; - char keyData[1]; + void *value; + _taos_lru_deleter_t deleter; + _taos_lru_overwriter_t overwriter; + void *ud; + SLRUEntry *nextHash; + SLRUEntry *next; + SLRUEntry *prev; + size_t totalCharge; + size_t keyLength; + uint32_t hash; + uint32_t refs; + uint8_t flags; + char keyData[1]; }; #define TAOS_LRU_ENTRY_IN_CACHE(h) ((h)->flags & TAOS_LRU_IN_CACHE) @@ -403,6 +404,10 @@ static LRUStatus taosLRUCacheShardInsertEntry(SLRUCacheShard *shard, SLRUEntry * if (old != NULL) { status = TAOS_LRU_STATUS_OK_OVERWRITTEN; + if (old->overwriter) { + (*old->overwriter)(old->keyData, old->keyLength, old->value, old->ud); + } + TAOS_LRU_ENTRY_SET_IN_CACHE(old, false); if (!TAOS_LRU_ENTRY_HAS_REFS(old)) { taosLRUCacheShardLRURemove(shard, old); @@ -440,8 +445,9 @@ _exit: } static LRUStatus taosLRUCacheShardInsert(SLRUCacheShard *shard, const void *key, size_t keyLen, uint32_t hash, - void *value, size_t charge, _taos_lru_deleter_t deleter, LRUHandle **handle, - LRUPriority priority, void *ud) { + void *value, size_t charge, _taos_lru_deleter_t deleter, + _taos_lru_overwriter_t overwriter, LRUHandle **handle, LRUPriority priority, + void *ud) { SLRUEntry *e = taosMemoryCalloc(1, sizeof(SLRUEntry) - 1 + keyLen); if (!e) { if (deleter) { @@ -453,6 +459,7 @@ static LRUStatus taosLRUCacheShardInsert(SLRUCacheShard *shard, const void *key, e->value = value; e->flags = 0; e->deleter = deleter; + e->overwriter = overwriter; e->ud = ud; e->keyLength = keyLen; e->hash = hash; @@ -726,12 +733,12 @@ void taosLRUCacheCleanup(SLRUCache *cache) { } LRUStatus taosLRUCacheInsert(SLRUCache *cache, const void *key, size_t keyLen, void *value, size_t charge, - _taos_lru_deleter_t deleter, LRUHandle **handle, LRUPriority priority, void *ud) { + _taos_lru_deleter_t deleter, _taos_lru_overwriter_t overwriter, LRUHandle **handle, LRUPriority priority, void *ud) { uint32_t hash = TAOS_LRU_CACHE_SHARD_HASH32(key, keyLen); uint32_t shardIndex = hash & cache->shardedCache.shardMask; - return taosLRUCacheShardInsert(&cache->shards[shardIndex], key, keyLen, hash, value, charge, deleter, handle, - priority, ud); + return taosLRUCacheShardInsert(&cache->shards[shardIndex], key, keyLen, hash, value, charge, deleter, overwriter, + handle, priority, ud); } LRUHandle *taosLRUCacheLookup(SLRUCache *cache, const void *key, size_t keyLen) { @@ -869,4 +876,4 @@ bool taosLRUCacheIsStrictCapacity(SLRUCache *cache) { (void)taosThreadMutexUnlock(&cache->shardedCache.capacityMutex); return strict; -} \ No newline at end of file +} diff --git a/tests/docs-examples-test/c.sh b/tests/docs-examples-test/c.sh new file mode 100644 index 0000000000..54e334b22e --- /dev/null +++ b/tests/docs-examples-test/c.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +pgrep taosd || taosd 
>> /dev/null 2>&1 & +pgrep taosadapter || taosadapter >> /dev/null 2>&1 & + +GREEN='\033[0;32m' +RED='\033[0;31m' +NC='\033[0m' + +TEST_PATH="../../docs/examples/c" +echo "setting TEST_PATH: $TEST_PATH" + +cd "${TEST_PATH}" || { echo -e "${RED}Failed to change directory to ${TEST_PATH}${NC}"; exit 1; } + +LOG_FILE="docs-c-test-out.log" + +> $LOG_FILE + +make > "$LOG_FILE" 2>&1 + +if [ $? -eq 0 ]; then + echo -e "${GREEN}Make completed successfully.${NC}" +else + echo -e "${RED}Make failed. Check log file: $LOG_FILE${NC}" + cat "$LOG_FILE" + exit 1 +fi + + +declare -a TEST_EXES=( + "connect_example" + "create_db_demo" + "insert_data_demo" + "query_data_demo" + "with_reqid_demo" + "stmt_insert_demo" + "tmq_demo" + "sml_insert_demo" +) + +declare -a NEED_CLEAN=( + "true" + "false" + "false" + "false" + "false" + "false" + "false" + "true" +) + +totalCases=0 +totalFailed=0 +totalSuccess=0 + +for i in "${!TEST_EXES[@]}"; do + TEST_EXE="${TEST_EXES[$i]}" + NEED_CLEAN_FLAG="${NEED_CLEAN[$i]}" + + if [ "$NEED_CLEAN_FLAG" = "true" ]; then + echo "Cleaning database before executing $TEST_EXE..." + taos -s "drop database if exists power" >> $LOG_FILE 2>&1 + fi + + echo "Executing $TEST_EXE..." + ./$TEST_EXE >> $LOG_FILE 2>&1 + RESULT=$? + + if [ "$RESULT" -eq 0 ]; then + totalSuccess=$((totalSuccess + 1)) + echo "[$GREEN OK $NC] $TEST_EXE executed successfully." + else + totalFailed=$((totalFailed + 1)) + echo "[$RED FAILED $NC] $TEST_EXE exited with code $RESULT." + fi + + totalCases=$((totalCases + 1)) +done + +tail -n 40 $LOG_FILE + +echo -e "\nTotal number of cases executed: $totalCases" +if [ "$totalSuccess" -gt "0" ]; then + echo -e "\n${GREEN} ### Total $totalSuccess C case(s) succeed! ### ${NC}" +fi + +if [ "$totalFailed" -ne "0" ]; then + echo -e "\n${RED} ### Total $totalFailed C case(s) failed! ### ${NC}" + exit 1 +fi + +echo "All tests completed." \ No newline at end of file diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index c70989b5a8..72b470f509 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1555,6 +1555,7 @@ ,,n,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/telnet_tcp.py -R #docs-examples test +,,n,docs-examples-test,bash c.sh ,,n,docs-examples-test,bash python.sh ,,n,docs-examples-test,bash node.sh ,,n,docs-examples-test,bash csharp.sh
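
For context on what the new docs-examples-test runner (c.sh) builds and executes: below is a minimal sketch in the spirit of the connect_example target compiled by the Makefile above. It is illustrative only and not part of this patch; it assumes the standard TDengine C client API (taos_connect, taos_errstr, taos_close, taos_cleanup), the default root/taosdata credentials, and the default server port 6030 — the actual connect_example.c under docs/examples/c may differ.

// Illustrative sketch only -- not part of this patch. Assumes the standard
// TDengine C client API and default credentials/port; link with -ltaos.
#include <stdio.h>
#include <stdlib.h>
#include "taos.h"

int main(void) {
  // Connect to a locally running taosd; 6030 is the default port and
  // root/taosdata the default credentials (assumptions, adjust as needed).
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) {
    // taos_errstr(NULL) reports the error of the failed connect attempt.
    fprintf(stderr, "failed to connect to server, reason: %s\n", taos_errstr(NULL));
    taos_cleanup();
    return EXIT_FAILURE;
  }

  printf("connected to server successfully\n");

  // Release the connection and client-library resources before exiting.
  taos_close(conn);
  taos_cleanup();
  return EXIT_SUCCESS;
}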