Merge branch '3.0' of https://github.com/taosdata/TDengine into enh/tsdb_optimize
This commit is contained in:
commit
89eafa18cb
|
@ -36,7 +36,7 @@ You can use below command to setup Grafana alert notification.
|
||||||
An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
|
An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
||||||
```
|
```
|
||||||
|
|
||||||
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
|
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
|
||||||
|
@ -274,7 +274,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -288,7 +288,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -302,7 +302,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
|
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
|
||||||
|
@ -330,7 +330,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
### taosadapter\_system\_cpu\_percent table
|
### taosadapter\_system\_cpu\_percent table
|
||||||
|
@ -340,6 +340,6 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||mertic value|
|
|gauge|DOUBLE||metric value|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
|
|
|
@ -149,7 +149,7 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
|
||||||
Use the `uid` value obtained above as `-E` input.
|
Use the `uid` value obtained above as `-E` input.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||||
```
|
```
|
||||||
|
|
||||||
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
|
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
|
||||||
|
|
|
@ -177,7 +177,7 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
||||||
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
|
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
|
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
|
||||||
```
|
```
|
||||||
|
|
||||||
我们提供了一个“-E”选项,用于从命令行配置 TDinsight 使用现有的通知通道(Notification Channel)。假设你的 Grafana 用户和密码是 `admin:admin`,使用以下命令获取已有的通知通道的`uid`:
|
我们提供了一个“-E”选项,用于从命令行配置 TDinsight 使用现有的通知通道(Notification Channel)。假设你的 Grafana 用户和密码是 `admin:admin`,使用以下命令获取已有的通知通道的`uid`:
|
||||||
|
@ -189,7 +189,7 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
|
||||||
使用上面获取的 `uid` 值作为 `-E` 输入。
|
使用上面获取的 `uid` 值作为 `-E` 输入。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||||
```
|
```
|
||||||
|
|
||||||
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
|
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
|
||||||
|
|
|
@ -32,7 +32,7 @@ chmod +x TDinsight.sh
|
||||||
- 使用已经存在的 Grafana Notification Channel `uid`,参数 `-E`。该参数可以使用 `curl -u admin:admin localhost:3000/api/alert-notifications |jq` 来获取。
|
- 使用已经存在的 Grafana Notification Channel `uid`,参数 `-E`。该参数可以使用 `curl -u admin:admin localhost:3000/api/alert-notifications |jq` 来获取。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
||||||
```
|
```
|
||||||
|
|
||||||
运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`。
|
运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`。
|
||||||
|
@ -270,7 +270,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -284,7 +284,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -298,7 +298,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
### taosadapter\_restful\_http\_request\_summary\_milliseconds 表
|
### taosadapter\_restful\_http\_request\_summary\_milliseconds 表
|
||||||
|
@ -326,7 +326,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
||||||
### taosadapter\_system\_cpu\_percent 表
|
### taosadapter\_system\_cpu\_percent 表
|
||||||
|
@ -336,5 +336,5 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadapter endpoint|
|
||||||
|
|
|
@ -247,4 +247,10 @@ launchctl limit maxfiles
|
||||||
该提示是创建 db 的 vnode 数量不够了,需要的 vnode 不能超过了 dnode 中 vnode 的上限。因为系统默认是一个 dnode 中有 CPU 核数两倍的 vnode,也可以通过配置文件中的参数 supportVnodes 控制。
|
该提示是创建 db 的 vnode 数量不够了,需要的 vnode 不能超过了 dnode 中 vnode 的上限。因为系统默认是一个 dnode 中有 CPU 核数两倍的 vnode,也可以通过配置文件中的参数 supportVnodes 控制。
|
||||||
正常调大 taos.cfg 中 supportVnodes 参数即可。
|
正常调大 taos.cfg 中 supportVnodes 参数即可。
|
||||||
|
|
||||||
|
### 21 【查询】在服务器上使用 taos-CLI 能查到指定时间段的数据,但在客户端机器上查不到?
|
||||||
|
这种情况是因为客户端与服务器上设置的时区不一致导致的,调整客户端与服务器的时区一致即可解决。
|
||||||
|
|
||||||
|
### 22 【表名】表名确认是存在的,但写入或查询时报表不存在错误,非常奇怪,什么原因?
|
||||||
|
TDengine 中的所有名称,包括数据库名、表名等都是区分大小写的,如果这些名称在程序或 taos-CLI 中没有使用反引号(`)括起来使用,即使你输入的是大写的,引擎也会转化成小写来使用,如果名称前后加上了反引号,引擎就不会再转化成小写,会保持原样来使用。
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -351,6 +351,7 @@ typedef struct {
|
||||||
rocksdb_writeoptions_t *writeoptions;
|
rocksdb_writeoptions_t *writeoptions;
|
||||||
rocksdb_readoptions_t *readoptions;
|
rocksdb_readoptions_t *readoptions;
|
||||||
rocksdb_writebatch_t *writebatch;
|
rocksdb_writebatch_t *writebatch;
|
||||||
|
rocksdb_writebatch_t *rwritebatch;
|
||||||
TdThreadMutex rMutex;
|
TdThreadMutex rMutex;
|
||||||
STSchema *pTSchema;
|
STSchema *pTSchema;
|
||||||
} SRocksCache;
|
} SRocksCache;
|
||||||
|
|
|
@ -164,8 +164,10 @@ static int32_t tsdbOpenRocksCache(STsdb *pTsdb) {
|
||||||
}
|
}
|
||||||
|
|
||||||
rocksdb_writebatch_t *writebatch = rocksdb_writebatch_create();
|
rocksdb_writebatch_t *writebatch = rocksdb_writebatch_create();
|
||||||
|
rocksdb_writebatch_t *rwritebatch = rocksdb_writebatch_create();
|
||||||
|
|
||||||
pTsdb->rCache.writebatch = writebatch;
|
pTsdb->rCache.writebatch = writebatch;
|
||||||
|
pTsdb->rCache.rwritebatch = rwritebatch;
|
||||||
pTsdb->rCache.my_comparator = cmp;
|
pTsdb->rCache.my_comparator = cmp;
|
||||||
pTsdb->rCache.options = options;
|
pTsdb->rCache.options = options;
|
||||||
pTsdb->rCache.writeoptions = writeoptions;
|
pTsdb->rCache.writeoptions = writeoptions;
|
||||||
|
@ -198,6 +200,7 @@ static void tsdbCloseRocksCache(STsdb *pTsdb) {
|
||||||
rocksdb_close(pTsdb->rCache.db);
|
rocksdb_close(pTsdb->rCache.db);
|
||||||
rocksdb_flushoptions_destroy(pTsdb->rCache.flushoptions);
|
rocksdb_flushoptions_destroy(pTsdb->rCache.flushoptions);
|
||||||
rocksdb_writebatch_destroy(pTsdb->rCache.writebatch);
|
rocksdb_writebatch_destroy(pTsdb->rCache.writebatch);
|
||||||
|
rocksdb_writebatch_destroy(pTsdb->rCache.rwritebatch);
|
||||||
rocksdb_readoptions_destroy(pTsdb->rCache.readoptions);
|
rocksdb_readoptions_destroy(pTsdb->rCache.readoptions);
|
||||||
rocksdb_writeoptions_destroy(pTsdb->rCache.writeoptions);
|
rocksdb_writeoptions_destroy(pTsdb->rCache.writeoptions);
|
||||||
rocksdb_options_destroy(pTsdb->rCache.options);
|
rocksdb_options_destroy(pTsdb->rCache.options);
|
||||||
|
@ -208,14 +211,18 @@ static void tsdbCloseRocksCache(STsdb *pTsdb) {
|
||||||
taosMemoryFree(pTsdb->rCache.pTSchema);
|
taosMemoryFree(pTsdb->rCache.pTSchema);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void rocksMayWrite(STsdb *pTsdb, bool force) {
|
static void rocksMayWrite(STsdb *pTsdb, bool force, bool read) {
|
||||||
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
|
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
|
||||||
|
if (read) {
|
||||||
if (force || rocksdb_writebatch_count(wb) >= 1024) {
|
wb = pTsdb->rCache.rwritebatch;
|
||||||
|
}
|
||||||
|
int count = rocksdb_writebatch_count(wb);
|
||||||
|
if ((force && count > 0) || count >= 1024) {
|
||||||
char *err = NULL;
|
char *err = NULL;
|
||||||
rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err);
|
rocksdb_write(pTsdb->rCache.db, pTsdb->rCache.writeoptions, wb, &err);
|
||||||
if (NULL != err) {
|
if (NULL != err) {
|
||||||
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
|
tsdbError("vgId:%d, %s failed at line %d, count: %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, count,
|
||||||
|
err);
|
||||||
rocksdb_free(err);
|
rocksdb_free(err);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -227,7 +234,8 @@ int32_t tsdbCacheCommit(STsdb *pTsdb) {
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
char *err = NULL;
|
char *err = NULL;
|
||||||
|
|
||||||
rocksMayWrite(pTsdb, true);
|
rocksMayWrite(pTsdb, true, false);
|
||||||
|
rocksMayWrite(pTsdb, true, true);
|
||||||
rocksdb_flush(pTsdb->rCache.db, pTsdb->rCache.flushoptions, &err);
|
rocksdb_flush(pTsdb->rCache.db, pTsdb->rCache.flushoptions, &err);
|
||||||
if (NULL != err) {
|
if (NULL != err) {
|
||||||
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
|
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
|
||||||
|
@ -453,7 +461,7 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
|
||||||
taosMemoryFree(values_list);
|
taosMemoryFree(values_list);
|
||||||
taosMemoryFree(values_list_sizes);
|
taosMemoryFree(values_list_sizes);
|
||||||
|
|
||||||
rocksMayWrite(pTsdb, false);
|
rocksMayWrite(pTsdb, false, false);
|
||||||
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
|
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
|
||||||
|
|
||||||
_exit:
|
_exit:
|
||||||
|
@ -537,7 +545,7 @@ int32_t tsdbCacheGetSlow(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheR
|
||||||
}
|
}
|
||||||
|
|
||||||
// store result back to rocks cache
|
// store result back to rocks cache
|
||||||
wb = pTsdb->rCache.writebatch;
|
wb = pTsdb->rCache.rwritebatch;
|
||||||
char *value = NULL;
|
char *value = NULL;
|
||||||
size_t vlen = 0;
|
size_t vlen = 0;
|
||||||
tsdbCacheSerialize(pLastCol, &value, &vlen);
|
tsdbCacheSerialize(pLastCol, &value, &vlen);
|
||||||
|
@ -552,7 +560,7 @@ int32_t tsdbCacheGetSlow(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheR
|
||||||
}
|
}
|
||||||
|
|
||||||
if (wb) {
|
if (wb) {
|
||||||
rocksMayWrite(pTsdb, false);
|
rocksMayWrite(pTsdb, false, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
|
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
|
||||||
|
@ -602,7 +610,7 @@ static SLastCol *tsdbCacheLoadCol(STsdb *pTsdb, SCacheRowsReader *pr, int16_t sl
|
||||||
}
|
}
|
||||||
|
|
||||||
// store result back to rocks cache
|
// store result back to rocks cache
|
||||||
wb = pTsdb->rCache.writebatch;
|
wb = pTsdb->rCache.rwritebatch;
|
||||||
char *value = NULL;
|
char *value = NULL;
|
||||||
size_t vlen = 0;
|
size_t vlen = 0;
|
||||||
tsdbCacheSerialize(pLastCol, &value, &vlen);
|
tsdbCacheSerialize(pLastCol, &value, &vlen);
|
||||||
|
@ -620,7 +628,7 @@ static SLastCol *tsdbCacheLoadCol(STsdb *pTsdb, SCacheRowsReader *pr, int16_t sl
|
||||||
}
|
}
|
||||||
|
|
||||||
if (wb) {
|
if (wb) {
|
||||||
rocksMayWrite(pTsdb, false);
|
rocksMayWrite(pTsdb, false, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
|
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
|
||||||
|
@ -671,6 +679,13 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
|
||||||
pLastCol = &noneCol;
|
pLastCol = &noneCol;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
taosArraySet(pLastArray, idxKey->idx, pLastCol);
|
||||||
|
// taosArrayRemove(remainCols, i);
|
||||||
|
|
||||||
|
if (!pTmpColArray) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
|
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
|
||||||
*pTmpLastCol = *pLastCol;
|
*pTmpLastCol = *pLastCol;
|
||||||
pLastCol = pTmpLastCol;
|
pLastCol = pTmpLastCol;
|
||||||
|
@ -687,7 +702,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
|
||||||
}
|
}
|
||||||
|
|
||||||
// store result back to rocks cache
|
// store result back to rocks cache
|
||||||
wb = pTsdb->rCache.writebatch;
|
wb = pTsdb->rCache.rwritebatch;
|
||||||
char *value = NULL;
|
char *value = NULL;
|
||||||
size_t vlen = 0;
|
size_t vlen = 0;
|
||||||
tsdbCacheSerialize(pLastCol, &value, &vlen);
|
tsdbCacheSerialize(pLastCol, &value, &vlen);
|
||||||
|
@ -696,13 +711,10 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
|
||||||
size_t klen = ROCKS_KEY_LEN;
|
size_t klen = ROCKS_KEY_LEN;
|
||||||
rocksdb_writebatch_put(wb, (char *)key, klen, value, vlen);
|
rocksdb_writebatch_put(wb, (char *)key, klen, value, vlen);
|
||||||
taosMemoryFree(value);
|
taosMemoryFree(value);
|
||||||
|
|
||||||
taosArraySet(pLastArray, idxKey->idx, pLastCol);
|
|
||||||
// taosArrayRemove(remainCols, i);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (wb) {
|
if (wb) {
|
||||||
rocksMayWrite(pTsdb, false);
|
rocksMayWrite(pTsdb, false, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
taosArrayDestroy(pTmpColArray);
|
taosArrayDestroy(pTmpColArray);
|
||||||
|
@ -932,7 +944,7 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
|
||||||
size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
|
size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
|
||||||
char **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *));
|
char **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *));
|
||||||
taosThreadMutexLock(&pTsdb->rCache.rMutex);
|
taosThreadMutexLock(&pTsdb->rCache.rMutex);
|
||||||
rocksMayWrite(pTsdb, true);
|
rocksMayWrite(pTsdb, true, false);
|
||||||
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list,
|
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list,
|
||||||
keys_list_sizes, values_list, values_list_sizes, errs);
|
keys_list_sizes, values_list, values_list_sizes, errs);
|
||||||
for (int i = 0; i < num_keys; ++i) {
|
for (int i = 0; i < num_keys; ++i) {
|
||||||
|
@ -971,7 +983,7 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
|
||||||
taosMemoryFree(values_list);
|
taosMemoryFree(values_list);
|
||||||
taosMemoryFree(values_list_sizes);
|
taosMemoryFree(values_list_sizes);
|
||||||
|
|
||||||
rocksMayWrite(pTsdb, true);
|
rocksMayWrite(pTsdb, true, false);
|
||||||
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
|
taosThreadMutexUnlock(&pTsdb->rCache.rMutex);
|
||||||
|
|
||||||
_exit:
|
_exit:
|
||||||
|
|
|
@ -3112,6 +3112,10 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
|
||||||
// load the last data block of current table
|
// load the last data block of current table
|
||||||
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter;
|
STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter;
|
||||||
if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pScanInfo->uid, sizeof(pScanInfo->uid))) {
|
if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pScanInfo->uid, sizeof(pScanInfo->uid))) {
|
||||||
|
// reset the index in last block when handing a new file
|
||||||
|
doCleanupTableScanInfo(pScanInfo);
|
||||||
|
pStatus->mapDataCleaned = true;
|
||||||
|
|
||||||
bool hasNexTable = moveToNextTable(pUidList, pStatus);
|
bool hasNexTable = moveToNextTable(pUidList, pStatus);
|
||||||
if (!hasNexTable) {
|
if (!hasNexTable) {
|
||||||
return TSDB_CODE_SUCCESS;
|
return TSDB_CODE_SUCCESS;
|
||||||
|
|
|
@ -2195,6 +2195,13 @@ static bool lastRowScanOptMayBeOptimized(SLogicNode* pNode) {
|
||||||
FOREACH(pFunc, ((SAggLogicNode*)pNode)->pAggFuncs) {
|
FOREACH(pFunc, ((SAggLogicNode*)pNode)->pAggFuncs) {
|
||||||
SFunctionNode* pAggFunc = (SFunctionNode*)pFunc;
|
SFunctionNode* pAggFunc = (SFunctionNode*)pFunc;
|
||||||
if (FUNCTION_TYPE_LAST == pAggFunc->funcType) {
|
if (FUNCTION_TYPE_LAST == pAggFunc->funcType) {
|
||||||
|
SNode* pPar = nodesListGetNode(pAggFunc->pParameterList, 0);
|
||||||
|
if (QUERY_NODE_COLUMN == nodeType(pPar)) {
|
||||||
|
SColumnNode* pCol = (SColumnNode*)pPar;
|
||||||
|
if (pCol->colType == COLUMN_TYPE_TAG) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
if (hasSelectFunc || QUERY_NODE_VALUE == nodeType(nodesListGetNode(pAggFunc->pParameterList, 0))) {
|
if (hasSelectFunc || QUERY_NODE_VALUE == nodeType(nodesListGetNode(pAggFunc->pParameterList, 0))) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
@ -2295,6 +2302,7 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic
|
||||||
if (NULL != cxt.pLastCols) {
|
if (NULL != cxt.pLastCols) {
|
||||||
cxt.doAgg = false;
|
cxt.doAgg = false;
|
||||||
lastRowScanOptSetLastTargets(pScan->pScanCols, cxt.pLastCols, true);
|
lastRowScanOptSetLastTargets(pScan->pScanCols, cxt.pLastCols, true);
|
||||||
|
nodesWalkExprs(pScan->pScanPseudoCols, lastRowScanOptSetColDataType, &cxt);
|
||||||
lastRowScanOptSetLastTargets(pScan->node.pTargets, cxt.pLastCols, false);
|
lastRowScanOptSetLastTargets(pScan->node.pTargets, cxt.pLastCols, false);
|
||||||
nodesClearList(cxt.pLastCols);
|
nodesClearList(cxt.pLastCols);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue