Merge branch '3.0' into feature/3.0_interval_hash_optimize
This commit is contained in:
commit
657d3deac9
|
@ -4,7 +4,7 @@ sidebar_label: Documentation Home
|
||||||
slug: /
|
slug: /
|
||||||
---
|
---
|
||||||
|
|
||||||
TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
|
TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
|
||||||
|
|
||||||
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
|
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
|
||||||
|
|
||||||
|
@ -22,6 +22,8 @@ If you want to know more about TDengine tools, the REST API, and connectors for
|
||||||
|
|
||||||
If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
|
If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
|
||||||
|
|
||||||
|
To get a more general introduction to time-series databases, please read through [a series of articles](https://tdengine.com/tsdb/). To learn more about the competitive advantages of TDengine, please read through [a series of blogs](https://tdengine.com/tdengine/).
|
||||||
|
|
||||||
TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
|
TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
|
||||||
|
|
||||||
Together, we make a difference!
|
Together, we make a difference!
|
||||||
|
|
|
@ -3,7 +3,7 @@ title: Introduction
|
||||||
toc_max_heading_level: 2
|
toc_max_heading_level: 2
|
||||||
---
|
---
|
||||||
|
|
||||||
TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
|
TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [high-performance](https://tdengine.com/tdengine/high-performance-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
|
||||||
|
|
||||||
This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
|
This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
|
||||||
|
|
||||||
|
@ -43,7 +43,7 @@ For more details on features, please read through the entire documentation.
|
||||||
|
|
||||||
## Competitive Advantages
|
## Competitive Advantages
|
||||||
|
|
||||||
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases, with the following advantages.
|
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb), with the following advantages.
|
||||||
|
|
||||||
- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
|
- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
|
||||||
|
|
||||||
|
@ -127,3 +127,8 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
|
||||||
- [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html)
|
- [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html)
|
||||||
- [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html)
|
- [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html)
|
||||||
- [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html)
|
- [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html)
|
||||||
|
|
||||||
|
## More readings
|
||||||
|
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
|
||||||
|
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,98 @@
|
||||||
|
---
|
||||||
|
sidebar_label: JupyterLab
|
||||||
|
title: Connect JupyterLab to TDengine
|
||||||
|
---
|
||||||
|
|
||||||
|
JupyterLab is the next generation of the ubiquitous Jupyter Notebook. In this note we show you how to install the TDengine Python connector to connect to TDengine in JupyterLab. You can then insert data and perform queries against the TDengine instance within JupyterLab.
|
||||||
|
|
||||||
|
## Install JupyterLab
|
||||||
|
Installing JupyterLab is very easy. Installation instructions can be found at:
|
||||||
|
|
||||||
|
https://jupyterlab.readthedocs.io/en/stable/getting_started/installation.html.
|
||||||
|
|
||||||
|
If you don't feel like clicking on the link, here are the instructions.
|
||||||
|
Jupyter's preferred Python package manager is pip, so we show the instructions for pip.
|
||||||
|
You can also use **conda** or **pipenv** if you are managing Python environments.
|
||||||
|
````
|
||||||
|
pip install jupyterlab
|
||||||
|
````
|
||||||
|
|
||||||
|
For **conda** you can run:
|
||||||
|
````
|
||||||
|
conda install -c conda-forge jupyterlab
|
||||||
|
````
|
||||||
|
|
||||||
|
For **pipenv** you can run:
|
||||||
|
````
|
||||||
|
pipenv install jupyterlab
|
||||||
|
pipenv shell
|
||||||
|
````
|
||||||
|
|
||||||
|
## Run JupyterLab
|
||||||
|
You can start JupyterLab from the command line by running:
|
||||||
|
````
|
||||||
|
jupyter lab
|
||||||
|
````
|
||||||
|
This will automatically launch your default browser and connect to your JupyterLab instance, usually on port 8888.
|
||||||
|
|
||||||
|
## Install the TDengine Python connector
|
||||||
|
You can now install the TDengine Python connector as follows.
|
||||||
|
|
||||||
|
Start a new Python kernel in JupyterLab.
|
||||||
|
|
||||||
|
If using **conda** run the following:
|
||||||
|
````
|
||||||
|
# Install a conda package in the current Jupyter kernel
|
||||||
|
import sys
|
||||||
|
!conda install --yes --prefix {sys.prefix} taospy
|
||||||
|
````
|
||||||
|
If using **pip** run the following:
|
||||||
|
````
|
||||||
|
# Install a pip package in the current Jupyter kernel
|
||||||
|
import sys
|
||||||
|
!{sys.executable} -m pip install taospy
|
||||||
|
````
|
||||||
|
|
||||||
|
## Connect to TDengine
|
||||||
|
You can find detailed examples of how to use the Python connector in the TDengine documentation.
|
||||||
|
Once you have installed the TDengine Python connector in your JupyterLab kernel, the process of connecting to TDengine is the same as that you would use if you weren't using JupyterLab.
|
||||||
|
Each TDengine instance has a database called "log" which contains monitoring information about the TDengine instance.
|
||||||
|
In the "log" database there is a [supertable](https://docs.tdengine.com/taos-sql/stable/) called "disks_info".
|
||||||
|
|
||||||
|
The structure of this table is as follows:
|
||||||
|
````
|
||||||
|
taos> desc disks_info;
|
||||||
|
Field | Type | Length | Note |
|
||||||
|
=================================================================================
|
||||||
|
ts | TIMESTAMP | 8 | |
|
||||||
|
datadir_l0_used | FLOAT | 4 | |
|
||||||
|
datadir_l0_total | FLOAT | 4 | |
|
||||||
|
datadir_l1_used | FLOAT | 4 | |
|
||||||
|
datadir_l1_total | FLOAT | 4 | |
|
||||||
|
datadir_l2_used | FLOAT | 4 | |
|
||||||
|
datadir_l2_total | FLOAT | 4 | |
|
||||||
|
dnode_id | INT | 4 | TAG |
|
||||||
|
dnode_ep | BINARY | 134 | TAG |
|
||||||
|
Query OK, 9 row(s) in set (0.000238s)
|
||||||
|
````
|
||||||
|
|
||||||
|
The code below is used to fetch data from this table into a pandas DataFrame.
|
||||||
|
|
||||||
|
````
|
||||||
|
import sys
|
||||||
|
import taos
|
||||||
|
import pandas
|
||||||
|
|
||||||
|
def sqlQuery(conn):
|
||||||
|
df: pandas.DataFrame = pandas.read_sql("select * from log.disks_info limit 500", conn)
|
||||||
|
print(df)
|
||||||
|
return df
|
||||||
|
|
||||||
|
conn = taos.connect()
|
||||||
|
|
||||||
|
result = sqlQuery(conn)
|
||||||
|
|
||||||
|
print(result)
|
||||||
|
````
|
||||||
|
|
||||||
|
TDengine has connectors for various languages including Node.js, Go, PHP and there are kernels for these languages which can be found [here](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels).
|
|
@ -173,7 +173,8 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
|
||||||
pTscObj->pAppInfo->totalDnodes = pRsp->query->totalDnodes;
|
pTscObj->pAppInfo->totalDnodes = pRsp->query->totalDnodes;
|
||||||
pTscObj->pAppInfo->onlineDnodes = pRsp->query->onlineDnodes;
|
pTscObj->pAppInfo->onlineDnodes = pRsp->query->onlineDnodes;
|
||||||
pTscObj->connId = pRsp->query->connId;
|
pTscObj->connId = pRsp->query->connId;
|
||||||
tscTrace("conn %p hb rsp, dnodes %d/%d", pTscObj->connId, pTscObj->pAppInfo->onlineDnodes, pTscObj->pAppInfo->totalDnodes);
|
tscTrace("conn %p hb rsp, dnodes %d/%d", pTscObj->connId, pTscObj->pAppInfo->onlineDnodes,
|
||||||
|
pTscObj->pAppInfo->totalDnodes);
|
||||||
|
|
||||||
if (pRsp->query->killRid) {
|
if (pRsp->query->killRid) {
|
||||||
tscDebug("request rid %" PRIx64 " need to be killed now", pRsp->query->killRid);
|
tscDebug("request rid %" PRIx64 " need to be killed now", pRsp->query->killRid);
|
||||||
|
@ -297,7 +298,8 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
|
||||||
|
|
||||||
if (code != 0) {
|
if (code != 0) {
|
||||||
(*pInst)->onlineDnodes = ((*pInst)->totalDnodes ? 0 : -1);
|
(*pInst)->onlineDnodes = ((*pInst)->totalDnodes ? 0 : -1);
|
||||||
tscDebug("hb rsp error %s, update server status %d/%d", tstrerror(code), (*pInst)->onlineDnodes, (*pInst)->totalDnodes);
|
tscDebug("hb rsp error %s, update server status %d/%d", tstrerror(code), (*pInst)->onlineDnodes,
|
||||||
|
(*pInst)->totalDnodes);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (rspNum) {
|
if (rspNum) {
|
||||||
|
@ -657,6 +659,8 @@ int32_t hbGatherAppInfo(void) {
|
||||||
|
|
||||||
for (int32_t i = 0; i < sz; ++i) {
|
for (int32_t i = 0; i < sz; ++i) {
|
||||||
SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i);
|
SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i);
|
||||||
|
if (pAppHbMgr == NULL) continue;
|
||||||
|
|
||||||
uint64_t clusterId = pAppHbMgr->pAppInstInfo->clusterId;
|
uint64_t clusterId = pAppHbMgr->pAppInstInfo->clusterId;
|
||||||
SAppHbReq *pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId));
|
SAppHbReq *pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId));
|
||||||
if (NULL == pApp) {
|
if (NULL == pApp) {
|
||||||
|
@ -694,15 +698,21 @@ static void *hbThreadFunc(void *param) {
|
||||||
hbGatherAppInfo();
|
hbGatherAppInfo();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
SArray *mgr = taosArrayInit(sz, sizeof(void *));
|
||||||
for (int i = 0; i < sz; i++) {
|
for (int i = 0; i < sz; i++) {
|
||||||
SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i);
|
SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i);
|
||||||
|
if (pAppHbMgr == NULL) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
int32_t connCnt = atomic_load_32(&pAppHbMgr->connKeyCnt);
|
int32_t connCnt = atomic_load_32(&pAppHbMgr->connKeyCnt);
|
||||||
if (connCnt == 0) {
|
if (connCnt == 0) {
|
||||||
|
taosArrayPush(mgr, &pAppHbMgr);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
SClientHbBatchReq *pReq = hbGatherAllInfo(pAppHbMgr);
|
SClientHbBatchReq *pReq = hbGatherAllInfo(pAppHbMgr);
|
||||||
if (pReq == NULL) {
|
if (pReq == NULL || taosArrayGetP(clientHbMgr.appHbMgrs, i) == NULL) {
|
||||||
|
tFreeClientHbBatchReq(pReq);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
int tlen = tSerializeSClientHbBatchReq(NULL, 0, pReq);
|
int tlen = tSerializeSClientHbBatchReq(NULL, 0, pReq);
|
||||||
|
@ -711,6 +721,7 @@ static void *hbThreadFunc(void *param) {
|
||||||
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||||
tFreeClientHbBatchReq(pReq);
|
tFreeClientHbBatchReq(pReq);
|
||||||
// hbClearReqInfo(pAppHbMgr);
|
// hbClearReqInfo(pAppHbMgr);
|
||||||
|
taosArrayPush(mgr, &pAppHbMgr);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -722,6 +733,7 @@ static void *hbThreadFunc(void *param) {
|
||||||
tFreeClientHbBatchReq(pReq);
|
tFreeClientHbBatchReq(pReq);
|
||||||
// hbClearReqInfo(pAppHbMgr);
|
// hbClearReqInfo(pAppHbMgr);
|
||||||
taosMemoryFree(buf);
|
taosMemoryFree(buf);
|
||||||
|
taosArrayPush(mgr, &pAppHbMgr);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
pInfo->fp = hbAsyncCallBack;
|
pInfo->fp = hbAsyncCallBack;
|
||||||
|
@ -741,8 +753,12 @@ static void *hbThreadFunc(void *param) {
|
||||||
// hbClearReqInfo(pAppHbMgr);
|
// hbClearReqInfo(pAppHbMgr);
|
||||||
|
|
||||||
atomic_add_fetch_32(&pAppHbMgr->reportCnt, 1);
|
atomic_add_fetch_32(&pAppHbMgr->reportCnt, 1);
|
||||||
|
taosArrayPush(mgr, &pAppHbMgr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
taosArrayDestroy(clientHbMgr.appHbMgrs);
|
||||||
|
clientHbMgr.appHbMgrs = mgr;
|
||||||
|
|
||||||
taosThreadMutexUnlock(&clientHbMgr.lock);
|
taosThreadMutexUnlock(&clientHbMgr.lock);
|
||||||
|
|
||||||
taosMsleep(HEARTBEAT_INTERVAL);
|
taosMsleep(HEARTBEAT_INTERVAL);
|
||||||
|
@ -834,7 +850,7 @@ void hbRemoveAppHbMrg(SAppHbMgr **pAppHbMgr) {
|
||||||
if (pItem == *pAppHbMgr) {
|
if (pItem == *pAppHbMgr) {
|
||||||
hbFreeAppHbMgr(*pAppHbMgr);
|
hbFreeAppHbMgr(*pAppHbMgr);
|
||||||
*pAppHbMgr = NULL;
|
*pAppHbMgr = NULL;
|
||||||
taosArrayRemove(clientHbMgr.appHbMgrs, i);
|
taosArraySet(clientHbMgr.appHbMgrs, i, pAppHbMgr);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -845,6 +861,7 @@ void appHbMgrCleanup(void) {
|
||||||
int sz = taosArrayGetSize(clientHbMgr.appHbMgrs);
|
int sz = taosArrayGetSize(clientHbMgr.appHbMgrs);
|
||||||
for (int i = 0; i < sz; i++) {
|
for (int i = 0; i < sz; i++) {
|
||||||
SAppHbMgr *pTarget = taosArrayGetP(clientHbMgr.appHbMgrs, i);
|
SAppHbMgr *pTarget = taosArrayGetP(clientHbMgr.appHbMgrs, i);
|
||||||
|
if (pTarget == NULL) continue;
|
||||||
hbFreeAppHbMgr(pTarget);
|
hbFreeAppHbMgr(pTarget);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -859,7 +876,14 @@ int hbMgrInit() {
|
||||||
|
|
||||||
clientHbMgr.appSummary = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
|
clientHbMgr.appSummary = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
|
||||||
clientHbMgr.appHbMgrs = taosArrayInit(0, sizeof(void *));
|
clientHbMgr.appHbMgrs = taosArrayInit(0, sizeof(void *));
|
||||||
taosThreadMutexInit(&clientHbMgr.lock, NULL);
|
|
||||||
|
TdThreadMutexAttr attr = {0};
|
||||||
|
taosThreadMutexAttrSetType(&attr, PTHREAD_MUTEX_RECURSIVE);
|
||||||
|
int ret = taosThreadMutexAttrInit(&attr);
|
||||||
|
assert(ret == 0);
|
||||||
|
|
||||||
|
taosThreadMutexInit(&clientHbMgr.lock, &attr);
|
||||||
|
taosThreadMutexAttrDestroy(&attr);
|
||||||
|
|
||||||
// init handle funcs
|
// init handle funcs
|
||||||
hbMgrInitHandle();
|
hbMgrInitHandle();
|
||||||
|
|
|
@ -16,19 +16,9 @@
|
||||||
#include "tsdb.h"
|
#include "tsdb.h"
|
||||||
|
|
||||||
static bool tsdbShouldDoRetention(STsdb *pTsdb, int64_t now) {
|
static bool tsdbShouldDoRetention(STsdb *pTsdb, int64_t now) {
|
||||||
STsdbKeepCfg *keepCfg = &pTsdb->keepCfg;
|
|
||||||
|
|
||||||
if ((keepCfg->keep0 == keepCfg->keep1) && (keepCfg->keep1 == keepCfg->keep2)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (tfsGetLevel(pTsdb->pVnode->pTfs) <= 1) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int32_t iSet = 0; iSet < taosArrayGetSize(pTsdb->fs.aDFileSet); iSet++) {
|
for (int32_t iSet = 0; iSet < taosArrayGetSize(pTsdb->fs.aDFileSet); iSet++) {
|
||||||
SDFileSet *pSet = (SDFileSet *)taosArrayGet(pTsdb->fs.aDFileSet, iSet);
|
SDFileSet *pSet = (SDFileSet *)taosArrayGet(pTsdb->fs.aDFileSet, iSet);
|
||||||
int32_t expLevel = tsdbFidLevel(pSet->fid, keepCfg, now);
|
int32_t expLevel = tsdbFidLevel(pSet->fid, &pTsdb->keepCfg, now);
|
||||||
SDiskID did;
|
SDiskID did;
|
||||||
|
|
||||||
if (expLevel == pSet->diskId.level) continue;
|
if (expLevel == pSet->diskId.level) continue;
|
||||||
|
|
|
@ -94,7 +94,7 @@ void *openRefSpace(void *param) {
|
||||||
pSpace->rsetId = taosOpenRef(50, myfree);
|
pSpace->rsetId = taosOpenRef(50, myfree);
|
||||||
|
|
||||||
if (pSpace->rsetId < 0) {
|
if (pSpace->rsetId < 0) {
|
||||||
printf("failed to open ref, reson:%s\n", tstrerror(pSpace->rsetId));
|
printf("failed to open ref, reason:%s\n", tstrerror(pSpace->rsetId));
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue