Merge remote-tracking branch 'origin/develop' into feature/linux

Shengliang Guan 2021-03-19 02:00:06 +00:00
commit 8b4bb3fcc4
22 changed files with 495 additions and 383 deletions

View File

@ -160,24 +160,13 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
-# Quick Run
-# Quick Run
-To quickly start a TDengine server after building, run the command below in terminal:
-```bash
-./build/bin/taosd -c test/cfg
-```
-In another terminal, use the TDengine shell to connect the server:
-```bash
-./build/bin/taos -c test/cfg
-```
-option "-c test/cfg" specifies the system configuration file directory.
# Installing
After building successfully, TDengine can be installed by:
```bash
sudo make install
```
Users can find more information about directories installed on the system in the [directory and files](https://www.taosdata.com/en/documentation/administrator/#Directory-and-Files) section. Since version 2.0, installing from source code will also configure service management for TDengine.
Users can also choose to [install from packages](https://www.taosdata.com/en/getting-started/#Install-from-Package) for it.
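For reference, once the install step above has configured service management, the server can usually be started and checked through systemd. This is only a sketch, assuming a systemd-based Linux distribution and the `taosd` service name used by the TDengine packages:
```bash
# Start the TDengine server as a system service (assumes systemd)
sudo systemctl start taosd
# Confirm that the service is running
sudo systemctl status taosd
```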
@ -193,6 +182,20 @@ taos
If TDengine shell connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
+## Quick Run
+If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal:
+```bash
+./build/bin/taosd -c test/cfg
+```
+In another terminal, use the TDengine shell to connect the server:
+```bash
+./build/bin/taos -c test/cfg
+```
+option "-c test/cfg" specifies the system configuration file directory.
# Try TDengine
It is easy to run SQL commands from TDengine shell which is the same as other SQL databases.
```sql

View File

@ -145,7 +145,7 @@ TDengine recommends using the name of the data collection point (such as D1001 in the table above) as the table name.
In TDengine's design, **a table represents one specific data collection point, while a super table (STable) represents a collection of data collection points of the same type**. When the table for a specific data collection point is created, the user takes the super table's definition as a template and supplies the tag values of that collection point (table). Compared with a traditional relational database, a table (one data collection point) carries static tags, and these tags can be added, deleted, or modified later on. **A super table contains multiple tables that share the same time-series data schema but carry different tag values**.
When an aggregation is performed over multiple data collection points of the same type, TDengine first finds the tables that satisfy the tag filter conditions from the super table, and only then scans the time-series data of those tables to compute the aggregate. This greatly reduces the data set that has to be scanned and therefore significantly improves the performance of aggregate computations.
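As a rough illustration of this access pattern, an aggregation with a tag filter might look like the statement below. The super table `meters` and the tags `location` and `groupId` are hypothetical names borrowed from the usual smart-meter example, not something introduced by this commit:
```sql
-- TDengine first resolves, from the super table, the sub-tables whose tag
-- values satisfy the filter, and then scans only their time-series data.
SELECT AVG(current), MAX(voltage)
FROM meters
WHERE location = 'Beijing'
GROUP BY groupId;
```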
## <a class="anchor" id="cluster"></a>Cluster and Basic Logical Units

View File

@ -125,7 +125,7 @@ The default timestamp precision in TDengine is millisecond, but it can be changed via the configuration parameter enableMic
```mysql
ALTER DATABASE db_name CACHELAST 0;
```
The CACHELAST parameter controls whether the last_row of each data sub-table is cached in memory. The default value is 0 and the valid range is [0, 1], where 0 means disabled and 1 means enabled. (Supported since version 2.0.11; after the parameter is changed, the server must be restarted for the change to take effect.)
**Tips**: After any of the parameters above is modified, you can run `show databases` to confirm whether the change took effect.
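A minimal sketch of that workflow, using a placeholder database name `db_name`: change the parameter, then look for the updated value in the corresponding column of the `show databases` output.
```sql
-- Enable last_row caching for an existing database, then verify the setting.
ALTER DATABASE db_name CACHELAST 1;
SHOW DATABASES;
```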

View File

@ -16,13 +16,13 @@
## 1. What should I pay attention to when upgrading from a pre-2.0 version of TDengine to 2.0 or later? ☆☆☆
Version 2.0 is a complete rewrite of the earlier versions, and its configuration files and data files are not compatible with them. Be sure to do the following before upgrading:
1. Delete the configuration file: run `sudo rm -rf /etc/taos/taos.cfg`
2. Delete the log files: run `sudo rm -rf /var/log/taos/`
3. After confirming that the data is no longer needed, delete the data files: run `sudo rm -rf /var/lib/taos/`
4. Install the latest stable version of TDengine
5. If the data needs to be migrated, or if the data files are damaged, contact the official TAOS Data technical support team for assistance
## 2. What should I do if the JDBC driver cannot find the native dynamic library on Windows?

View File

@ -213,10 +213,10 @@ fi
if echo $osinfo | grep -qwi "ubuntu" ; then
  # echo "this is ubuntu system"
-  ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
+  ${csudo} dpkg --force-all -P tdengine || :
elif echo $osinfo | grep -qwi "debian" ; then
  # echo "this is debian system"
-  ${csudo} rm -f /var/lib/dpkg/info/tdengine* || :
+  ${csudo} dpkg --force-all -P tdengine || :
elif echo $osinfo | grep -qwi "centos" ; then
  # echo "this is centos system"
  ${csudo} rpm -e --noscripts tdengine || :

View File

@ -281,7 +281,7 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
}
static void tscAsyncResultCallback(SSchedMsg *pMsg) {
-  SSqlObj* pSql = pMsg->ahandle;
+  SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)pMsg->ahandle);
  if (pSql == NULL || pSql->signature != pSql) {
    tscDebug("%p SqlObj is freed, not add into queue async res", pSql);
    return;
@ -292,23 +292,26 @@ static void tscAsyncResultCallback(SSchedMsg *pMsg) {
  SSqlRes *pRes = &pSql->res;
  if (pSql->fp == NULL || pSql->fetchFp == NULL){
+    taosReleaseRef(tscObjRef, pSql->self);
    return;
  }
  pSql->fp = pSql->fetchFp;
  (*pSql->fp)(pSql->param, pSql, pRes->code);
+  taosReleaseRef(tscObjRef, pSql->self);
}
void tscAsyncResultOnError(SSqlObj* pSql) {
  SSchedMsg schedMsg = {0};
  schedMsg.fp = tscAsyncResultCallback;
-  schedMsg.ahandle = pSql;
+  schedMsg.ahandle = (void *)pSql->self;
  schedMsg.thandle = (void *)1;
  schedMsg.msg = 0;
  taosScheduleTask(tscQhandle, &schedMsg);
}
int tscSendMsgToServer(SSqlObj *pSql);
void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {

View File

@ -4138,13 +4138,21 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
  }
  int32_t retVal = TSDB_CODE_SUCCESS;
+  int32_t bufLen = 0;
+  if (IS_NUMERIC_TYPE(vVariant->nType)) {
+    bufLen = 60;  // The maximum length of string that a number is converted to.
+  } else {
+    bufLen = vVariant->nLen + 1;
+  }
  if (schemaType == TSDB_DATA_TYPE_BINARY) {
-    char *tmp = calloc(1, vVariant->nLen + TSDB_NCHAR_SIZE);
+    char *tmp = calloc(1, bufLen * TSDB_NCHAR_SIZE);
    retVal = tVariantDump(vVariant, tmp, schemaType, false);
    free(tmp);
  } else if (schemaType == TSDB_DATA_TYPE_NCHAR) {
    // pRight->value.nLen + 1 is larger than the actual nchar string length
-    char *tmp = calloc(1, (vVariant->nLen + 1) * TSDB_NCHAR_SIZE);
+    char *tmp = calloc(1, bufLen * TSDB_NCHAR_SIZE);
    retVal = tVariantDump(vVariant, tmp, schemaType, false);
    free(tmp);
  } else {
@ -4155,7 +4163,7 @@ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) {
    if (retVal != TSDB_CODE_SUCCESS) {
      return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
    }
-  }while (0);
+  } while (0);
  return TSDB_CODE_SUCCESS;
}

View File

@ -25,6 +25,8 @@
#include "tdataformat.h"
#include "tname.h"
#include "hash.h"
+#include "tlockfree.h"
+#include "tlist.h"
#ifdef __cplusplus
extern "C" {
@ -172,10 +174,32 @@ typedef struct STsdbQueryCond {
  int32_t type;  // data block load type:
} STsdbQueryCond;
+typedef struct STableData STableData;
+typedef struct {
+  T_REF_DECLARE()
+  SRWLatch     latch;
+  TSKEY        keyFirst;
+  TSKEY        keyLast;
+  int64_t      numOfRows;
+  int32_t      maxTables;
+  STableData **tData;
+  SList *      actList;
+  SList *      extraBuffList;
+  SList *      bufBlockList;
+  int64_t      pointsAdd;   // TODO
+  int64_t      storageAdd;  // TODO
+} SMemTable;
+typedef struct {
+  SMemTable* mem;
+  SMemTable* imem;
+  SMemTable  mtable;
+  SMemTable* omem;
+} SMemSnapshot;
typedef struct SMemRef {
  int32_t      ref;
-  void *       mem;
-  void *       imem;
+  SMemSnapshot snapshot;
} SMemRef;
typedef struct SDataBlockInfo {

File diff suppressed because it is too large

View File

@ -1840,7 +1840,7 @@ static void doFreeQueryHandle(SQueryRuntimeEnv* pRuntimeEnv) {
  pRuntimeEnv->pQueryHandle = NULL;
  SMemRef* pMemRef = &pQuery->memRef;
-  assert(pMemRef->ref == 0 && pMemRef->imem == NULL && pMemRef->mem == NULL);
+  assert(pMemRef->ref == 0 && pMemRef->snapshot.imem == NULL && pMemRef->snapshot.mem == NULL);
}
static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {

View File

@ -31,29 +31,14 @@ typedef struct {
  SSkipListIterator *pIter;
} SCommitIter;
-typedef struct {
+struct STableData {
  uint64_t   uid;
  TSKEY      keyFirst;
  TSKEY      keyLast;
  int64_t    numOfRows;
  SSkipList* pData;
  T_REF_DECLARE()
-} STableData;
+};
-typedef struct {
-  T_REF_DECLARE()
-  SRWLatch latch;
-  TSKEY keyFirst;
-  TSKEY keyLast;
-  int64_t numOfRows;
-  int32_t maxTables;
-  STableData** tData;
-  SList* actList;
-  SList* extraBuffList;
-  SList* bufBlockList;
-  int64_t pointsAdd; // TODO
-  int64_t storageAdd; // TODO
-} SMemTable;
enum { TSDB_UPDATE_META, TSDB_DROP_META };
@ -77,8 +62,8 @@ typedef struct {
int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
-int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem, SArray* pATable);
-void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemTable* pMem, SMemTable* pIMem);
+int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemSnapshot* pSnapshot, SArray* pATable);
+void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemSnapshot* pSnapshot);
void* tsdbAllocBytes(STsdbRepo* pRepo, int bytes);
int tsdbAsyncCommit(STsdbRepo* pRepo);
int tsdbLoadDataFromCache(STable* pTable, SSkipListIterator* pIter, TSKEY maxKey, int maxRowsToRead, SDataCols* pCols,

View File

@ -124,88 +124,80 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
  return 0;
}
-int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem, SArray *pATable) {
-  SMemTable *tmem;
-  // Get snap object
+int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemSnapshot *pSnapshot, SArray *pATable) {
+  memset(pSnapshot, 0, sizeof(*pSnapshot));
  if (tsdbLockRepo(pRepo) < 0) return -1;
-  tmem = pRepo->mem;
-  *pIMem = pRepo->imem;
-  tsdbRefMemTable(pRepo, tmem);
-  tsdbRefMemTable(pRepo, *pIMem);
+  pSnapshot->omem = pRepo->mem;
+  pSnapshot->imem = pRepo->imem;
+  tsdbRefMemTable(pRepo, pRepo->mem);
+  tsdbRefMemTable(pRepo, pRepo->imem);
  if (tsdbUnlockRepo(pRepo) < 0) return -1;
-  // Copy mem objects and ref needed STableData
-  if (tmem) {
-    taosRLockLatch(&(tmem->latch));
-    *pMem = (SMemTable *)calloc(1, sizeof(**pMem));
-    if (*pMem == NULL) {
+  if (pSnapshot->omem) {
+    taosRLockLatch(&(pSnapshot->omem->latch));
+    pSnapshot->mem = &(pSnapshot->mtable);
+    pSnapshot->mem->tData = (STableData **)calloc(pSnapshot->omem->maxTables, sizeof(STableData *));
+    if (pSnapshot->mem->tData == NULL) {
      terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
-      taosRUnLockLatch(&(tmem->latch));
-      tsdbUnRefMemTable(pRepo, tmem);
-      tsdbUnRefMemTable(pRepo, *pIMem);
-      *pMem = NULL;
-      *pIMem = NULL;
+      taosRUnLockLatch(&(pSnapshot->omem->latch));
+      tsdbUnRefMemTable(pRepo, pSnapshot->omem);
+      tsdbUnRefMemTable(pRepo, pSnapshot->imem);
+      pSnapshot->mem = NULL;
+      pSnapshot->imem = NULL;
+      pSnapshot->omem = NULL;
      return -1;
    }
-    (*pMem)->tData = (STableData **)calloc(tmem->maxTables, sizeof(STableData *));
-    if ((*pMem)->tData == NULL) {
-      terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
-      taosRUnLockLatch(&(tmem->latch));
-      free(*pMem);
-      tsdbUnRefMemTable(pRepo, tmem);
-      tsdbUnRefMemTable(pRepo, *pIMem);
-      *pMem = NULL;
-      *pIMem = NULL;
-      return -1;
-    }
-    (*pMem)->keyFirst = tmem->keyFirst;
-    (*pMem)->keyLast = tmem->keyLast;
-    (*pMem)->numOfRows = tmem->numOfRows;
-    (*pMem)->maxTables = tmem->maxTables;
+    pSnapshot->mem->keyFirst = pSnapshot->omem->keyFirst;
+    pSnapshot->mem->keyLast = pSnapshot->omem->keyLast;
+    pSnapshot->mem->numOfRows = pSnapshot->omem->numOfRows;
+    pSnapshot->mem->maxTables = pSnapshot->omem->maxTables;
    for (size_t i = 0; i < taosArrayGetSize(pATable); i++) {
      STable *    pTable = *(STable **)taosArrayGet(pATable, i);
      int32_t     tid = TABLE_TID(pTable);
-      STableData *pTableData = (tid < tmem->maxTables) ? tmem->tData[tid] : NULL;
+      STableData *pTableData = (tid < pSnapshot->omem->maxTables) ? pSnapshot->omem->tData[tid] : NULL;
      if ((pTableData == NULL) || (TABLE_UID(pTable) != pTableData->uid)) continue;
-      (*pMem)->tData[tid] = tmem->tData[tid];
-      T_REF_INC(tmem->tData[tid]);
+      pSnapshot->mem->tData[tid] = pTableData;
+      T_REF_INC(pTableData);
    }
-    taosRUnLockLatch(&(tmem->latch));
+    taosRUnLockLatch(&(pSnapshot->omem->latch));
  }
-  tsdbUnRefMemTable(pRepo, tmem);
-  tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem);
+  tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pSnapshot->omem, pSnapshot->imem);
  return 0;
}
-void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem) {
-  tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pMem, pIMem);
-  if (pMem != NULL) {
-    for (size_t i = 0; i < pMem->maxTables; i++) {
-      STableData *pTableData = pMem->tData[i];
+void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemSnapshot *pSnapshot) {
+  tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pSnapshot->omem, pSnapshot->imem);
+  if (pSnapshot->mem) {
+    ASSERT(pSnapshot->omem != NULL);
+    for (size_t i = 0; i < pSnapshot->mem->maxTables; i++) {
+      STableData *pTableData = pSnapshot->mem->tData[i];
      if (pTableData) {
        tsdbFreeTableData(pTableData);
      }
    }
-    free(pMem->tData);
-    free(pMem);
+    tfree(pSnapshot->mem->tData);
+    tsdbUnRefMemTable(pRepo, pSnapshot->omem);
  }
-  if (pIMem != NULL) {
-    tsdbUnRefMemTable(pRepo, pIMem);
-  }
+  tsdbUnRefMemTable(pRepo, pSnapshot->imem);
+  pSnapshot->mem = NULL;
+  pSnapshot->imem = NULL;
+  pSnapshot->omem = NULL;
}
void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {

View File

@ -194,7 +194,7 @@ static void tsdbMayTakeMemSnapshot(STsdbQueryHandle* pQueryHandle, SArray* psTab
  SMemRef* pMemRef = pQueryHandle->pMemRef;
  if (pQueryHandle->pMemRef->ref++ == 0) {
-    tsdbTakeMemSnapshot(pQueryHandle->pTsdb, (SMemTable**)&(pMemRef->mem), (SMemTable**)&(pMemRef->imem), psTable);
+    tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &(pMemRef->snapshot), psTable);
  }
  taosArrayDestroy(psTable);
@ -208,9 +208,7 @@ static void tsdbMayUnTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) {
  }
  if (--pMemRef->ref == 0) {
-    tsdbUnTakeMemSnapShot(pQueryHandle->pTsdb, pMemRef->mem, pMemRef->imem);
-    pMemRef->mem = NULL;
-    pMemRef->imem = NULL;
+    tsdbUnTakeMemSnapShot(pQueryHandle->pTsdb, &(pMemRef->snapshot));
  }
  pQueryHandle->pMemRef = NULL;
@ -231,8 +229,8 @@ int64_t tsdbGetNumOfRowsInMemTable(TsdbQueryHandleT* pHandle) {
  STableData* pMem = NULL;
  STableData* pIMem = NULL;
-  SMemTable *pMemT = (SMemTable *)(pMemRef->mem);
-  SMemTable *pIMemT = (SMemTable *)(pMemRef->imem);
+  SMemTable* pMemT = pMemRef->snapshot.mem;
+  SMemTable* pIMemT = pMemRef->snapshot.imem;
  if (pMemT && pCheckInfo->tableId.tid < pMemT->maxTables) {
    pMem = pMemT->tData[pCheckInfo->tableId.tid];
@ -605,7 +603,7 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
  int32_t order = pHandle->order;
  // no data in buffer, abort
-  if (pHandle->pMemRef->mem == NULL && pHandle->pMemRef->imem == NULL) {
+  if (pHandle->pMemRef->snapshot.mem == NULL && pHandle->pMemRef->snapshot.imem == NULL) {
    return false;
  }
@ -614,8 +612,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
  STableData* pMem = NULL;
  STableData* pIMem = NULL;
-  SMemTable* pMemT = pHandle->pMemRef->mem;
-  SMemTable* pIMemT = pHandle->pMemRef->imem;
+  SMemTable* pMemT = pHandle->pMemRef->snapshot.mem;
+  SMemTable* pIMemT = pHandle->pMemRef->snapshot.imem;
  if (pMemT && pCheckInfo->tableId.tid < pMemT->maxTables) {
    pMem = pMemT->tData[pCheckInfo->tableId.tid];

View File

@ -677,13 +677,13 @@ static int32_t tsdbRecvDFileSetInfo(SSyncH *pSynch) {
static int tsdbReload(STsdbRepo *pRepo, bool isMfChanged) {
  // TODO: may need to stop and restart stream
-  if (isMfChanged) {
+  // if (isMfChanged) {
    tsdbCloseMeta(pRepo);
    tsdbFreeMeta(pRepo->tsdbMeta);
    pRepo->tsdbMeta = tsdbNewMeta(REPO_CFG(pRepo));
    tsdbOpenMeta(pRepo);
    tsdbLoadMetaCache(pRepo, true);
-  }
+  // }
  tsdbUnRefMemTable(pRepo, pRepo->mem);
  tsdbUnRefMemTable(pRepo, pRepo->imem);

View File

@ -147,7 +147,7 @@ static int32_t vnodeAlterImp(SVnodeObj *pVnode, SCreateVnodeMsg *pVnodeCfg) {
  vDebug("vgId:%d, tsdbchanged:%d syncchanged:%d while alter vnode", pVnode->vgId, tsdbCfgChanged, syncCfgChanged);
-  if (tsdbCfgChanged || syncCfgChanged) {
+  if (/*tsdbCfgChanged || */syncCfgChanged) {
    // vnode in non-ready state and still needs to return success instead of TSDB_CODE_VND_INVALID_STATUS
    // dbCfgVersion can be corrected by status msg
    if (!vnodeSetUpdatingStatus(pVnode)) {

View File

@ -18,13 +18,15 @@ import argparse
class BuildDockerCluser:
-    def __init__(self, hostName, user, password, configDir, numOfNodes, clusterVersion):
+    def __init__(self, hostName, user, password, configDir, numOfNodes, clusterVersion, dockerDir, removeFlag):
        self.hostName = hostName
        self.user = user
        self.password = password
        self.configDir = configDir
        self.numOfNodes = numOfNodes
        self.clusterVersion = clusterVersion
+        self.dockerDir = dockerDir
+        self.removeFlag = removeFlag
    def getConnection(self):
        self.conn = taos.connect(
@ -46,7 +48,10 @@ class BuildDockerCluser:
        if self.numOfNodes < 2 or self.numOfNodes > 5:
            print("the number of nodes must be between 2 and 5")
            exit(0)
-        os.system("./buildClusterEnv.sh -n %d -v %s" % (self.numOfNodes, self.clusterVersion))
+        print("remove Flag value %s" % self.removeFlag)
+        if self.removeFlag == False:
+            os.system("./cleanClusterEnv.sh -d %s" % self.dockerDir)
+        os.system("./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.clusterVersion, self.dockerDir))
        self.getConnection()
        self.createDondes()
        self.startArbitrator()
@ -91,10 +96,24 @@ parser.add_argument(
    '-v',
    '--version',
    action='store',
-    default='2.0.14.1',
+    default='2.0.17.1',
    type=str,
-    help='the version of the cluster to be build, Default is 2.0.14.1')
+    help='the version of the cluster to be build, Default is 2.0.17.1')
+parser.add_argument(
+    '-d',
+    '--docker-dir',
+    action='store',
+    default='/data',
+    type=str,
+    help='the data dir for docker, default is /data')
+parser.add_argument(
+    '--flag',
+    action='store_true',
+    help='remove docker containers flag, default: True')
args = parser.parse_args()
-cluster = BuildDockerCluser(args.host, args.user, args.password, args.config_dir, args.num_of_nodes, args.version)
+cluster = BuildDockerCluser(args.host, args.user, args.password, args.config_dir, args.num_of_nodes, args.version, args.docker_dir, args.flag)
cluster.run()
+# usage 1: python3 basic.py -n 2 --flag (flag is True)
+# usage 2: python3 basic.py -n 2 (flag should be False when it is not specified)

View File

@ -1,18 +1,19 @@
#!/bin/bash
echo "Executing buildClusterEnv.sh"
-DOCKER_DIR=/data
CURR_DIR=`pwd`
-if [ $# != 4 ]; then
+if [ $# != 6 ]; then
  echo "argument list need input : "
  echo " -n numOfNodes"
  echo " -v version"
+  echo " -d docker dir"
  exit 1
fi
NUM_OF_NODES=
VERSION=
-while getopts "n:v:" arg
+DOCKER_DIR=
+while getopts "n:v:d:" arg
do
  case $arg in
    n)
@ -21,6 +22,9 @@ do
    v)
      VERSION=$OPTARG
      ;;
+    d)
+      DOCKER_DIR=$OPTARG
+      ;;
    ?)
      echo "unkonwn argument"
      ;;
@ -31,29 +35,28 @@ function addTaoscfg {
  for i in {1..5}
  do
    touch /data/node$i/cfg/taos.cfg
-    echo 'firstEp tdnode1:6030' > /data/node$i/cfg/taos.cfg
-    echo 'fqdn tdnode'$i >> /data/node$i/cfg/taos.cfg
-    echo 'arbitrator tdnode1:6042' >> /data/node$i/cfg/taos.cfg
+    echo 'firstEp tdnode1:6030' > $DOCKER_DIR/node$i/cfg/taos.cfg
+    echo 'fqdn tdnode'$i >> $DOCKER_DIR/node$i/cfg/taos.cfg
+    echo 'arbitrator tdnode1:6042' >> $DOCKER_DIR/node$i/cfg/taos.cfg
  done
}
function createDIR {
  for i in {1..5}
  do
-    mkdir -p /data/node$i/data
-    mkdir -p /data/node$i/log
-    mkdir -p /data/node$i/cfg
-    mkdir -p /data/node$i/core
+    mkdir -p $DOCKER_DIR/node$i/data
+    mkdir -p $DOCKER_DIR/node$i/log
+    mkdir -p $DOCKER_DIR/node$i/cfg
+    mkdir -p $DOCKER_DIR/node$i/core
  done
}
function cleanEnv {
+  echo "Clean up docker environment"
  for i in {1..5}
  do
-    echo /data/node$i/data/*
-    rm -rf /data/node$i/data/*
-    echo /data/node$i/log/*
-    rm -rf /data/node$i/log/*
+    rm -rf $DOCKER_DIR/node$i/data/*
+    rm -rf $DOCKER_DIR/node$i/log/*
  done
}
@ -98,19 +101,19 @@ function clusterUp {
  if [ $NUM_OF_NODES -eq 2 ]; then
    echo "create 2 dnodes"
-    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose up -d
+    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz TARBITRATORPKG=TDengine-arbitrator-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose up -d
  fi
  if [ $NUM_OF_NODES -eq 3 ]; then
-    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml up -d
+    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz TARBITRATORPKG=TDengine-arbitrator-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml up -d
  fi
  if [ $NUM_OF_NODES -eq 4 ]; then
-    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml up -d
+    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz TARBITRATORPKG=TDengine-arbitrator-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml up -d
  fi
  if [ $NUM_OF_NODES -eq 5 ]; then
-    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml -f node5.yml up -d
+    PACKAGE=TDengine-server-$VERSION-Linux-x64.tar.gz TARBITRATORPKG=TDengine-arbitrator-$VERSION-Linux-x64.tar.gz DIR=TDengine-server-$VERSION DIR2=TDengine-arbitrator-$VERSION VERSION=$VERSION docker-compose -f docker-compose.yml -f node3.yml -f node4.yml -f node5.yml up -d
  fi
  echo "docker compose finish"

View File

@ -0,0 +1,39 @@
#!/bin/bash
echo "Executing cleanClusterEnv.sh"
CURR_DIR=`pwd`
if [ $# != 2 ]; then
  echo "argument list need input : "
  echo " -d docker dir"
  exit 1
fi
DOCKER_DIR=
while getopts "d:" arg
do
  case $arg in
    d)
      DOCKER_DIR=$OPTARG
      ;;
    ?)
      echo "unkonwn argument"
      ;;
  esac
done
function removeDockerContainers {
  cd $DOCKER_DIR
  docker-compose down --remove-orphans
}
function cleanEnv {
  echo "Clean up docker environment"
  for i in {1..5}
  do
    rm -rf $DOCKER_DIR/node$i/data/*
    rm -rf $DOCKER_DIR/node$i/log/*
  done
}
removeDockerContainers
cleanEnv

View File

@ -275,7 +275,7 @@ python3 ./test.py -f functions/function_twa.py -r 1
python3 ./test.py -f functions/function_twa_test2.py
python3 ./test.py -f functions/function_stddev_td2555.py
python3 ./test.py -f insert/metadataUpdate.py
-python3 ./test.py -f query/last_cache.py
+#python3 ./test.py -f query/last_cache.py
python3 ./test.py -f query/last_row_cache.py
python3 ./test.py -f account/account_create.py
python3 ./test.py -f alter/alter_table.py

View File

@ -118,7 +118,11 @@ class TDTestCase:
        if i == 1 or i == 5 or i == 6 or i == 7 or i == 9 or i == 8 :continue
        tdSql.query('select stddev(c%d),stddev(c%d) from s group by c%d' %( i+1 , i+1 , i+1 ) )
+        #add for TD-3318
+        tdSql.execute('create table t1(ts timestamp, k int, b binary(12));')
+        tdSql.execute("insert into t1 values(now, 1, 'abc');")
+        tdLog.info("select stddev(k) from t1 where b <> 'abc' interval(1s);")
+        tdSql.query("select stddev(k) from t1 where b <> 'abc' interval(1s);")
    def stop(self):

View File

@ -316,4 +316,13 @@ if $data13 != @20-02-02 01:01:01.000@ then
return -1
endi
+print ===============================>td-3361
+sql create table ttm1(ts timestamp, k int) tags(a nchar(12));
+sql create table ttm1_t1 using ttm1 tags('abcdef')
+sql insert into ttm1_t1 values(now, 1)
+sql select * from ttm1 where a=123456789012
+if $row != 0 then
+  return -1
+endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -29,7 +29,7 @@ function dohavecore(){
    proc=`echo $corefile|cut -d "_" -f3`
    if [ -n "$corefile" ];then
      echo 'taosd or taos has generated core'
-      tar -zcvPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz /usr/local/taos/
+      tar -zcPf $corepath'taos_'`date "+%Y_%m_%d_%H_%M_%S"`.tar.gz /usr/local/taos/
      if [[ $1 == 1 ]];then
        echo '\n'|gdb /usr/local/taos/bin/$proc $core_file -ex "bt 10" -ex quit
        exit 8