Merge branch 'develop' into feature/TD-5331

commit 0f3c6dd768

@@ -126,7 +126,7 @@ taos> source <filename>;

$ taosdemo
```

-This command automatically creates a super table meters in the database test, with 10,000 tables under it named "t0" to "t9999". Each table holds 10,000 records, and each record has four fields (ts, current, voltage, phase); the timestamps run from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupdId, with groupdId set to 1 through 10 and location set to "beijing" or "shanghai".
+This command automatically creates a super table meters in the database test, with 10,000 tables under it named "d0" to "d9999". Each table holds 10,000 records, and each record has four fields (ts, current, voltage, phase); the timestamps run from "2017-07-14 10:40:00 000" to "2017-07-14 10:40:09 999". Each table carries the tags location and groupdId, with groupdId set to 1 through 10 and location set to "beijing" or "shanghai".

Running this command takes a few minutes and inserts 100 million records in total.

@@ -156,10 +156,10 @@ taos> select count(*) from test.meters where location="beijing";

taos> select avg(current), max(voltage), min(phase) from test.meters where groupdId=10;
```

-- Compute 10-second average, maximum, and minimum aggregates on table t10:
+- Compute 10-second average, maximum, and minimum aggregates on table d10:

```mysql
-taos> select avg(current), max(voltage), min(phase) from test.t10 interval(10s);
+taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
```

**Note:** taosdemo itself has many options for configuring the number of tables, the number of records per table, and so on; run `taosdemo --help` for the full list. You can experiment with different parameter settings.

@@ -108,6 +108,7 @@ int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *le

int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
void tscDestroyDataBlock(STableDataBlocks* pDataBlock, bool removeMeta);
void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks* dataBuf);
int tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* pBlkKeyInfo);
int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows);
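
Both deduplication routines declared above (`tscSortRemoveDataBlockDupRowsRaw` and `tscSortRemoveDataBlockDupRows`) sort a table's pending rows by timestamp and drop rows whose timestamps repeat. A minimal, self-contained sketch of that sort-and-deduplicate idea on a plain array; the `ExRow` type and `sort_dedup` helper are invented for the example, and the real functions operate on `STableDataBlocks` with their own comparison and retention rules:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int64_t ts; int32_t val; } ExRow;

static int cmpTs(const void *a, const void *b) {
  int64_t d = ((const ExRow *)a)->ts - ((const ExRow *)b)->ts;
  return (d > 0) - (d < 0);
}

/* Sort rows by timestamp, then compact equal timestamps down to one row. */
static int sort_dedup(ExRow *rows, int n) {
  if (n <= 1) return n;
  qsort(rows, n, sizeof(ExRow), cmpTs);
  int out = 0;
  for (int i = 1; i < n; i++) {
    if (rows[i].ts == rows[out].ts) {
      rows[out] = rows[i];       /* duplicate timestamp: overwrite, keep one */
    } else {
      rows[++out] = rows[i];
    }
  }
  return out + 1;
}

int main(void) {
  ExRow rows[] = {{30, 1}, {10, 2}, {30, 3}, {20, 4}};
  int n = sort_dedup(rows, 4);
  for (int i = 0; i < n; i++)
    printf("%lld -> %d\n", (long long)rows[i].ts, rows[i].val);
  return 0;
}
```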

@@ -160,6 +160,7 @@ typedef struct SInsertStatementParam {
  SHashObj  *pTableBlockHashList;  // data block for each table
  SArray    *pDataBlocks;          // SArray<STableDataBlocks*>. Merged submit block for each vgroup
  int8_t     schemaAttached;       // denote if submit block is built with table schema or not
+  uint8_t    payloadType;          // EPayloadType. 0: K-V payload for non-prepare insert, 1: rawPayload for prepare insert
  STagData   tagData;              // NOTE: pTagData->data is used as a variant length array

  int32_t    batchSize;            // for parameter ('?') binding and batch processing

@@ -171,6 +172,14 @@ typedef struct SInsertStatementParam {
  char      *sql;                  // current sql statement position
} SInsertStatementParam;

+typedef enum {
+  PAYLOAD_TYPE_KV = 0,
+  PAYLOAD_TYPE_RAW = 1,
+} EPayloadType;
+
+#define IS_RAW_PAYLOAD(t) \
+  (((int)(t)) == PAYLOAD_TYPE_RAW)  // 0: K-V payload for non-prepare insert, 1: rawPayload for prepare insert
+
// TODO extract sql parser supporter
typedef struct {
  int command;
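
`payloadType` and `IS_RAW_PAYLOAD` select between the two insert encodings named in the comments above: a self-describing K-V payload for ordinary inserts and a raw fixed-width payload for prepared (`?`-bound) inserts. The sketch below only illustrates branching on such a flag; the `Ex*` names and the toy row layouts are invented and are not TDengine structures.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical mirror of the two payload flavours described above. */
typedef enum { EX_PAYLOAD_KV = 0, EX_PAYLOAD_RAW = 1 } ExPayloadType;

#define EX_IS_RAW_PAYLOAD(t) (((int)(t)) == EX_PAYLOAD_RAW)

/* A toy K-V cell: one int32 column tagged with its column id. */
typedef struct { int16_t colId; int32_t value; } ExKvCell;

static size_t encode_row(uint8_t *out, ExPayloadType type, int32_t value) {
  if (EX_IS_RAW_PAYLOAD(type)) {
    /* raw payload: fixed-width value only, layout implied by the schema */
    memcpy(out, &value, sizeof(value));
    return sizeof(value);
  }
  /* K-V payload: (colId, value) pair, self-describing but larger */
  ExKvCell cell = { .colId = 1, .value = value };
  memcpy(out, &cell, sizeof(cell));
  return sizeof(cell);
}

int main(void) {
  uint8_t buf[16];
  printf("raw bytes: %zu\n", encode_row(buf, EX_PAYLOAD_RAW, 42));
  printf("kv  bytes: %zu\n", encode_row(buf, EX_PAYLOAD_KV, 42));
  return 0;
}
```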

@@ -425,7 +425,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
    case TSDB_DATA_TYPE_BOOL: {  // bool
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
-                                           tdGetNullVal(TSDB_DATA_TYPE_BOOL), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
+                                           getNullValue(TSDB_DATA_TYPE_BOOL), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset);
      } else {
        if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) {
          if (strncmp(pToken->z, "true", pToken->n) == 0) {

@@ -459,7 +459,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
    case TSDB_DATA_TYPE_TINYINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
-                                           tdGetNullVal(TSDB_DATA_TYPE_TINYINT), TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset);
+                                           getNullValue(TSDB_DATA_TYPE_TINYINT), TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {

@@ -479,7 +479,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
    case TSDB_DATA_TYPE_UTINYINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
-                                           tdGetNullVal(TSDB_DATA_TYPE_UTINYINT), TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset);
+                                           getNullValue(TSDB_DATA_TYPE_UTINYINT), TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {

@@ -499,7 +499,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
    case TSDB_DATA_TYPE_SMALLINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
-                                           tdGetNullVal(TSDB_DATA_TYPE_SMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset);
+                                           getNullValue(TSDB_DATA_TYPE_SMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {

@@ -520,7 +520,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
      if (isNullStr(pToken)) {
        *sizeAppend =
            tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
-                                tdGetNullVal(TSDB_DATA_TYPE_USMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset);
+                                getNullValue(TSDB_DATA_TYPE_USMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {

@@ -540,7 +540,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
    case TSDB_DATA_TYPE_INT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
-                                           tdGetNullVal(TSDB_DATA_TYPE_INT), TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset);
+                                           getNullValue(TSDB_DATA_TYPE_INT), TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {

@@ -560,7 +560,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
    case TSDB_DATA_TYPE_UINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
-                                           tdGetNullVal(TSDB_DATA_TYPE_UINT), TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset);
+                                           getNullValue(TSDB_DATA_TYPE_UINT), TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {

@@ -580,7 +580,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
    case TSDB_DATA_TYPE_BIGINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
-                                           tdGetNullVal(TSDB_DATA_TYPE_BIGINT), TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset);
+                                           getNullValue(TSDB_DATA_TYPE_BIGINT), TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true);
        if (ret != TSDB_CODE_SUCCESS) {

@@ -598,7 +598,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
    case TSDB_DATA_TYPE_UBIGINT:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
-                                           tdGetNullVal(TSDB_DATA_TYPE_UBIGINT), TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset);
+                                           getNullValue(TSDB_DATA_TYPE_UBIGINT), TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset);
      } else {
        ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false);
        if (ret != TSDB_CODE_SUCCESS) {

@@ -639,7 +639,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
    case TSDB_DATA_TYPE_DOUBLE:
      if (isNullStr(pToken)) {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
-                                           tdGetNullVal(TSDB_DATA_TYPE_DOUBLE), TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset);
+                                           getNullValue(TSDB_DATA_TYPE_DOUBLE), TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset);
      } else {
        double dv;
        if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) {

@@ -661,7 +661,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
      if (pToken->type == TK_NULL) {
        payloadColSetId(payload, pSchema->colId);
        payloadColSetType(payload, pSchema->type);
-        memcpy(POINTER_SHIFT(payloadStart, tOffset), tdGetNullVal(TSDB_DATA_TYPE_BINARY), VARSTR_HEADER_SIZE + CHAR_BYTES);
+        memcpy(POINTER_SHIFT(payloadStart, tOffset), getNullValue(TSDB_DATA_TYPE_BINARY), VARSTR_HEADER_SIZE + CHAR_BYTES);
        *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + CHAR_BYTES);
      } else {  // too long values will return invalid sql, not be truncated automatically
        if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) {  // todo refactor

@@ -684,7 +684,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
      if (pToken->type == TK_NULL) {
        payloadColSetId(payload, pSchema->colId);
        payloadColSetType(payload, pSchema->type);
-        memcpy(POINTER_SHIFT(payloadStart,tOffset), tdGetNullVal(TSDB_DATA_TYPE_NCHAR), VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
+        memcpy(POINTER_SHIFT(payloadStart,tOffset), getNullValue(TSDB_DATA_TYPE_NCHAR), VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
        *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE);
      } else {
        // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long'

@@ -716,7 +716,7 @@ static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *pay
        *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]);
      } else {
        *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type,
-                                           tdGetNullVal(TSDB_DATA_TYPE_TIMESTAMP),
+                                           getNullValue(TSDB_DATA_TYPE_TIMESTAMP),
                                            TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset);
      }
    } else {
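
Every numeric case above follows the same shape: a NULL token appends the type's null sentinel via `getNullValue()`, and anything else goes through `tStrToInteger()` with a signed flag (`true` for TINYINT/SMALLINT/INT/BIGINT, `false` for the unsigned variants). A stand-alone sketch of that parse step on a length-delimited lexer token; the `parse_int_token` helper and the sample SQL string are invented, and `tStrToInteger`'s real implementation lives in the TDengine sources:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse an integer out of a length-delimited token (lexer tokens are usually
 * not NUL-terminated).  Returns 0 on success, -1 on failure. */
static int parse_int_token(const char *z, uint32_t n, int64_t *out, int issigned) {
  char buf[32];
  if (n == 0 || n >= sizeof(buf)) return -1;
  memcpy(buf, z, n);
  buf[n] = '\0';

  char *end = NULL;
  errno = 0;
  if (issigned) {
    *out = strtoll(buf, &end, 10);
  } else {
    *out = (int64_t)strtoull(buf, &end, 10);
  }
  if (errno == ERANGE || end == buf || *end != '\0') return -1;
  return 0;
}

int main(void) {
  int64_t v = 0;
  const char *sql = "insert into d10 values (now, 219)";
  /* pretend the lexer handed us the token "219" (3 bytes, no terminator) */
  if (parse_int_token(sql + 29, 3, &v, 1) == 0) {
    printf("parsed %lld\n", (long long)v);
  }
  return 0;
}
```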

@@ -1069,9 +1069,8 @@ int32_t FORCE_INLINE tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTabl
  }
}

-#if 0
// data block is disordered, sort it in ascending order
-static void tscSortRemoveDataBlockDupRowsOld(STableDataBlocks *dataBuf) {
+void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks *dataBuf) {
  SSubmitBlk *pBlocks = (SSubmitBlk *)dataBuf->pData;

  // size is less than the total size, since duplicated rows may be removed yet.

@@ -1114,7 +1113,6 @@ static void tscSortRemoveDataBlockDupRowsOld(STableDataBlocks *dataBuf) {

  dataBuf->prevTS = INT64_MIN;
}
-#endif

// data block is disordered, sort it in ascending order
int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlkKeyInfo) {

@@ -293,7 +293,6 @@ static char* normalStmtBuildSql(STscStmt* stmt) {
  return taosStringBuilderGetResult(&sb, NULL);
}

#if 0
static int fillColumnsNull(STableDataBlocks* pBlock, int32_t rowNum) {
  SParsedDataColInfo* spd = &pBlock->boundColumnInfo;
  int32_t offset = 0;

@@ -321,129 +320,8 @@ static int fillColumnsNull(STableDataBlocks* pBlock, int32_t rowNum) {

  return TSDB_CODE_SUCCESS;
}
#endif
/**
|
||||
* input:
|
||||
* - schema:
|
||||
* - payload:
|
||||
* - spd:
|
||||
* output:
|
||||
* - pBlock with data block replaced by K-V format
|
||||
*/
|
||||
static int refactorPayload(STableDataBlocks* pBlock, int32_t rowNum) {
|
||||
SParsedDataColInfo* spd = &pBlock->boundColumnInfo;
|
||||
SSchema* schema = (SSchema*)pBlock->pTableMeta->schema;
|
||||
SMemRowHelper* pHelper = &pBlock->rowHelper;
|
||||
STableMeta* pTableMeta = pBlock->pTableMeta;
|
||||
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
|
||||
int code = TSDB_CODE_SUCCESS;
|
||||
int32_t extendedRowSize = getExtendedRowSize(&tinfo);
|
||||
TDRowTLenT destPayloadSize = sizeof(SSubmitBlk);
|
||||
|
||||
ASSERT(pHelper->allNullLen >= 8);
|
||||
|
||||
TDRowTLenT destAllocSize = sizeof(SSubmitBlk) + rowNum * extendedRowSize;
|
||||
SSubmitBlk* pDestBlock = tcalloc(destAllocSize, 1);
|
||||
if (pDestBlock == NULL) {
|
||||
return TSDB_CODE_TSC_OUT_OF_MEMORY;
|
||||
}
|
||||
memcpy(pDestBlock, pBlock->pData, sizeof(SSubmitBlk));
|
||||
char* destPayload = (char*)pDestBlock + sizeof(SSubmitBlk);
|
||||
|
||||
char* srcPayload = (char*)pBlock->pData + sizeof(SSubmitBlk);
|
||||
|
||||
for (int n = 0; n < rowNum; ++n) {
|
||||
payloadSetNCols(destPayload, spd->numOfBound);
|
||||
|
||||
TDRowTLenT dataRowLen = pHelper->allNullLen;
|
||||
TDRowTLenT kvRowLen = TD_MEM_ROW_KV_VER_SIZE + sizeof(SColIdx) * spd->numOfBound;
|
||||
TDRowTLenT payloadValOffset = payloadValuesOffset(destPayload); // rely on payloadNCols
|
||||
TDRowLenT colValOffset = 0;
|
||||
|
||||
char* kvPrimaryKeyStart = destPayload + PAYLOAD_HEADER_LEN; // primaryKey in 1st column tuple
|
||||
char* kvStart = kvPrimaryKeyStart + PAYLOAD_COL_HEAD_LEN; // the column tuple behind the primaryKey
|
||||
|
||||
for (int32_t i = 0; i < spd->numOfBound; ++i) {
|
||||
int32_t colIndex = spd->boundedColumns[i];
|
||||
ASSERT(spd->cols[colIndex].hasVal);
|
||||
char* start = srcPayload + spd->cols[colIndex].offset;
|
||||
SSchema* pSchema = &schema[colIndex]; // get colId here
|
||||
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
|
||||
|
||||
// the primary key locates in 1st column
|
||||
if (!IS_DATA_COL_ORDERED(spd->orderStatus)) {
|
||||
ASSERT(spd->colIdxInfo != NULL);
|
||||
if (!isPrimaryKey) {
|
||||
kvStart = POINTER_SHIFT(kvPrimaryKeyStart, spd->colIdxInfo[i].finalIdx * PAYLOAD_COL_HEAD_LEN);
|
||||
} else {
|
||||
ASSERT(spd->colIdxInfo[i].finalIdx == 0);
|
||||
}
|
||||
}
|
||||
if (isPrimaryKey) {
|
||||
payloadColSetId(kvPrimaryKeyStart, pSchema->colId);
|
||||
payloadColSetType(kvPrimaryKeyStart, pSchema->type);
|
||||
payloadColSetOffset(kvPrimaryKeyStart, colValOffset);
|
||||
memcpy(POINTER_SHIFT(destPayload, payloadValOffset + colValOffset), start, TYPE_BYTES[pSchema->type]);
|
||||
colValOffset += TYPE_BYTES[pSchema->type];
|
||||
kvRowLen += TYPE_BYTES[pSchema->type];
|
||||
} else {
|
||||
payloadColSetId(kvStart, pSchema->colId);
|
||||
payloadColSetType(kvStart, pSchema->type);
|
||||
payloadColSetOffset(kvStart, colValOffset);
|
||||
if (IS_VAR_DATA_TYPE(pSchema->type)) {
|
||||
varDataCopy(POINTER_SHIFT(destPayload, payloadValOffset + colValOffset), start);
|
||||
colValOffset += varDataTLen(start);
|
||||
kvRowLen += varDataTLen(start);
|
||||
if (pSchema->type == TSDB_DATA_TYPE_BINARY) {
|
||||
dataRowLen += (varDataLen(start) - CHAR_BYTES);
|
||||
} else if (pSchema->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
dataRowLen += (varDataLen(start) - TSDB_NCHAR_SIZE);
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
} else {
|
||||
memcpy(POINTER_SHIFT(destPayload, payloadValOffset + colValOffset), start, TYPE_BYTES[pSchema->type]);
|
||||
colValOffset += TYPE_BYTES[pSchema->type];
|
||||
kvRowLen += TYPE_BYTES[pSchema->type];
|
||||
}
|
||||
|
||||
if (IS_DATA_COL_ORDERED(spd->orderStatus)) {
|
||||
kvStart += PAYLOAD_COL_HEAD_LEN; // move to next column
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} // end of column
|
||||
|
||||
if (kvRowLen < dataRowLen) {
|
||||
payloadSetType(destPayload, SMEM_ROW_KV);
|
||||
} else {
|
||||
payloadSetType(destPayload, SMEM_ROW_DATA);
|
||||
}
|
||||
|
||||
ASSERT(colValOffset <= TSDB_MAX_BYTES_PER_ROW);
|
||||
|
||||
TDRowTLenT len = payloadValOffset + colValOffset;
|
||||
payloadSetTLen(destPayload, len);
|
||||
|
||||
// next loop
|
||||
srcPayload += pBlock->rowSize;
|
||||
destPayload += len;
|
||||
|
||||
destPayloadSize += len;
|
||||
} // end of row
|
||||
|
||||
ASSERT(destPayloadSize <= destAllocSize);
|
||||
|
||||
tfree(pBlock->pData);
|
||||
pBlock->pData = (char*)pDestBlock;
|
||||
pBlock->nAllocSize = destAllocSize;
|
||||
pBlock->size = destPayloadSize;
|
||||
|
||||
return code;
|
||||
}
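
`refactorPayload()` above keeps two running lengths per row, `kvRowLen` and `dataRowLen`, and finally tags the row `SMEM_ROW_KV` only when the sparse K-V encoding is actually smaller than the dense data row. A toy version of that size comparison; the header sizes and the `pick_row_type` helper are made up for the example:

```c
#include <stdio.h>

typedef enum { EX_ROW_DATA, EX_ROW_KV } ExRowType;

/* Dense row: every schema column is materialised.
 * K-V row:   only the bound columns, each prefixed by a (colId, offset) entry. */
static ExRowType pick_row_type(int nCols, int nBound, int fixedColBytes) {
  int dataRowLen = 4 + nCols * fixedColBytes;        /* 4: illustrative row header   */
  int kvRowLen   = 4 + nBound * (4 + fixedColBytes); /* 4: illustrative per-col index */
  return (kvRowLen < dataRowLen) ? EX_ROW_KV : EX_ROW_DATA;
}

int main(void) {
  printf("100 cols, 5 bound -> %s\n", pick_row_type(100, 5, 8) == EX_ROW_KV ? "KV" : "DATA");
  printf("10 cols, 9 bound  -> %s\n", pick_row_type(10, 9, 8) == EX_ROW_KV ? "KV" : "DATA");
  return 0;
}
```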
|
||||
#if 0
|
||||
int32_t fillTablesColumnsNull(SSqlObj* pSql) {
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
|
@ -466,90 +344,9 @@ int32_t fillTablesColumnsNull(SSqlObj* pSql) {
|
|||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
||||

/**
 * check and sort
 */
static int initPayloadEnv(STableDataBlocks* pBlock, int32_t rowNum) {
  SParsedDataColInfo* spd = &pBlock->boundColumnInfo;
  if (spd->orderStatus != ORDER_STATUS_UNKNOWN) {
    return TSDB_CODE_SUCCESS;
  }

  bool    isOrdered  = true;
  int32_t lastColIdx = -1;
  for (int32_t i = 0; i < spd->numOfBound; ++i) {
    ASSERT(spd->cols[i].hasVal);
    int32_t colIdx = spd->boundedColumns[i];
    if (isOrdered) {
      if (lastColIdx > colIdx) {
        isOrdered = false;
        break;
      } else {
        lastColIdx = colIdx;
      }
    }
  }

  spd->orderStatus = isOrdered ? ORDER_STATUS_ORDERED : ORDER_STATUS_DISORDERED;

  if (isOrdered) {
    spd->colIdxInfo = NULL;
  } else {
    spd->colIdxInfo = calloc(spd->numOfBound, sizeof(SBoundIdxInfo));
    if (spd->colIdxInfo == NULL) {
      return TSDB_CODE_TSC_OUT_OF_MEMORY;
    }
    SBoundIdxInfo* pColIdx = spd->colIdxInfo;
    for (uint16_t i = 0; i < spd->numOfBound; ++i) {
      pColIdx[i].schemaColIdx = (uint16_t)spd->boundedColumns[i];
      pColIdx[i].boundIdx = i;
    }
    qsort(pColIdx, spd->numOfBound, sizeof(SBoundIdxInfo), schemaIdxCompar);
    for (uint16_t i = 0; i < spd->numOfBound; ++i) {
      pColIdx[i].finalIdx = i;
    }
    qsort(pColIdx, spd->numOfBound, sizeof(SBoundIdxInfo), boundIdxCompar);
  }

  return TSDB_CODE_SUCCESS;
}
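
The disordered branch of `initPayloadEnv()` builds `colIdxInfo` with two `qsort()` passes: sort by schema column index to assign each bound column its final slot, then sort back by bound position so the i-th bound column can be looked up directly. A self-contained sketch of that two-pass index mapping; the field names mirror the ones above, but the comparators and sample data are invented:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  uint16_t schemaColIdx; /* column index in the table schema  */
  uint16_t boundIdx;     /* position in the user's bound list */
  uint16_t finalIdx;     /* position once laid out in schema order */
} BoundIdxInfo;

static int bySchemaIdx(const void *a, const void *b) {
  return ((const BoundIdxInfo *)a)->schemaColIdx - ((const BoundIdxInfo *)b)->schemaColIdx;
}
static int byBoundIdx(const void *a, const void *b) {
  return ((const BoundIdxInfo *)a)->boundIdx - ((const BoundIdxInfo *)b)->boundIdx;
}

int main(void) {
  /* user bound schema columns 3, 0, 2 -- in that order */
  BoundIdxInfo info[3] = {{3, 0, 0}, {0, 1, 0}, {2, 2, 0}};
  int n = 3;

  qsort(info, n, sizeof(info[0]), bySchemaIdx);      /* schema order       */
  for (int i = 0; i < n; ++i) info[i].finalIdx = i;  /* assign final slots */
  qsort(info, n, sizeof(info[0]), byBoundIdx);       /* back to bind order */

  for (int i = 0; i < n; ++i)
    printf("bound #%d (schema col %u) -> final slot %u\n",
           i, (unsigned)info[i].schemaColIdx, (unsigned)info[i].finalIdx);
  return 0;
}
```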
|
||||
/**
|
||||
* Refactor the raw payload structure to K-V format as the in tsParseOneRow()
|
||||
*/
|
||||
int32_t fillTablesPayload(SSqlObj* pSql) {
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
int code = TSDB_CODE_SUCCESS;
|
||||
|
||||
STableDataBlocks** p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL);
|
||||
|
||||
STableDataBlocks* pOneTableBlock = *p;
|
||||
while (pOneTableBlock) {
|
||||
SSubmitBlk* pBlocks = (SSubmitBlk*)pOneTableBlock->pData;
|
||||
|
||||
if (pBlocks->numOfRows > 0) {
|
||||
initSMemRowHelper(&pOneTableBlock->rowHelper, tscGetTableSchema(pOneTableBlock->pTableMeta),
|
||||
tscGetNumOfColumns(pOneTableBlock->pTableMeta), 0);
|
||||
if ((code = initPayloadEnv(pOneTableBlock, pBlocks->numOfRows)) != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
if ((code = refactorPayload(pOneTableBlock, pBlocks->numOfRows)) != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
};
|
||||
}
|
||||
|
||||
p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p);
|
||||
if (p == NULL) {
|
||||
break;
|
||||
}
|
||||
|
||||
pOneTableBlock = *p;
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// functions for insertion statement preparation
|
||||
static FORCE_INLINE int doBindParam(STableDataBlocks* pBlock, char* data, SParamInfo* param, TAOS_BIND* bind, int32_t colNum) {
|
||||
|
@ -1362,12 +1159,9 @@ static int insertStmtExecute(STscStmt* stmt) {
|
|||
pBlk->uid = pTableMeta->id.uid;
|
||||
pBlk->tid = pTableMeta->id.tid;
|
||||
|
||||
int code = fillTablesPayload(stmt->pSql);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
fillTablesColumnsNull(stmt->pSql);
|
||||
|
||||
code = tscMergeTableDataBlocks(&stmt->pSql->cmd.insertParam, false);
|
||||
int code = tscMergeTableDataBlocks(&stmt->pSql->cmd.insertParam, false);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
@ -1444,7 +1238,7 @@ static int insertBatchStmtExecute(STscStmt* pStmt) {
|
|||
return TSDB_CODE_TSC_APP_ERROR;
|
||||
}
|
||||
|
||||
fillTablesPayload(pStmt->pSql);
|
||||
fillTablesColumnsNull(pStmt->pSql);
|
||||
|
||||
if ((code = tscMergeTableDataBlocks(&pStmt->pSql->cmd.insertParam, false)) != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
@ -2131,6 +1925,7 @@ int taos_stmt_execute(TAOS_STMT* stmt) {
|
|||
|
||||
pStmt->last = STMT_EXECUTE;
|
||||
|
||||
pStmt->pSql->cmd.insertParam.payloadType = PAYLOAD_TYPE_RAW;
|
||||
if (pStmt->multiTbInsert) {
|
||||
ret = insertBatchStmtExecute(pStmt);
|
||||
} else {
|
||||
|
|
|
@ -6010,6 +6010,16 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
return invalidOperationMsg(pMsg, msg22);
|
||||
}
|
||||
|
||||
SSchema* pSchema = (SSchema*) pTableMetaInfo->pTableMeta->schema;
|
||||
int16_t numOfColumns = pTableMetaInfo->pTableMeta->tableInfo.numOfColumns;
|
||||
int16_t i;
|
||||
uint32_t nLen = 0;
|
||||
for (i = 0; i < numOfColumns; ++i) {
|
||||
nLen += pSchema[i].colId != columnIndex.columnIndex ? pSchema[i].bytes : pItem->bytes;
|
||||
}
|
||||
if (nLen >= TSDB_MAX_BYTES_PER_ROW) {
|
||||
return invalidOperationMsg(pMsg, msg24);
|
||||
}
|
||||
TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
|
||||
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
}else if (pAlterSQL->type == TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN) {
|
||||
|
@ -6051,6 +6061,17 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
|
|||
return invalidOperationMsg(pMsg, msg22);
|
||||
}
|
||||
|
||||
SSchema* pSchema = (SSchema*) pTableMetaInfo->pTableMeta->schema;
|
||||
int16_t numOfColumns = pTableMetaInfo->pTableMeta->tableInfo.numOfColumns;
|
||||
int16_t i;
|
||||
uint32_t nLen = 0;
|
||||
for (i = 0; i < numOfColumns; ++i) {
|
||||
nLen += pSchema[i].colId != columnIndex.columnIndex ? pSchema[i].bytes : pItem->bytes;
|
||||
}
|
||||
if (nLen >= TSDB_MAX_BYTES_PER_ROW) {
|
||||
return invalidOperationMsg(pMsg, msg24);
|
||||
}
|
||||
|
||||
TAOS_FIELD f = tscCreateField(pColSchema->type, name.z, pItem->bytes);
|
||||
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
}
|
||||
|
|
|
@ -1819,14 +1819,14 @@ static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) {
|
|||
p = payloadNextCol(p);
|
||||
++i;
|
||||
} else {
|
||||
tdAppendColVal(trow, tdGetNullVal(pSchema[j].type), pSchema[j].type, toffset);
|
||||
tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
|
||||
toffset += TYPE_BYTES[pSchema[j].type];
|
||||
++j;
|
||||
}
|
||||
}
|
||||
|
||||
while (j < nCols) {
|
||||
tdAppendColVal(trow, tdGetNullVal(pSchema[j].type), pSchema[j].type, toffset);
|
||||
tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset);
|
||||
toffset += TYPE_BYTES[pSchema[j].type];
|
||||
++j;
|
||||
}
|
||||
|
@ -1866,7 +1866,8 @@ static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) {
|
|||
}
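
`tdGenMemRowFromBuilder()` above advances two cursors at once, `i` over the columns present in the payload and `j` over the full schema, appending the bound value when the ids line up and `getNullValue(pSchema[j].type)` otherwise, then pads any remaining schema columns with nulls. A stand-alone sketch of that merge walk; the column ids and values are made up:

```c
#include <stdio.h>

int main(void) {
  int schemaIds[] = {1, 2, 3, 4, 5};  /* full schema           */
  int boundIds[]  = {1, 3, 5};        /* columns carrying data */
  int boundVals[] = {10, 30, 50};
  int nSchema = 5, nBound = 3;

  int i = 0, j = 0;
  while (j < nSchema) {
    if (i < nBound && boundIds[i] == schemaIds[j]) {
      printf("col %d = %d\n", schemaIds[j], boundVals[i]);  /* real value */
      ++i; ++j;
    } else {
      printf("col %d = NULL\n", schemaIds[j]);              /* null fill  */
      ++j;
    }
  }
  return 0;
}
```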
|
||||
|
||||
// Erase the empty space reserved for binary data
|
||||
static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bool includeSchema, SBlockKeyTuple *blkKeyTuple) {
|
||||
static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam,
|
||||
SBlockKeyTuple* blkKeyTuple) {
|
||||
// TODO: optimize this function, handle the case while binary is not presented
|
||||
STableMeta* pTableMeta = pTableDataBlock->pTableMeta;
|
||||
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
|
||||
|
@ -1879,7 +1880,7 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
|
|||
int32_t flen = 0; // original total length of row
|
||||
|
||||
// schema needs to be included into the submit data block
|
||||
if (includeSchema) {
|
||||
if (insertParam->schemaAttached) {
|
||||
int32_t numOfCols = tscGetNumOfColumns(pTableDataBlock->pTableMeta);
|
||||
for(int32_t j = 0; j < numOfCols; ++j) {
|
||||
STColumn* pCol = (STColumn*) pDataBlock;
|
||||
|
@ -1906,18 +1907,38 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
|
|||
pBlock->dataLen = 0;
|
||||
int32_t numOfRows = htons(pBlock->numOfRows);
|
||||
|
||||
SMemRowBuilder rowBuilder;
|
||||
rowBuilder.pSchema = pSchema;
|
||||
rowBuilder.sversion = pTableMeta->sversion;
|
||||
rowBuilder.flen = flen;
|
||||
rowBuilder.nCols = tinfo.numOfColumns;
|
||||
rowBuilder.pDataBlock = pDataBlock;
|
||||
rowBuilder.pSubmitBlk = pBlock;
|
||||
rowBuilder.buf = p;
|
||||
if (IS_RAW_PAYLOAD(insertParam->payloadType)) {
|
||||
for (int32_t i = 0; i < numOfRows; ++i) {
|
||||
SMemRow memRow = (SMemRow)pDataBlock;
|
||||
memRowSetType(memRow, SMEM_ROW_DATA);
|
||||
SDataRow trow = memRowDataBody(memRow);
|
||||
dataRowSetLen(trow, (uint16_t)(TD_DATA_ROW_HEAD_SIZE + flen));
|
||||
dataRowSetVersion(trow, pTableMeta->sversion);
|
||||
|
||||
for (int32_t i = 0; i < numOfRows; ++i) {
|
||||
rowBuilder.buf = (blkKeyTuple + i)->payloadAddr;
|
||||
tdGenMemRowFromBuilder(&rowBuilder);
|
||||
int toffset = 0;
|
||||
for (int32_t j = 0; j < tinfo.numOfColumns; j++) {
|
||||
tdAppendColVal(trow, p, pSchema[j].type, toffset);
|
||||
toffset += TYPE_BYTES[pSchema[j].type];
|
||||
p += pSchema[j].bytes;
|
||||
}
|
||||
|
||||
pDataBlock = (char*)pDataBlock + memRowTLen(memRow);
|
||||
pBlock->dataLen += memRowTLen(memRow);
|
||||
}
|
||||
} else {
|
||||
SMemRowBuilder rowBuilder;
|
||||
rowBuilder.pSchema = pSchema;
|
||||
rowBuilder.sversion = pTableMeta->sversion;
|
||||
rowBuilder.flen = flen;
|
||||
rowBuilder.nCols = tinfo.numOfColumns;
|
||||
rowBuilder.pDataBlock = pDataBlock;
|
||||
rowBuilder.pSubmitBlk = pBlock;
|
||||
rowBuilder.buf = p;
|
||||
|
||||
for (int32_t i = 0; i < numOfRows; ++i) {
|
||||
rowBuilder.buf = (blkKeyTuple + i)->payloadAddr;
|
||||
tdGenMemRowFromBuilder(&rowBuilder);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t len = pBlock->dataLen + pBlock->schemaLen;
|
||||
|
@ -1963,6 +1984,7 @@ static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeB
|
|||
int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap) {
|
||||
const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
|
||||
int code = 0;
|
||||
bool isRawPayload = IS_RAW_PAYLOAD(pInsertParam->payloadType);
|
||||
void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
|
||||
SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES);
|
||||
|
||||
|
@ -1971,7 +1993,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
|
|||
STableDataBlocks* pOneTableBlock = *p;
|
||||
|
||||
SBlockKeyInfo blkKeyInfo = {0}; // share by pOneTableBlock
|
||||
|
||||
|
||||
while(pOneTableBlock) {
|
||||
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
|
||||
if (pBlocks->numOfRows > 0) {
|
||||
|
@ -2010,21 +2032,29 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
|
|||
}
|
||||
}
|
||||
|
||||
if ((code = tscSortRemoveDataBlockDupRows(pOneTableBlock, &blkKeyInfo)) != 0) {
|
||||
taosHashCleanup(pVnodeDataBlockHashList);
|
||||
tscDestroyBlockArrayList(pVnodeDataBlockList);
|
||||
tfree(dataBuf->pData);
|
||||
tfree(blkKeyInfo.pKeyTuple);
|
||||
return code;
|
||||
if (isRawPayload) {
|
||||
tscSortRemoveDataBlockDupRowsRaw(pOneTableBlock);
|
||||
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize * (pBlocks->numOfRows - 1);
|
||||
|
||||
tscDebug("0x%" PRIx64 " name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64,
|
||||
pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid,
|
||||
pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
|
||||
} else {
|
||||
if ((code = tscSortRemoveDataBlockDupRows(pOneTableBlock, &blkKeyInfo)) != 0) {
|
||||
taosHashCleanup(pVnodeDataBlockHashList);
|
||||
tscDestroyBlockArrayList(pVnodeDataBlockList);
|
||||
tfree(dataBuf->pData);
|
||||
tfree(blkKeyInfo.pKeyTuple);
|
||||
return code;
|
||||
}
|
||||
ASSERT(blkKeyInfo.pKeyTuple != NULL && pBlocks->numOfRows > 0);
|
||||
|
||||
SBlockKeyTuple* pLastKeyTuple = blkKeyInfo.pKeyTuple + pBlocks->numOfRows - 1;
|
||||
tscDebug("0x%" PRIx64 " name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64,
|
||||
pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid,
|
||||
pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey);
|
||||
}
|
||||
|
||||
ASSERT(blkKeyInfo.pKeyTuple != NULL && pBlocks->numOfRows > 0);
|
||||
|
||||
SBlockKeyTuple* pLastKeyTuple = blkKeyInfo.pKeyTuple + pBlocks->numOfRows - 1;
|
||||
tscDebug("0x%" PRIx64 " name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64,
|
||||
pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid, pBlocks->numOfRows,
|
||||
pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey);
|
||||
|
||||
|
||||
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
|
||||
|
||||
pBlocks->tid = htonl(pBlocks->tid);
|
||||
|
@ -2034,7 +2064,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl
|
|||
pBlocks->schemaLen = 0;
|
||||
|
||||
// erase the empty space reserved for binary data
|
||||
int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pInsertParam->schemaAttached, blkKeyInfo.pKeyTuple);
|
||||
int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pInsertParam, blkKeyInfo.pKeyTuple);
|
||||
assert(finalLen <= len);
|
||||
|
||||
dataBuf->size += (finalLen + sizeof(SSubmitBlk));
|
||||
|
|
|
@ -24,35 +24,6 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#pragma pack(push, 1)
|
||||
typedef struct {
|
||||
VarDataLenT len;
|
||||
uint8_t data;
|
||||
} SBinaryNullT;
|
||||
|
||||
typedef struct {
|
||||
VarDataLenT len;
|
||||
uint32_t data;
|
||||
} SNCharNullT;
|
||||
#pragma pack(pop)
|
||||
|
||||
extern const uint8_t BoolNull;
|
||||
extern const uint8_t TinyintNull;
|
||||
extern const uint16_t SmallintNull;
|
||||
extern const uint32_t IntNull;
|
||||
extern const uint64_t BigintNull;
|
||||
extern const uint64_t TimestampNull;
|
||||
extern const uint8_t UTinyintNull;
|
||||
extern const uint16_t USmallintNull;
|
||||
extern const uint32_t UIntNull;
|
||||
extern const uint64_t UBigintNull;
|
||||
extern const uint32_t FloatNull;
|
||||
extern const uint64_t DoubleNull;
|
||||
extern const SBinaryNullT BinaryNull;
|
||||
extern const SNCharNullT NcharNull;
|
||||
|
||||
const void *tdGetNullVal(int8_t type);
|
||||
|
||||
#define STR_TO_VARSTR(x, str) \
|
||||
do { \
|
||||
VarDataLenT __len = (VarDataLenT)strlen(str); \
|
||||
|
@ -287,7 +258,7 @@ void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints);
|
|||
// Get the data pointer from a column-wised data
|
||||
static FORCE_INLINE const void *tdGetColDataOfRow(SDataCol *pCol, int row) {
|
||||
if (isAllRowsNull(pCol)) {
|
||||
return tdGetNullVal(pCol->type);
|
||||
return getNullValue(pCol->type);
|
||||
}
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]);
|
||||
|
|
|
@ -18,21 +18,6 @@
|
|||
#include "tcoding.h"
|
||||
#include "wchar.h"
|
||||
|
||||
const uint8_t BoolNull = TSDB_DATA_BOOL_NULL;
|
||||
const uint8_t TinyintNull = TSDB_DATA_TINYINT_NULL;
|
||||
const uint16_t SmallintNull = TSDB_DATA_SMALLINT_NULL;
|
||||
const uint32_t IntNull = TSDB_DATA_INT_NULL;
|
||||
const uint64_t BigintNull = TSDB_DATA_BIGINT_NULL;
|
||||
const uint64_t TimestampNull = TSDB_DATA_BIGINT_NULL;
|
||||
const uint8_t UTinyintNull = TSDB_DATA_UTINYINT_NULL;
|
||||
const uint16_t USmallintNull = TSDB_DATA_USMALLINT_NULL;
|
||||
const uint32_t UIntNull = TSDB_DATA_UINT_NULL;
|
||||
const uint64_t UBigintNull = TSDB_DATA_UBIGINT_NULL;
|
||||
const uint32_t FloatNull = TSDB_DATA_FLOAT_NULL;
|
||||
const uint64_t DoubleNull = TSDB_DATA_DOUBLE_NULL;
|
||||
const SBinaryNullT BinaryNull = {1, TSDB_DATA_BINARY_NULL};
|
||||
const SNCharNullT NcharNull = {4, TSDB_DATA_NCHAR_NULL};
|
||||
|
||||
static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, int limit1, SDataCols *src2, int *iter2,
|
||||
int limit2, int tRows);
|
||||
|
||||
|
@ -453,7 +438,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
|
|||
SDataCol *pDataCol = &(pCols->cols[dcol]);
|
||||
if (rcol >= schemaNCols(pSchema)) {
|
||||
// dataColSetNullAt(pDataCol, pCols->numOfRows);
|
||||
dataColAppendVal(pDataCol, tdGetNullVal(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
|
||||
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
|
||||
dcol++;
|
||||
continue;
|
||||
}
|
||||
|
@ -468,7 +453,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
|
|||
rcol++;
|
||||
} else {
|
||||
// dataColSetNullAt(pDataCol, pCols->numOfRows);
|
||||
dataColAppendVal(pDataCol, tdGetNullVal(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
|
||||
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
|
||||
dcol++;
|
||||
}
|
||||
}
|
||||
|
@ -498,7 +483,7 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
|
|||
SDataCol *pDataCol = &(pCols->cols[dcol]);
|
||||
if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
|
||||
// dataColSetNullAt(pDataCol, pCols->numOfRows);
|
||||
dataColAppendVal(pDataCol, tdGetNullVal(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
|
||||
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
|
||||
++dcol;
|
||||
continue;
|
||||
}
|
||||
|
@ -514,7 +499,7 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
|
|||
++rcol;
|
||||
} else {
|
||||
// dataColSetNullAt(pDataCol, pCols->numOfRows);
|
||||
dataColAppendVal(pDataCol, tdGetNullVal(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
|
||||
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
|
||||
++dcol;
|
||||
}
|
||||
}
|
||||
|
@ -799,40 +784,4 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) {
|
|||
memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size);
|
||||
|
||||
return row;
|
||||
}
|
||||
|
||||
const void *tdGetNullVal(int8_t type) {
|
||||
switch (type) {
|
||||
case TSDB_DATA_TYPE_BOOL:
|
||||
return &BoolNull;
|
||||
case TSDB_DATA_TYPE_TINYINT:
|
||||
return &TinyintNull;
|
||||
case TSDB_DATA_TYPE_SMALLINT:
|
||||
return &SmallintNull;
|
||||
case TSDB_DATA_TYPE_INT:
|
||||
return &IntNull;
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
return &BigintNull;
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
return &FloatNull;
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
return &DoubleNull;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
return &BinaryNull;
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
return &TimestampNull;
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
return &NcharNull;
|
||||
case TSDB_DATA_TYPE_UTINYINT:
|
||||
return &UTinyintNull;
|
||||
case TSDB_DATA_TYPE_USMALLINT:
|
||||
return &USmallintNull;
|
||||
case TSDB_DATA_TYPE_UINT:
|
||||
return &UIntNull;
|
||||
case TSDB_DATA_TYPE_UBIGINT:
|
||||
return &UBigintNull;
|
||||
default:
|
||||
ASSERT(0);
|
||||
return NULL;
|
||||
}
|
||||
}
|
|

@@ -492,30 +492,32 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
  }
}

static uint8_t  nullBool      = TSDB_DATA_BOOL_NULL;
static uint8_t  nullTinyInt   = TSDB_DATA_TINYINT_NULL;
static uint16_t nullSmallInt  = TSDB_DATA_SMALLINT_NULL;
static uint32_t nullInt       = TSDB_DATA_INT_NULL;
static uint64_t nullBigInt    = TSDB_DATA_BIGINT_NULL;
static uint32_t nullFloat     = TSDB_DATA_FLOAT_NULL;
static uint64_t nullDouble    = TSDB_DATA_DOUBLE_NULL;
static uint8_t  nullTinyIntu  = TSDB_DATA_UTINYINT_NULL;
static uint16_t nullSmallIntu = TSDB_DATA_USMALLINT_NULL;
static uint32_t nullIntu      = TSDB_DATA_UINT_NULL;
static uint64_t nullBigIntu   = TSDB_DATA_UBIGINT_NULL;
+static SBinaryNullT nullBinary = {1, TSDB_DATA_BINARY_NULL};
+static SNCharNullT  nullNchar  = {4, TSDB_DATA_NCHAR_NULL};

-static union {
-  tstr str;
-  char pad[sizeof(tstr) + 4];
-} nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}};
+// static union {
+//   tstr str;
+//   char pad[sizeof(tstr) + 4];
+// } nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}};

-static void *nullValues[] = {
+static const void *nullValues[] = {
    &nullBool,     &nullTinyInt,   &nullSmallInt, &nullInt,    &nullBigInt,
    &nullFloat,    &nullDouble,    &nullBinary,   &nullBigInt, &nullNchar,
    &nullTinyIntu, &nullSmallIntu, &nullIntu,     &nullBigIntu,
};

-void *getNullValue(int32_t type) {
+const void *getNullValue(int32_t type) {
  assert(type >= TSDB_DATA_TYPE_BOOL && type <= TSDB_DATA_TYPE_UBIGINT);
  return nullValues[type - 1];
}
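
`getNullValue()` is just a table lookup: one statically allocated sentinel per data type, indexed by `type - 1`. A self-contained sketch of the same idea; the `Ex*` names are invented and the sentinel bit patterns are illustrative rather than TDengine's authoritative values:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { EX_TYPE_BOOL = 1, EX_TYPE_TINYINT = 2, EX_TYPE_SMALLINT = 3, EX_TYPE_INT = 4 };

/* One sentinel object per type; their addresses never change. */
static const uint8_t  exNullBool     = 0x02;
static const uint8_t  exNullTinyint  = 0x80;
static const uint16_t exNullSmallint = 0x8000;
static const uint32_t exNullInt      = 0x80000000u;

static const void *exNullValues[] = { &exNullBool, &exNullTinyint, &exNullSmallint, &exNullInt };

static const void *ex_get_null_value(int type) {
  assert(type >= EX_TYPE_BOOL && type <= EX_TYPE_INT);
  return exNullValues[type - 1];   /* type ids start at 1 */
}

int main(void) {
  const uint32_t *p = ex_get_null_value(EX_TYPE_INT);
  printf("INT null sentinel: 0x%08x\n", (unsigned)*p);
  return 0;
}
```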

@@ -490,9 +490,9 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {

  for (int32_t i = 0; i < pSchema->numOfCols; i++) {
    STColumn *c = pSchema->columns + i;
-    void* val = row[i];
+    void *val = row[i];
    if (val == NULL) {
-      val = getNullValue(c->type);
+      val = (void *)getNullValue(c->type);
    } else if (c->type == TSDB_DATA_TYPE_BINARY) {
      val = ((char*)val) - sizeof(VarDataLenT);
    } else if (c->type == TSDB_DATA_TYPE_NCHAR) {

@@ -174,6 +174,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_FIELD_NOT_EXIST          TAOS_DEF_ERROR_CODE(0, 0x036C)  //"Field does not exist")
#define TSDB_CODE_MND_INVALID_STABLE_NAME      TAOS_DEF_ERROR_CODE(0, 0x036D)  //"Super table does not exist")
#define TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG TAOS_DEF_ERROR_CODE(0, 0x036E)  //"Invalid create table message")
+#define TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES     TAOS_DEF_ERROR_CODE(0, 0x036F)  //"Exceed max row bytes")

#define TSDB_CODE_MND_INVALID_FUNC_NAME        TAOS_DEF_ERROR_CODE(0, 0x0370)  //"Invalid func name")
#define TSDB_CODE_MND_INVALID_FUNC_LEN         TAOS_DEF_ERROR_CODE(0, 0x0371)  //"Invalid func length")

@@ -20,6 +20,18 @@ typedef struct tstr {
  char data[];
} tstr;
+
+#pragma pack(push, 1)
+typedef struct {
+  VarDataLenT len;
+  uint8_t     data;
+} SBinaryNullT;
+
+typedef struct {
+  VarDataLenT len;
+  uint32_t    data;
+} SNCharNullT;
+#pragma pack(pop)

#define VARSTR_HEADER_SIZE  sizeof(VarDataLenT)

#define varDataLen(v)       ((VarDataLenT *)(v))[0]

@@ -182,7 +194,7 @@ bool isValidDataType(int32_t type);
void setVardataNull(char* val, int32_t type);
void setNull(char *val, int32_t type, int32_t bytes);
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
-void *getNullValue(int32_t type);
+const void *getNullValue(int32_t type);

void assignVal(char *val, const char *src, int32_t len, int32_t type);
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf);
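
The packed `SBinaryNullT`/`SNCharNullT` structs added above encode a var-data null as a length header followed by a one-byte (BINARY) or four-byte (NCHAR) null payload, so their sizes must come out to exactly 3 and 6 bytes. A small sketch of why the `#pragma pack(1)` matters, assuming the 2-byte var-data length header; the `Ex*` names are invented:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint16_t ExVarLenT;   /* stands in for the 2-byte var-data length header */

#pragma pack(push, 1)
typedef struct { ExVarLenT len; uint8_t  data; } ExBinaryNull;  /* 2 + 1 = 3 bytes */
typedef struct { ExVarLenT len; uint32_t data; } ExNCharNull;   /* 2 + 4 = 6 bytes */
#pragma pack(pop)

int main(void) {
  /* without pack(1) the compiler could pad these to 4 and 8 bytes, and the
   * memcpy of a var-data null would then write the wrong number of bytes */
  printf("sizeof(ExBinaryNull) = %zu (expect 3)\n", sizeof(ExBinaryNull));
  printf("sizeof(ExNCharNull)  = %zu (expect 6)\n", sizeof(ExNCharNull));
  return 0;
}
```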

@@ -71,16 +71,15 @@ extern char configDir[];

#define HEAD_BUFF_LEN       TSDB_MAX_COLUMNS*24  // 16*MAX_COLUMNS + (192+32)*2 + insert into ..

-#define MAX_SQL_SIZE        65536
-#define BUFFER_SIZE         (65536*2)
-#define COND_BUF_LEN        (BUFFER_SIZE - 30)
+#define COL_BUFFER_LEN      (TSDB_MAX_BYTES_PER_ROW - 50)
+#define BUFFER_SIZE         (50 + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN + TSDB_MAX_BYTES_PER_ROW + TSDB_MAX_TAGS_LEN)
+#define COND_BUF_LEN        (BUFFER_SIZE - 30)
#define MAX_USERNAME_SIZE   64
#define MAX_PASSWORD_SIZE   64
#define MAX_HOSTNAME_SIZE   64
#define MAX_TB_NAME_SIZE    64
#define MAX_DATA_SIZE       (16*TSDB_MAX_COLUMNS)+20  // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space
#define OPT_ABORT           1 /* –abort */
#define STRING_LEN          60000
#define MAX_PREPARED_RAND   1000000
#define MAX_FILE_NAME_LEN   256  // max file name length on linux is 255.
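
`COL_BUFFER_LEN` and the reworked `BUFFER_SIZE` above size the buffers that `createSuperTable()` below fills with `len += snprintf(buf + len, LIMIT - len, ...)` calls. A minimal sketch of that bounded append pattern; the buffer size and column count here are arbitrary:

```c
#include <stdio.h>

#define EX_COL_BUFFER_LEN 256

int main(void) {
  char cols[EX_COL_BUFFER_LEN] = "\0";
  int  len = 0;

  /* each snprintf() writes at the current offset and is limited to the
   * space still left, so the column list cannot run past the buffer */
  for (int colIndex = 0; colIndex < 4; colIndex++) {
    len += snprintf(cols + len, EX_COL_BUFFER_LEN - len, ", C%d %s", colIndex, "INT");
  }
  printf("column list:%s\n", cols);
  return 0;
}
```

Note that `snprintf()` returns the length it would have written, so the pattern relies on the buffer being sized generously enough that truncation never occurs, which is what tying `BUFFER_SIZE` to `TSDB_MAX_BYTES_PER_ROW` and `TSDB_MAX_TAGS_LEN` is about.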
@ -2707,7 +2706,7 @@ static int createSuperTable(
|
|||
|
||||
char command[BUFFER_SIZE] = "\0";
|
||||
|
||||
char cols[STRING_LEN] = "\0";
|
||||
char cols[COL_BUFFER_LEN] = "\0";
|
||||
int colIndex;
|
||||
int len = 0;
|
||||
|
||||
|
@ -2723,55 +2722,55 @@ static int createSuperTable(
|
|||
char* dataType = superTbl->columns[colIndex].dataType;
|
||||
|
||||
if (strcasecmp(dataType, "BINARY") == 0) {
|
||||
len += snprintf(cols + len, STRING_LEN - len,
|
||||
", col%d %s(%d)", colIndex, "BINARY",
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len,
|
||||
", C%d %s(%d)", colIndex, "BINARY",
|
||||
superTbl->columns[colIndex].dataLen);
|
||||
lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
|
||||
} else if (strcasecmp(dataType, "NCHAR") == 0) {
|
||||
len += snprintf(cols + len, STRING_LEN - len,
|
||||
", col%d %s(%d)", colIndex, "NCHAR",
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len,
|
||||
", C%d %s(%d)", colIndex, "NCHAR",
|
||||
superTbl->columns[colIndex].dataLen);
|
||||
lenOfOneRow += superTbl->columns[colIndex].dataLen + 3;
|
||||
} else if (strcasecmp(dataType, "INT") == 0) {
|
||||
if ((g_args.demo_mode) && (colIndex == 1)) {
|
||||
len += snprintf(cols + len, STRING_LEN - len,
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len,
|
||||
", VOLTAGE INT");
|
||||
} else {
|
||||
len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "INT");
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "INT");
|
||||
}
|
||||
lenOfOneRow += 11;
|
||||
} else if (strcasecmp(dataType, "BIGINT") == 0) {
|
||||
len += snprintf(cols + len, STRING_LEN - len, ", col%d %s",
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s",
|
||||
colIndex, "BIGINT");
|
||||
lenOfOneRow += 21;
|
||||
} else if (strcasecmp(dataType, "SMALLINT") == 0) {
|
||||
len += snprintf(cols + len, STRING_LEN - len, ", col%d %s",
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s",
|
||||
colIndex, "SMALLINT");
|
||||
lenOfOneRow += 6;
|
||||
} else if (strcasecmp(dataType, "TINYINT") == 0) {
|
||||
len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT");
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "TINYINT");
|
||||
lenOfOneRow += 4;
|
||||
} else if (strcasecmp(dataType, "BOOL") == 0) {
|
||||
len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL");
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "BOOL");
|
||||
lenOfOneRow += 6;
|
||||
} else if (strcasecmp(dataType, "FLOAT") == 0) {
|
||||
if (g_args.demo_mode) {
|
||||
if (colIndex == 0) {
|
||||
len += snprintf(cols + len, STRING_LEN - len, ", CURRENT FLOAT");
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT");
|
||||
} else if (colIndex == 2) {
|
||||
len += snprintf(cols + len, STRING_LEN - len, ", PHASE FLOAT");
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT");
|
||||
}
|
||||
} else {
|
||||
len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT");
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s", colIndex, "FLOAT");
|
||||
}
|
||||
|
||||
lenOfOneRow += 22;
|
||||
} else if (strcasecmp(dataType, "DOUBLE") == 0) {
|
||||
len += snprintf(cols + len, STRING_LEN - len, ", col%d %s",
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s",
|
||||
colIndex, "DOUBLE");
|
||||
lenOfOneRow += 42;
|
||||
} else if (strcasecmp(dataType, "TIMESTAMP") == 0) {
|
||||
len += snprintf(cols + len, STRING_LEN - len, ", col%d %s",
|
||||
len += snprintf(cols + len, COL_BUFFER_LEN - len, ", C%d %s",
|
||||
colIndex, "TIMESTAMP");
|
||||
lenOfOneRow += 21;
|
||||
} else {
|
||||
|
@ -2803,60 +2802,63 @@ static int createSuperTable(
|
|||
return -1;
|
||||
}
|
||||
|
||||
char tags[STRING_LEN] = "\0";
|
||||
char tags[TSDB_MAX_TAGS_LEN] = "\0";
|
||||
int tagIndex;
|
||||
len = 0;
|
||||
|
||||
int lenOfTagOfOneRow = 0;
|
||||
len += snprintf(tags + len, STRING_LEN - len, "(");
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "(");
|
||||
for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) {
|
||||
char* dataType = superTbl->tags[tagIndex].dataType;
|
||||
|
||||
if (strcasecmp(dataType, "BINARY") == 0) {
|
||||
if ((g_args.demo_mode) && (tagIndex == 1)) {
|
||||
len += snprintf(tags + len, STRING_LEN - len,
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
|
||||
"location BINARY(%d), ",
|
||||
superTbl->tags[tagIndex].dataLen);
|
||||
} else {
|
||||
len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ",
|
||||
tagIndex, "BINARY", superTbl->tags[tagIndex].dataLen);
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
|
||||
"t%d %s(%d), ", tagIndex, "BINARY",
|
||||
superTbl->tags[tagIndex].dataLen);
|
||||
}
|
||||
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
|
||||
} else if (strcasecmp(dataType, "NCHAR") == 0) {
|
||||
len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex,
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
|
||||
"t%d %s(%d), ", tagIndex,
|
||||
"NCHAR", superTbl->tags[tagIndex].dataLen);
|
||||
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3;
|
||||
} else if (strcasecmp(dataType, "INT") == 0) {
|
||||
if ((g_args.demo_mode) && (tagIndex == 0)) {
|
||||
len += snprintf(tags + len, STRING_LEN - len, "groupId INT, ");
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
|
||||
"groupId INT, ");
|
||||
} else {
|
||||
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
|
||||
"INT");
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
|
||||
"t%d %s, ", tagIndex, "INT");
|
||||
}
|
||||
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11;
|
||||
} else if (strcasecmp(dataType, "BIGINT") == 0) {
|
||||
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
|
||||
"BIGINT");
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
|
||||
"t%d %s, ", tagIndex, "BIGINT");
|
||||
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21;
|
||||
} else if (strcasecmp(dataType, "SMALLINT") == 0) {
|
||||
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
|
||||
"SMALLINT");
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
|
||||
"t%d %s, ", tagIndex, "SMALLINT");
|
||||
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
|
||||
} else if (strcasecmp(dataType, "TINYINT") == 0) {
|
||||
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
|
||||
"TINYINT");
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
|
||||
"t%d %s, ", tagIndex, "TINYINT");
|
||||
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4;
|
||||
} else if (strcasecmp(dataType, "BOOL") == 0) {
|
||||
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
|
||||
"BOOL");
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
|
||||
"t%d %s, ", tagIndex, "BOOL");
|
||||
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6;
|
||||
} else if (strcasecmp(dataType, "FLOAT") == 0) {
|
||||
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
|
||||
"FLOAT");
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
|
||||
"t%d %s, ", tagIndex, "FLOAT");
|
||||
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22;
|
||||
} else if (strcasecmp(dataType, "DOUBLE") == 0) {
|
||||
len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex,
|
||||
"DOUBLE");
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len,
|
||||
"t%d %s, ", tagIndex, "DOUBLE");
|
||||
lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42;
|
||||
} else {
|
||||
taos_close(taos);
|
||||
|
@ -2867,7 +2869,7 @@ static int createSuperTable(
|
|||
}
|
||||
|
||||
len -= 2;
|
||||
len += snprintf(tags + len, STRING_LEN - len, ")");
|
||||
len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, ")");
|
||||
|
||||
superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow;
|
||||
|
||||
|
@ -3020,175 +3022,175 @@ static int createDatabasesAndStables() {
|
|||
|
||||
static void* createTable(void *sarg)
|
||||
{
|
||||
threadInfo *pThreadInfo = (threadInfo *)sarg;
|
||||
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
|
||||
threadInfo *pThreadInfo = (threadInfo *)sarg;
|
||||
SSuperTable* superTblInfo = pThreadInfo->superTblInfo;
|
||||
|
||||
uint64_t lastPrintTime = taosGetTimestampMs();
|
||||
uint64_t lastPrintTime = taosGetTimestampMs();
|
||||
|
||||
int buff_len;
|
||||
buff_len = BUFFER_SIZE / 8;
|
||||
int buff_len;
|
||||
buff_len = BUFFER_SIZE;
|
||||
|
||||
pThreadInfo->buffer = calloc(buff_len, 1);
|
||||
if (pThreadInfo->buffer == NULL) {
|
||||
errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__);
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
int len = 0;
|
||||
int batchNum = 0;
|
||||
|
||||
verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n",
|
||||
__func__, __LINE__,
|
||||
pThreadInfo->start_table_from, pThreadInfo->end_table_to);
|
||||
|
||||
for (uint64_t i = pThreadInfo->start_table_from;
|
||||
i <= pThreadInfo->end_table_to; i++) {
|
||||
if (0 == g_Dbs.use_metric) {
|
||||
snprintf(pThreadInfo->buffer, buff_len,
|
||||
"create table if not exists %s.%s%"PRIu64" %s;",
|
||||
pThreadInfo->db_name,
|
||||
g_args.tb_prefix, i,
|
||||
pThreadInfo->cols);
|
||||
} else {
|
||||
if (superTblInfo == NULL) {
|
||||
errorPrint("%s() LN%d, use metric, but super table info is NULL\n",
|
||||
__func__, __LINE__);
|
||||
free(pThreadInfo->buffer);
|
||||
pThreadInfo->buffer = calloc(buff_len, 1);
|
||||
if (pThreadInfo->buffer == NULL) {
|
||||
errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__);
|
||||
exit(-1);
|
||||
} else {
|
||||
if (0 == len) {
|
||||
batchNum = 0;
|
||||
memset(pThreadInfo->buffer, 0, buff_len);
|
||||
len += snprintf(pThreadInfo->buffer + len,
|
||||
buff_len - len, "create table ");
|
||||
}
|
||||
char* tagsValBuf = NULL;
|
||||
if (0 == superTblInfo->tagSource) {
|
||||
tagsValBuf = generateTagVaulesForStb(superTblInfo, i);
|
||||
}
|
||||
|
||||
int len = 0;
|
||||
int batchNum = 0;
|
||||
|
||||
verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n",
|
||||
__func__, __LINE__,
|
||||
pThreadInfo->start_table_from, pThreadInfo->end_table_to);
|
||||
|
||||
for (uint64_t i = pThreadInfo->start_table_from;
|
||||
i <= pThreadInfo->end_table_to; i++) {
|
||||
if (0 == g_Dbs.use_metric) {
|
||||
snprintf(pThreadInfo->buffer, buff_len,
|
||||
"create table if not exists %s.%s%"PRIu64" %s;",
|
||||
pThreadInfo->db_name,
|
||||
g_args.tb_prefix, i,
|
||||
pThreadInfo->cols);
|
||||
} else {
|
||||
tagsValBuf = getTagValueFromTagSample(
|
||||
superTblInfo,
|
||||
i % superTblInfo->tagSampleCount);
|
||||
if (superTblInfo == NULL) {
|
||||
errorPrint("%s() LN%d, use metric, but super table info is NULL\n",
|
||||
__func__, __LINE__);
|
||||
free(pThreadInfo->buffer);
|
||||
exit(-1);
|
||||
} else {
|
||||
if (0 == len) {
|
||||
batchNum = 0;
|
||||
memset(pThreadInfo->buffer, 0, buff_len);
|
||||
len += snprintf(pThreadInfo->buffer + len,
|
||||
buff_len - len, "create table ");
|
||||
}
|
||||
char* tagsValBuf = NULL;
|
||||
if (0 == superTblInfo->tagSource) {
|
||||
tagsValBuf = generateTagVaulesForStb(superTblInfo, i);
|
||||
} else {
|
||||
tagsValBuf = getTagValueFromTagSample(
|
||||
superTblInfo,
|
||||
i % superTblInfo->tagSampleCount);
|
||||
}
|
||||
if (NULL == tagsValBuf) {
|
||||
free(pThreadInfo->buffer);
|
||||
return NULL;
|
||||
}
|
||||
len += snprintf(pThreadInfo->buffer + len,
|
||||
buff_len - len,
|
||||
"if not exists %s.%s%"PRIu64" using %s.%s tags %s ",
|
||||
pThreadInfo->db_name, superTblInfo->childTblPrefix,
|
||||
i, pThreadInfo->db_name,
|
||||
superTblInfo->sTblName, tagsValBuf);
|
||||
free(tagsValBuf);
|
||||
batchNum++;
|
||||
if ((batchNum < superTblInfo->batchCreateTableNum)
|
||||
&& ((buff_len - len)
|
||||
>= (superTblInfo->lenOfTagOfOneRow + 256))) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (NULL == tagsValBuf) {
|
||||
free(pThreadInfo->buffer);
|
||||
return NULL;
|
||||
|
||||
len = 0;
|
||||
if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
|
||||
NO_INSERT_TYPE, false)){
|
||||
errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
|
||||
free(pThreadInfo->buffer);
|
||||
return NULL;
|
||||
}
|
||||
len += snprintf(pThreadInfo->buffer + len,
|
||||
buff_len - len,
|
||||
"if not exists %s.%s%"PRIu64" using %s.%s tags %s ",
|
||||
pThreadInfo->db_name, superTblInfo->childTblPrefix,
|
||||
i, pThreadInfo->db_name,
|
||||
superTblInfo->sTblName, tagsValBuf);
|
||||
free(tagsValBuf);
|
||||
batchNum++;
|
||||
if ((batchNum < superTblInfo->batchCreateTableNum)
|
||||
&& ((buff_len - len)
|
||||
>= (superTblInfo->lenOfTagOfOneRow + 256))) {
|
||||
continue;
|
||||
|
||||
uint64_t currentPrintTime = taosGetTimestampMs();
|
||||
if (currentPrintTime - lastPrintTime > 30*1000) {
|
||||
printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
|
||||
pThreadInfo->threadID, pThreadInfo->start_table_from, i);
|
||||
lastPrintTime = currentPrintTime;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
len = 0;
|
||||
if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
|
||||
NO_INSERT_TYPE, false)){
|
||||
errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
|
||||
free(pThreadInfo->buffer);
|
||||
return NULL;
|
||||
if (0 != len) {
|
||||
if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
|
||||
NO_INSERT_TYPE, false)) {
|
||||
errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t currentPrintTime = taosGetTimestampMs();
|
||||
if (currentPrintTime - lastPrintTime > 30*1000) {
|
||||
printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n",
|
||||
pThreadInfo->threadID, pThreadInfo->start_table_from, i);
|
||||
lastPrintTime = currentPrintTime;
|
||||
}
|
||||
}
|
||||
|
||||
if (0 != len) {
|
||||
if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer,
|
||||
NO_INSERT_TYPE, false)) {
|
||||
errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer);
|
||||
}
|
||||
}
|
||||
|
||||
free(pThreadInfo->buffer);
|
||||
return NULL;
|
||||
free(pThreadInfo->buffer);
|
||||
return NULL;
|
||||
}

static int startMultiThreadCreateChildTable(
        char* cols, int threads, uint64_t tableFrom, int64_t ntables,
        char* db_name, SSuperTable* superTblInfo) {

    pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
    threadInfo *infos = calloc(1, threads * sizeof(threadInfo));

    if ((NULL == pids) || (NULL == infos)) {
        printf("malloc failed\n");
        exit(-1);
    }

    if (threads < 1) {
        threads = 1;
    }

    int64_t a = ntables / threads;
    if (a < 1) {
        threads = ntables;
        a = 1;
    }

    int64_t b = 0;
    b = ntables % threads;

    for (int64_t i = 0; i < threads; i++) {
        threadInfo *pThreadInfo = infos + i;
        pThreadInfo->threadID = i;
        tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN);
        pThreadInfo->superTblInfo = superTblInfo;
        verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name);
        pThreadInfo->taos = taos_connect(
                g_Dbs.host,
                g_Dbs.user,
                g_Dbs.password,
                db_name,
                g_Dbs.port);
        if (pThreadInfo->taos == NULL) {
            errorPrint("%s() LN%d, Failed to connect to TDengine, reason:%s\n",
                    __func__, __LINE__, taos_errstr(NULL));
            free(pids);
            free(infos);
            return -1;
        }

        pThreadInfo->start_table_from = tableFrom;
        pThreadInfo->ntables = i < b ? a + 1 : a;
        pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1;
        tableFrom = pThreadInfo->end_table_to + 1;
        pThreadInfo->use_metric = true;
        pThreadInfo->cols = cols;
        pThreadInfo->minDelay = UINT64_MAX;
        pthread_create(pids + i, NULL, createTable, pThreadInfo);
    }

    for (int i = 0; i < threads; i++) {
        pthread_join(pids[i], NULL);
    }

    for (int i = 0; i < threads; i++) {
        threadInfo *pThreadInfo = infos + i;
        taos_close(pThreadInfo->taos);
    }

    free(pids);
    free(infos);

    return 0;
}
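
For illustration only (not part of the diff): a standalone sketch of the table-partitioning rule used above — each worker gets ntables / threads child tables, and the first ntables % threads workers take one extra, so the ranges cover [tableFrom, tableFrom + ntables - 1] without gaps. splitTables() and its output format are invented for the example.

```c
#include <inttypes.h>
#include <stdio.h>

static void splitTables(int64_t ntables, int threads, uint64_t tableFrom) {
    if (threads < 1) threads = 1;
    int64_t a = ntables / threads;            /* base tables per thread        */
    if (a < 1) { threads = (int)ntables; a = 1; }
    int64_t b = ntables % threads;            /* first b threads get one extra */

    for (int64_t i = 0; i < threads; i++) {
        int64_t  cnt  = (i < b) ? a + 1 : a;
        uint64_t from = tableFrom;
        uint64_t to   = (i < b) ? tableFrom + a : tableFrom + a - 1;
        tableFrom = to + 1;
        printf("thread %" PRId64 ": %" PRId64 " tables, range [%" PRIu64 ", %" PRIu64 "]\n",
                i, cnt, from, to);
    }
}

int main(void) {
    splitTables(10, 3, 0);   /* -> 4, 3, 3 tables across threads 0..2 */
    return 0;
}
```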
static void createChildTables() {
-    char tblColsBuf[MAX_SQL_SIZE];
+    char tblColsBuf[TSDB_MAX_BYTES_PER_ROW];
    int len;

    for (int i = 0; i < g_Dbs.dbCount; i++) {

@@ -3220,21 +3222,21 @@ static void createChildTables() {
            }
        } else {
            // normal table
-            len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP");
+            len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP");
            for (int j = 0; j < g_args.num_of_CPR; j++) {
                if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0)
                        || (strncasecmp(g_args.datatype[j],
                            "NCHAR", strlen("NCHAR")) == 0)) {
-                    snprintf(tblColsBuf + len, MAX_SQL_SIZE - len,
-                            ", COL%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary);
+                    snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len,
+                            ",C%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary);
                } else {
-                    snprintf(tblColsBuf + len, MAX_SQL_SIZE - len,
-                            ", COL%d %s", j, g_args.datatype[j]);
+                    snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len,
+                            ",C%d %s", j, g_args.datatype[j]);
                }
                len = strlen(tblColsBuf);
            }

-            snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")");
+            snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, ")");

            verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n",
                    __func__, __LINE__,
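
For illustration only (not part of the diff): a small sketch of the column-definition string the loop above assembles, e.g. "(TS TIMESTAMP,C0 INT,C1 BINARY(16))". buildCols(), the fixed type list and DEMO_ROW_BUF are stand-ins for g_args.datatype, g_args.len_of_binary and TSDB_MAX_BYTES_PER_ROW.

```c
#include <stdio.h>
#include <string.h>

#define DEMO_ROW_BUF 1024   /* stand-in for TSDB_MAX_BYTES_PER_ROW */

static void buildCols(char *buf, size_t bufLen) {
    const char *types[] = { "INT", "BINARY" };
    int len = snprintf(buf, bufLen, "(TS TIMESTAMP");
    for (int j = 0; j < 2; j++) {
        if (strcmp(types[j], "BINARY") == 0) {
            /* variable-length types carry an explicit width */
            snprintf(buf + len, bufLen - len, ",C%d %s(%d)", j, types[j], 16);
        } else {
            snprintf(buf + len, bufLen - len, ",C%d %s", j, types[j]);
        }
        len = (int)strlen(buf);
    }
    snprintf(buf + len, bufLen - len, ")");
}

int main(void) {
    char cols[DEMO_ROW_BUF];
    buildCols(cols, sizeof(cols));
    printf("%s\n", cols);   /* -> (TS TIMESTAMP,C0 INT,C1 BINARY(16)) */
    return 0;
}
```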
@@ -7986,7 +7988,7 @@ static void initOfQueryMeta() {
    tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE);
}

-static void setParaFromArg(){
+static void setParaFromArg() {
    if (g_args.host) {
        tstrncpy(g_Dbs.host, g_args.host, MAX_HOSTNAME_SIZE);
    } else {
@@ -8022,10 +8024,10 @@ static void setParaFromArg() {

    g_Dbs.do_aggreFunc = true;

-    char dataString[STRING_LEN];
+    char dataString[TSDB_MAX_BYTES_PER_ROW];
    char **data_type = g_args.datatype;

-    memset(dataString, 0, STRING_LEN);
+    memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW);

    if (strcasecmp(data_type[0], "BINARY") == 0
            || strcasecmp(data_type[0], "BOOL") == 0
@@ -8143,7 +8145,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
    }

    int read_len = 0;
-    char * cmd = calloc(1, MAX_SQL_SIZE);
+    char * cmd = calloc(1, TSDB_MAX_BYTES_PER_ROW);
    size_t cmd_len = 0;
    char * line = NULL;
    size_t line_len = 0;
@@ -8151,7 +8153,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
    double t = taosGetTimestampMs();

    while((read_len = tgetline(&line, &line_len, fp)) != -1) {
-        if (read_len >= MAX_SQL_SIZE) continue;
+        if (read_len >= TSDB_MAX_BYTES_PER_ROW) continue;
        line[--read_len] = '\0';

        if (read_len == 0 || isCommentLine(line)) {  // line starts with #
@@ -8174,7 +8176,7 @@ static void querySqlFile(TAOS* taos, char* sqlFile)
            tmfclose(fp);
            return;
        }
-        memset(cmd, 0, MAX_SQL_SIZE);
+        memset(cmd, 0, TSDB_MAX_BYTES_PER_ROW);
        cmd_len = 0;
    }
@@ -1503,6 +1503,18 @@ static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg) {
        return TSDB_CODE_MND_FIELD_NOT_EXIST;
    }

+    // check exceed max row bytes
+    int32_t i;
+    uint32_t nLen = 0;
+    for (i = 0; i < pStable->numOfColumns; ++i) {
+        nLen += (pStable->schema[i].colId == col) ? pAlter->schema[0].bytes : pStable->schema[i].bytes;
+    }
+    if (nLen > TSDB_MAX_BYTES_PER_ROW) {
+        mError("msg:%p, app:%p stable:%s, change column, name:%s exceed max row bytes", pMsg, pMsg->rpcMsg.ahandle,
+               pStable->info.tableId, name);
+        return TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES;
+    }
+
    // update
    SSchema *schema = (SSchema *) (pStable->schema + col);
    ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR);
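
For illustration only (not part of the diff): the same row-width rule as the check added above, written as a standalone helper — substitute the proposed byte width for the altered column, sum the widths of all columns, and reject the change if the total exceeds the per-row limit. fitsAfterChange(), DemoSchema and the limit value used in main() are invented for the example.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int16_t colId; int32_t bytes; } DemoSchema;

/* true if the row still fits after column `col` is resized to `newBytes` */
static bool fitsAfterChange(const DemoSchema *schema, int numOfColumns,
                            int16_t col, int32_t newBytes, uint32_t maxRowBytes) {
    uint32_t nLen = 0;
    for (int i = 0; i < numOfColumns; ++i) {
        nLen += (schema[i].colId == col) ? (uint32_t)newBytes
                                         : (uint32_t)schema[i].bytes;
    }
    return nLen <= maxRowBytes;
}

int main(void) {
    DemoSchema schema[] = { {1, 8}, {2, 200}, {3, 64} };
    /* widening column 2 from 200 to 300 bytes: 8 + 300 + 64 = 372 */
    printf("fits: %d\n", fitsAfterChange(schema, 3, 2, 300, 1024));
    return 0;
}
```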
@@ -787,7 +787,7 @@ static char *getTagIndexKey(const void *pData) {
    void * res = tdGetKVRowValOfCol(pTable->tagVal, pCol->colId);
    if (res == NULL) {
        // treat the column as NULL if we cannot find it
-        res = getNullValue(pCol->type);
+        res = (char*)getNullValue(pCol->type);
    }
    return res;
}
@@ -183,6 +183,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_ALREAY_EXIST, "Field already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_NOT_EXIST, "Field does not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STABLE_NAME, "Super table does not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG, "Invalid create table message")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES, "Exceed max row bytes")

TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC_NAME, "Invalid func name")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC_LEN, "Invalid func length")
@@ -187,19 +187,19 @@ class TDTestCase:
            "select * from t9 where t9.ts > '1969-12-31 22:00:00.000' and t9.ts <'1970-01-01 02:00:00.000' "
        )
        tdSql.checkRows(719)

        tdSql.query(
            "select * from t0,t1 where t0.ts=t1.ts and t1.ts >= '1970-01-01 00:00:00.000' "
        )
        tdSql.checkRows(680)

        tdSql.query(
-            "select diff(col1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' "
+            "select diff(c1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' "
        )
        tdSql.checkRows(679)

        tdSql.query(
-            "select t0,col1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts"
+            "select t0,c1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts"
        )
        tdSql.checkRows(43200)
@@ -23,7 +23,7 @@ class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

@@ -39,7 +39,7 @@ class TDTestCase:
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):

@@ -48,7 +48,7 @@ class TDTestCase:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # insert: create one or multiple tables per sql and insert multiple rows per sql
        # test case for https://jira.taosdata.com:18080/browse/TD-4985
        os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath)
        tdSql.execute("use db")

@@ -56,27 +56,27 @@ class TDTestCase:
        tdSql.checkData(0, 0, 10000)

        for i in range(1000):
            tdSql.execute('''insert into stb00_9999 values(%d, %d, %d,'test99.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_8888 values(%d, %d, %d,'test98.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_7777 values(%d, %d, %d,'test97.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_6666 values(%d, %d, %d,'test96.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_5555 values(%d, %d, %d,'test95.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_4444 values(%d, %d, %d,'test94.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_3333 values(%d, %d, %d,'test93.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_2222 values(%d, %d, %d,'test92.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_1111 values(%d, %d, %d,'test91.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
            tdSql.execute('''insert into stb00_100 values(%d, %d, %d,'test90.%s')'''
                          % (1600000000000 + i, i, -10000+i, i))
-        tdSql.query("select * from stb0 where col2 like 'test99%' ")
+        tdSql.query("select * from stb0 where c2 like 'test99%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10" )
        tdSql.checkData(0, 1, 0)

@@ -86,7 +86,7 @@ class TDTestCase:
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
-        tdSql.query("select * from stb0 where col2 like 'test98%' ")
+        tdSql.query("select * from stb0 where c2 like 'test98%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_8888' limit 10" )
        tdSql.checkData(0, 1, 0)

@@ -96,7 +96,7 @@ class TDTestCase:
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
-        tdSql.query("select * from stb0 where col2 like 'test97%' ")
+        tdSql.query("select * from stb0 where c2 like 'test97%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_7777' limit 10" )
        tdSql.checkData(0, 1, 0)

@@ -106,7 +106,7 @@ class TDTestCase:
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
-        tdSql.query("select * from stb0 where col2 like 'test96%' ")
+        tdSql.query("select * from stb0 where c2 like 'test96%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_6666' limit 10" )
        tdSql.checkData(0, 1, 0)

@@ -116,7 +116,7 @@ class TDTestCase:
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
-        tdSql.query("select * from stb0 where col2 like 'test95%' ")
+        tdSql.query("select * from stb0 where c2 like 'test95%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_5555' limit 10" )
        tdSql.checkData(0, 1, 0)

@@ -126,7 +126,7 @@ class TDTestCase:
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
-        tdSql.query("select * from stb0 where col2 like 'test94%' ")
+        tdSql.query("select * from stb0 where c2 like 'test94%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_4444' limit 10" )
        tdSql.checkData(0, 1, 0)

@@ -136,7 +136,7 @@ class TDTestCase:
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
-        tdSql.query("select * from stb0 where col2 like 'test93%' ")
+        tdSql.query("select * from stb0 where c2 like 'test93%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_3333' limit 100" )
        tdSql.checkData(0, 1, 0)

@@ -146,7 +146,7 @@ class TDTestCase:
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
-        tdSql.query("select * from stb0 where col2 like 'test92%' ")
+        tdSql.query("select * from stb0 where c2 like 'test92%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_2222' limit 100" )
        tdSql.checkData(0, 1, 0)

@@ -156,7 +156,7 @@ class TDTestCase:
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
-        tdSql.query("select * from stb0 where col2 like 'test91%' ")
+        tdSql.query("select * from stb0 where c2 like 'test91%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_1111' limit 100" )
        tdSql.checkData(0, 1, 0)

@@ -166,7 +166,7 @@ class TDTestCase:
        tdSql.checkData(0, 1, 5)
        tdSql.checkData(1, 1, 6)
        tdSql.checkData(2, 1, 7)
-        tdSql.query("select * from stb0 where col2 like 'test90%' ")
+        tdSql.query("select * from stb0 where c2 like 'test90%' ")
        tdSql.checkRows(1000)
        tdSql.query("select * from stb0 where tbname like 'stb00_100' limit 100" )
        tdSql.checkData(0, 1, 0)

@@ -178,10 +178,10 @@ class TDTestCase:
        tdSql.checkData(2, 1, 7)

        os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
@@ -23,7 +23,7 @@ class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

@@ -39,7 +39,7 @@ class TDTestCase:
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):

@@ -48,7 +48,7 @@ class TDTestCase:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # insert: create one or multiple tables per sql and insert multiple rows per sql
        # test case for https://jira.taosdata.com:18080/browse/TD-5213
        os.system("%staosdemo -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath)
        tdSql.execute("use db")

@@ -58,29 +58,29 @@ class TDTestCase:
        # tdSql.query("select * from stb_old")
        # tdSql.checkRows(10)
        # tdSql.checkCols(1024)

        # tdSql.query("select count (tbname) from stb_new")
        # tdSql.checkData(0, 0, 10)

        # tdSql.query("select * from stb_new")
        # tdSql.checkRows(10)
        # tdSql.checkCols(4096)

        # tdLog.info("stop dnode to commit data to disk")
        # tdDnodes.stop(1)
        # tdDnodes.start(1)

        # regular table
        sql = "create table tb(ts timestamp, "
        for i in range(1022):
-            sql += "col%d binary(14), " % (i + 1)
-        sql += "col1023 binary(22))"
+            sql += "c%d binary(14), " % (i + 1)
+        sql += "c1023 binary(22))"
        tdSql.execute(sql)

        for i in range(4):
            sql = "insert into tb values(%d, "
            for j in range(1022):
                str = "'%s', " % self.get_random_string(14)
                sql += str
            sql += "'%s')" % self.get_random_string(22)
            tdSql.execute(sql % (self.ts + i))

@@ -94,19 +94,19 @@ class TDTestCase:

        time.sleep(1)
        tdSql.query("select count(*) from tb")
        tdSql.checkData(0, 0, 4)

        sql = "create table tb1(ts timestamp, "
        for i in range(4094):
-            sql += "col%d binary(14), " % (i + 1)
-        sql += "col4095 binary(22))"
+            sql += "c%d binary(14), " % (i + 1)
+        sql += "c4095 binary(22))"
        tdSql.execute(sql)

        for i in range(4):
            sql = "insert into tb1 values(%d, "
            for j in range(4094):
                str = "'%s', " % self.get_random_string(14)
                sql += str
            sql += "'%s')" % self.get_random_string(22)
            tdSql.execute(sql % (self.ts + i))

@@ -120,14 +120,14 @@ class TDTestCase:

        time.sleep(1)
        tdSql.query("select count(*) from tb1")
        tdSql.checkData(0, 0, 4)

        #os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
@@ -23,7 +23,7 @@ class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

@@ -39,7 +39,7 @@ class TDTestCase:
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):
        buildPath = self.getBuildPath()
        if (buildPath == ""):

@@ -48,7 +48,7 @@ class TDTestCase:
            tdLog.info("taosd found in %s" % buildPath)
        binPath = buildPath + "/build/bin/"

        # insert: create one or multiple tables per sql and insert multiple rows per sql
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath)
        tdSql.execute("use db")
        tdSql.query("select count (tbname) from stb0")

@@ -62,7 +62,7 @@ class TDTestCase:
        tdSql.query("select count(*) from stb01_1")
        tdSql.checkData(0, 0, 200)
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 200000)

        # restful connector insert data
        os.system("%staosdemo -f tools/taosdemoAllTest/insertRestful.json -y " % binPath)

@@ -81,7 +81,7 @@ class TDTestCase:
        tdSql.checkData(0, 0, 200)

        # insert: create multiple tables per sql and insert one row per sql
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath)
        tdSql.execute("use db")
        tdSql.query("select count (tbname) from stb0")

@@ -89,34 +89,34 @@ class TDTestCase:
        tdSql.query("select count (tbname) from stb1")
        tdSql.checkData(0, 0, 20)
        tdSql.query("select count(*) from stb00_0")
        tdSql.checkData(0, 0, 10000)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 100000)
        tdSql.query("select count(*) from stb01_0")
        tdSql.checkData(0, 0, 20000)
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 400000)

        # insert: use the "insert_interval" parameter to control the insert speed,
        # but we need accurate methods to control the speed, such as getting the speed value, checking the count and so on.
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-interval-speed.json -y" % binPath)
        tdSql.execute("use db")
        tdSql.query("show stables")
        tdSql.checkData(0, 4, 100)
        tdSql.query("select count(*) from stb00_0")
        tdSql.checkData(0, 0, 20000)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 2000000)
        tdSql.query("show stables")
        tdSql.checkData(1, 4, 100)
        tdSql.query("select count(*) from stb01_0")
        tdSql.checkData(0, 0, 20000)
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 2000000)

        # spend 2min30s for 3 testcases.
        # insert: drop and child_table_exists combination test
        # insert: use the "childtable_offset" and "childtable_limit" parameters to control each table's offset point and limit
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-nodbnodrop.json -y" % binPath)
        tdSql.error("show dbno.stables")
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-newdb.json -y" % binPath)

@@ -128,41 +128,41 @@ class TDTestCase:
        tdSql.query("select count (tbname) from stb2")
        tdSql.checkData(0, 0, 7)
        tdSql.query("select count (tbname) from stb3")
        tdSql.checkData(0, 0, 8)
        tdSql.query("select count (tbname) from stb4")
        tdSql.checkData(0, 0, 8)
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-offset.json -y" % binPath)
        tdSql.execute("use db")
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 50)
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 240)
        tdSql.query("select count(*) from stb2")
        tdSql.checkData(0, 0, 220)
        tdSql.query("select count(*) from stb3")
        tdSql.checkData(0, 0, 180)
        tdSql.query("select count(*) from stb4")
        tdSql.checkData(0, 0, 160)
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-newtable.json -y" % binPath)
        tdSql.execute("use db")
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 150)
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 360)
        tdSql.query("select count(*) from stb2")
        tdSql.checkData(0, 0, 360)
        tdSql.query("select count(*) from stb3")
        tdSql.checkData(0, 0, 340)
        tdSql.query("select count(*) from stb4")
        tdSql.checkData(0, 0, 400)
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-renewdb.json -y" % binPath)
        tdSql.execute("use db")
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 50)
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 120)
        tdSql.query("select count(*) from stb2")
        tdSql.checkData(0, 0, 140)
        tdSql.query("select count(*) from stb3")
        tdSql.checkData(0, 0, 160)
        tdSql.query("select count(*) from stb4")

@@ -170,59 +170,59 @@ class TDTestCase:

        # insert: if a parameter in the json file is illegal, an error is expected.
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json -y " % binPath)
        tdSql.error("use db")
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertSigcolumnsNum1024.json -y " % binPath)
        tdSql.error("select * from db.stb0")
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNum1024.json -y " % binPath)
        tdSql.query("select count(*) from db.stb0")
        tdSql.checkData(0, 0, 10000)
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json -y " % binPath)
        tdSql.query("select count(*) from db.stb0")
        tdSql.checkRows(0)
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsNum0.json -y " % binPath)
        tdSql.execute("use db")
        tdSql.query("show stables like 'stb0%' ")
        tdSql.checkData(0, 2, 11)
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath)
        tdSql.error("use db1")
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json -y " % binPath)
        tdSql.query("select count(*) from db.stb0")
        tdSql.checkRows(1)
        tdSql.query("select count(*) from db.stb1")
        tdSql.checkRows(1)
        tdSql.error("select * from db.stb3")
        tdSql.error("select * from db.stb2")
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath)
        tdSql.error("select count(*) from db.stb0")
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json -y " % binPath)
        tdSql.error("use db")
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTab0.json -y " % binPath)
        tdSql.error("use db")
        tdSql.execute("drop database if exists db")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertChildTabLess0.json -y " % binPath)
        tdSql.error("use db")
        tdSql.execute("drop database if exists blf")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertTimestepMulRowsLargeint16.json -y " % binPath)
        tdSql.execute("use blf")
        tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1")
        tdSql.checkData(0, 0, "2020-03-31 12:00:00.000")
        tdSql.query("select first(ts) from blf.p_0_topics_2")
        tdSql.checkData(0, 0, "2019-10-01 00:00:00")
        tdSql.query("select last(ts) from blf.p_0_topics_6 ")
        tdSql.checkData(0, 0, "2020-09-29 23:59:00")
        os.system("%staosdemo -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath)
        tdSql.execute("use db")
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 5000000)
        tdSql.query("select count(*) from stb1")

@@ -230,7 +230,7 @@ class TDTestCase:

        # insert: timestamp and step
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-timestep.json -y " % binPath)
        tdSql.execute("use db")
        tdSql.query("show stables")

@@ -239,13 +239,13 @@ class TDTestCase:
        tdSql.query("select count (tbname) from stb1")
        tdSql.checkData(0, 0, 20)
        tdSql.query("select last(ts) from db.stb00_0")
        tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000")
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 200)
        tdSql.query("select last(ts) from db.stb01_0")
        tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000")
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 400)

        # # insert: disorder_ratio
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-disorder.json -g 2>&1 -y " % binPath)

@@ -255,14 +255,14 @@ class TDTestCase:
        tdSql.query("select count (tbname) from stb1")
        tdSql.checkData(0, 0, 1)
        tdSql.query("select count(*) from stb0")
        tdSql.checkData(0, 0, 10)
        tdSql.query("select count(*) from stb1")
        tdSql.checkData(0, 0, 10)

        # insert: sample json
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-sample.json -y " % binPath)
        tdSql.execute("use dbtest123")
-        tdSql.query("select col2 from stb0")
+        tdSql.query("select c2 from stb0")
        tdSql.checkData(0, 0, 2147483647)
        tdSql.query("select * from stb1 where t1=-127")
        tdSql.checkRows(20)

@@ -271,13 +271,13 @@ class TDTestCase:
        tdSql.query("select * from stb1 where t2=126")
        tdSql.checkRows(10)

        # insert: test interlace parameter
        os.system("%staosdemo -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath)
        tdSql.execute("use db")
        tdSql.query("select count (tbname) from stb0")
        tdSql.checkData(0, 0, 100)
        tdSql.query("select count (*) from stb0")
        tdSql.checkData(0, 0, 15000)

        # # insert: auto_create

@@ -317,10 +317,10 @@ class TDTestCase:
        tdSql.checkRows(20)

        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf tools/taosdemoAllTest/taosdemoTestInsertWithJson.py.sql")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)
@@ -59,11 +59,11 @@ class TDTestCase:
        tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords)

        tdSql.query(
-            "select sum(col1) from test.meters interval(1h) sliding(30m)")
+            "select sum(c1) from test.meters interval(1h) sliding(30m)")
        tdSql.checkRows(2)

        tdSql.query(
-            "select apercentile(col1, 1) from test.meters interval(100s)")
+            "select apercentile(c1, 1) from test.meters interval(100s)")
        tdSql.checkRows(1)

        tdSql.error("select loc, count(loc) from test.meters")
@@ -26,11 +26,11 @@ class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

    def getBuildPath(self):
        global selfPath
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:

@@ -43,7 +43,7 @@ class TDTestCase:
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath

    def run(self):

        # set path para

@@ -62,9 +62,9 @@ class TDTestCase:
        os.system("rm -rf %s/sim/dnode1/data/mnode_bak/" % testPath)
        tdSql.execute("drop database if exists db2")
        os.system("%staosdemo -f wal/insertDataDb1.json -y " % binPath)
        tdSql.execute("drop database if exists db1")
        os.system("%staosdemo -f wal/insertDataDb2.json -y " % binPath)
        tdSql.execute("drop table if exists db2.stb0")
        os.system("%staosdemo -f wal/insertDataDb2Newstab.json -y " % binPath)
        query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1])
        print(query_pid1)

@@ -72,14 +72,14 @@ class TDTestCase:
        tdSql.execute("drop table if exists stb1_0")
        tdSql.execute("drop table if exists stb1_1")
        tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')")
-        tdSql.execute("alter table db2.stb0 add column col4 int")
-        tdSql.execute("alter table db2.stb0 drop column col2")
+        tdSql.execute("alter table db2.stb0 add column c4 int")
+        tdSql.execute("alter table db2.stb0 drop column c2")
        tdSql.execute("alter table db2.stb0 add tag t3 int;")
        tdSql.execute("alter table db2.stb0 drop tag t1")
-        tdSql.execute("create table if not exists stb2_0 (ts timestamp, col0 int, col1 float) ")
+        tdSql.execute("create table if not exists stb2_0 (ts timestamp, c0 int, c1 float) ")
        tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)")
-        tdSql.execute("alter table stb2_0 add column col2 binary(4)")
-        tdSql.execute("alter table stb2_0 drop column col1")
+        tdSql.execute("alter table stb2_0 add column c2 binary(4)")
+        tdSql.execute("alter table stb2_0 drop column c1")
        tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')")

        # stop taosd and compact wal file

@@ -87,9 +87,9 @@ class TDTestCase:
        sleep(10)
        os.system("nohup %s/taosd --compact-mnode-wal -c %s/sim/dnode1/cfg/ & " % (binPath, testPath))
        sleep(5)
        assert os.path.exists(walFilePath), "%s is not generated, compact didn't take effect " % walFilePath

        # use new wal file to start taosd
        tdDnodes.start(1)
        sleep(5)
        tdSql.execute("reset query cache")

@@ -108,14 +108,14 @@ class TDTestCase:
        tdSql.checkData(0, 0, 2)
        tdSql.query("select count(*) from stb2_0")
        tdSql.checkData(0, 0, 2)

        # delete useless file
        testcaseFilename = os.path.split(__file__)[-1]
        os.system("rm -rf ./insert_res.txt")
        os.system("rm -rf wal/%s.sql" % testcaseFilename)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)