Merge branch '3.0' of https://github.com/taosdata/TDengine into 3.0

commit 1a3631e0af
@@ -35,7 +35,6 @@ extern "C" {
#define CACHESCAN_RETRIEVE_TYPE_SINGLE 0x2
#define CACHESCAN_RETRIEVE_LAST_ROW    0x4
#define CACHESCAN_RETRIEVE_LAST        0x8
#define CACHESCAN_RETRIEVE_PK          0x10

#define META_READER_LOCK   0x0
#define META_READER_NOLOCK 0x1
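The CACHESCAN_RETRIEVE_* values above are independent bit flags, so a scan's retrieve type can carry several capabilities at once (for example "last" plus "has primary key"). A minimal standalone sketch of composing and testing such a mask follows; the flag values are copied from the hunk, while describeRetrieveType() is an illustrative helper, not a TDengine function.

#include <stdint.h>
#include <stdio.h>

#define CACHESCAN_RETRIEVE_TYPE_SINGLE 0x2
#define CACHESCAN_RETRIEVE_LAST_ROW    0x4
#define CACHESCAN_RETRIEVE_LAST        0x8
#define CACHESCAN_RETRIEVE_PK          0x10

/* Each capability occupies its own bit, so several can be set in one mask. */
static void describeRetrieveType(int32_t type) {
  printf("single:%d last_row:%d last:%d pk:%d\n",
         (type & CACHESCAN_RETRIEVE_TYPE_SINGLE) != 0,
         (type & CACHESCAN_RETRIEVE_LAST_ROW) != 0,
         (type & CACHESCAN_RETRIEVE_LAST) != 0,
         (type & CACHESCAN_RETRIEVE_PK) != 0);
}

int main(void) {
  describeRetrieveType(CACHESCAN_RETRIEVE_LAST | CACHESCAN_RETRIEVE_PK);
  return 0;
}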
@@ -894,35 +894,14 @@ typedef enum {
  READER_EXEC_ROWS = 0x2,
} EExecMode;

#define LAST_COL_VERSION (0x1)

typedef struct {
  SRowKey rowKey;
  int8_t  dirty;
  SColVal colVal;
} SLastCol;

typedef struct {
  union {
    int64_t val;
    struct {
      uint8_t *pData;
      uint32_t nData;
    };
  };
} SValueV0;

typedef struct {
  int16_t  cid;
  int8_t   type;
  int8_t   flag;
  SValueV0 value;
} SColValV0;

typedef struct {
  TSKEY     ts;
  int8_t    dirty;
  SColValV0 colVal;
} SLastColV0;

int32_t tsdbOpenCache(STsdb *pTsdb);
void    tsdbCloseCache(STsdb *pTsdb);
int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *row);
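SValueV0 keeps a column value either inline (the int64_t arm of the union) or as an out-of-line pointer plus length (the pData/nData arm) for var-length types. A standalone sketch of that pattern follows; the enum tag and helper are simplified stand-ins for illustration, not the real tsdb types, which discriminate on the column type via IS_VAR_DATA_TYPE instead.

#include <stdint.h>
#include <stdio.h>

typedef enum { VAL_FIXED, VAL_VAR } EValKind;

typedef struct {
  EValKind kind;               /* stand-in for the column-type check */
  union {
    int64_t val;               /* fixed-width value stored inline */
    struct {
      uint8_t *pData;          /* var-length payload lives out of line */
      uint32_t nData;
    };
  };
} SValueSketch;

static void printValue(const SValueSketch *v) {
  if (v->kind == VAL_VAR) {
    printf("var data, %u bytes\n", v->nData);
  } else {
    printf("fixed value %lld\n", (long long)v->val);
  }
}

int main(void) {
  uint8_t buf[] = "hello";
  SValueSketch a = {.kind = VAL_FIXED, .val = 42};
  SValueSketch b = {.kind = VAL_VAR, .pData = buf, .nData = 5};
  printValue(&a);
  printValue(&b);
  return 0;
}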
@@ -130,20 +130,14 @@ static void tsdbClosePgCache(STsdb *pTsdb) {
enum {
  LFLAG_LAST_ROW = 0,
  LFLAG_LAST = 1,
  LFLAG_VERSION = 1 << 2,
  LFLAG_VERSION_BITS = (1 << 2 | 1 << 3),
  LFLAG_PRIMARY_KEY = CACHESCAN_RETRIEVE_PK,
};

#define LAST_KEY_HAS_VERSION ((k).lflag & LFLAG_VERSION_BITS)

typedef struct {
  tb_uid_t uid;
  int16_t  cid;
  int8_t   lflag;
} SLastKey;

#define HAS_PRIMARY_KEY(k) (((k).lflag & LFLAG_PRIMARY_KEY) == LFLAG_PRIMARY_KEY)
#define IS_LAST_ROW_KEY(k) (((k).lflag & LFLAG_LAST) == LFLAG_LAST_ROW)
#define IS_LAST_KEY(k)     (((k).lflag & LFLAG_LAST) == LFLAG_LAST)
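In SLastKey, lflag packs three things into one byte: bit 0 distinguishes last from last_row, bits 2-3 carry the on-disk format version, and bit 4 (CACHESCAN_RETRIEVE_PK) marks keys for tables with a composite primary key. A standalone sketch of composing a key and evaluating the macros follows; tb_uid_t is replaced by int64_t and LFLAG_PRIMARY_KEY is written as its literal value, since the real headers are not included here.

#include <stdint.h>
#include <stdio.h>

enum {
  LFLAG_LAST_ROW     = 0,
  LFLAG_LAST         = 1,
  LFLAG_VERSION      = 1 << 2,
  LFLAG_VERSION_BITS = (1 << 2 | 1 << 3),
  LFLAG_PRIMARY_KEY  = 0x10,   /* literal value of CACHESCAN_RETRIEVE_PK */
};

typedef struct {
  int64_t uid;                 /* stand-in for tb_uid_t */
  int16_t cid;
  int8_t  lflag;
} SLastKeySketch;

#define HAS_PRIMARY_KEY(k) (((k).lflag & LFLAG_PRIMARY_KEY) == LFLAG_PRIMARY_KEY)
#define IS_LAST_ROW_KEY(k) (((k).lflag & LFLAG_LAST) == LFLAG_LAST_ROW)
#define IS_LAST_KEY(k)     (((k).lflag & LFLAG_LAST) == LFLAG_LAST)

int main(void) {
  SLastKeySketch k = {.uid = 1001, .cid = 2,
                      .lflag = LFLAG_LAST | LFLAG_VERSION | LFLAG_PRIMARY_KEY};
  printf("is_last:%d is_last_row:%d has_pk:%d version_bits:0x%x\n",
         IS_LAST_KEY(k), IS_LAST_ROW_KEY(k), HAS_PRIMARY_KEY(k),
         (unsigned)(k.lflag & LFLAG_VERSION_BITS));
  return 0;
}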
@@ -336,142 +330,163 @@ static void rocksMayWrite(STsdb *pTsdb, bool force, bool read, bool lock) {
  }
}

// note: new object do not own colVal's resource, just copy the pointer
static SLastCol *tsdbCacheConvertLastColV0(SLastColV0 *pLastColV0) {
  SLastCol *pLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
  if (pLastCol == NULL) return NULL;

typedef struct {
  TSKEY  ts;
  int8_t dirty;
  struct {
    int16_t cid;
    int8_t  type;
    int8_t  flag;
    union {
      int64_t val;
      struct {
        uint32_t nData;
        uint8_t *pData;
      };
    } value;
  } colVal;
} SLastColV0;

static int32_t tsdbCacheDeserializeV0(char const *value, SLastCol *pLastCol) {
  SLastColV0 *pLastColV0 = (SLastColV0 *)value;

  pLastCol->rowKey.ts = pLastColV0->ts;
  pLastCol->rowKey.numOfPKs = 0;
  pLastCol->dirty = pLastColV0->dirty;
  pLastCol->colVal.cid = pLastColV0->colVal.cid;
  pLastCol->colVal.flag = pLastColV0->colVal.flag;
  pLastCol->colVal.value.type = pLastColV0->colVal.type;
  pLastCol->colVal.value.val = pLastColV0->colVal.value.val;

  return pLastCol;
  if (IS_VAR_DATA_TYPE(pLastCol->colVal.value.type)) {
    pLastCol->colVal.value.nData = pLastColV0->colVal.value.nData;
    pLastCol->colVal.value.pData = (uint8_t *)(&pLastColV0[1]);
    return sizeof(SLastColV0) + pLastColV0->colVal.value.nData;
  } else {
    pLastCol->colVal.value.val = pLastColV0->colVal.value.val;
    return sizeof(SLastColV0);
  }
}

static SLastCol *tsdbCacheDeserializeV0(char const *value) {
static SLastCol *tsdbCacheDeserialize(char const *value, size_t size) {
  if (!value) {
    return NULL;
  }

  SLastCol *pLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
  if (NULL == pLastCol) {
    return NULL;
  }

  int32_t offset = tsdbCacheDeserializeV0(value, pLastCol);
  if (offset == size) {
    // version 0
    return pLastCol;
  } else if (offset > size) {
    terrno = TSDB_CODE_INVALID_DATA_FMT;
    taosMemoryFreeClear(pLastCol);
    return NULL;
  }

  // version
  int8_t version = *(int8_t *)(value + offset);
  offset += sizeof(int8_t);

  // numOfPKs
  pLastCol->rowKey.numOfPKs = *(uint8_t *)(value + offset);
  offset += sizeof(uint8_t);

  // pks
  for (int32_t i = 0; i < pLastCol->rowKey.numOfPKs; i++) {
    pLastCol->rowKey.pks[i] = *(SValue *)(value + offset);
    offset += sizeof(SValue);

    if (IS_VAR_DATA_TYPE(pLastCol->rowKey.pks[i].type)) {
      pLastCol->rowKey.pks[i].pData = (uint8_t *)value + offset;
      offset += pLastCol->rowKey.pks[i].nData;
    }
  }

  if (offset > size) {
    terrno = TSDB_CODE_INVALID_DATA_FMT;
    taosMemoryFreeClear(pLastCol);
    return NULL;
  }

  return pLastCol;
}

/*
typedef struct {
  SLastColV0 lastColV0;
  char       colData[];
  int8_t     version;
  uint8_t    numOfPKs;
  SValue     pks[0];
  char       pk0Data[];
  SValue     pks[1];
  char       pk1Data[];
  ...
} SLastColDisk;
*/
static int32_t tsdbCacheSerializeV0(char const *value, SLastCol *pLastCol) {
  SLastColV0 *pLastColV0 = (SLastColV0 *)value;
  SColValV0 *pColVal = &pLastColV0->colVal;
  if (IS_VAR_DATA_TYPE(pColVal->type)) {
    if (pColVal->value.nData > 0) {
      pColVal->value.pData = (char *)value + sizeof(*pLastColV0);
    } else {
      pColVal->value.pData = NULL;

  pLastColV0->ts = pLastCol->rowKey.ts;
  pLastColV0->dirty = pLastCol->dirty;
  pLastColV0->colVal.cid = pLastCol->colVal.cid;
  pLastColV0->colVal.flag = pLastCol->colVal.flag;
  pLastColV0->colVal.type = pLastCol->colVal.value.type;
  if (IS_VAR_DATA_TYPE(pLastCol->colVal.value.type)) {
    pLastColV0->colVal.value.nData = pLastCol->colVal.value.nData;
    if (pLastCol->colVal.value.nData > 0) {
      memcpy(&pLastColV0[1], pLastCol->colVal.value.pData, pLastCol->colVal.value.nData);
    }
    return sizeof(SLastColV0) + pLastCol->colVal.value.nData;
  } else {
    pLastColV0->colVal.value.val = pLastCol->colVal.value.val;
    return sizeof(SLastColV0);
  }

  return tsdbCacheConvertLastColV0(pLastColV0);
}

static SLastCol *tsdbCacheDeserializeV1(char const *value) {
  if (!value) {
    return NULL;
  }

  SLastCol *pLastCol = taosMemoryMalloc(sizeof(SLastCol));
  *pLastCol = *(SLastCol *)(value);

  char *currentPos = (char *)value + sizeof(*pLastCol);
  for (int8_t i = 0; i < pLastCol->rowKey.numOfPKs; i++) {
    SValue *pValue = &pLastCol->rowKey.pks[i];
    if (IS_VAR_DATA_TYPE(pValue->type)) {
      if (pValue->nData > 0) {
        pValue->pData = currentPos;
        currentPos += pValue->nData;
      } else {
        pValue->pData = NULL;
      }
    }
  }

  SColVal *pColVal = &pLastCol->colVal;
  if (IS_VAR_DATA_TYPE(pColVal->value.type)) {
    if (pColVal->value.nData > 0) {
      pColVal->value.pData = currentPos;
      currentPos += pColVal->value.nData;
    } else {
      pColVal->value.pData = NULL;
    }
  }

  return pLastCol;
}

static SLastCol *tsdbCacheDeserialize(char const *value, int8_t lflag) {
  if (!value) {
    return NULL;
  }

  int8_t version = lflag & LFLAG_VERSION_BITS;

  SLastCol *lastCol = NULL;
  switch (version) {
    case 0:
      lastCol = tsdbCacheDeserializeV0(value);
      break;
    case LFLAG_VERSION:
      lastCol = tsdbCacheDeserializeV1(value);
      break;
    defalut:
      tsdbError("invalid last key version %" PRId8 " , lflag:%" PRId8, version, lflag);
      break;
  }
  return lastCol;
}

static uint32_t tsdbCacheCopyVarData(SValue *from, SValue *to) {
  ASSERT(from->nData >= 0);
  if (from->nData > 0) {
    memcpy(to->pData, from->pData, from->nData);
  }
  to->type = from->type;
  to->nData = from->nData;
  return from->nData;
  return 0;
}

static void tsdbCacheSerialize(SLastCol *pLastCol, char **value, size_t *size) {
  SColVal *pColVal = &pLastCol->colVal;
  size_t length = sizeof(*pLastCol);
  *size = sizeof(SLastColV0);
  if (IS_VAR_DATA_TYPE(pLastCol->colVal.value.type)) {
    *size += pLastCol->colVal.value.nData;
  }
  *size += sizeof(uint8_t) + sizeof(uint8_t);  // version + numOfPKs

  for (int8_t i = 0; i < pLastCol->rowKey.numOfPKs; i++) {
    *size += sizeof(SValue);
    if (IS_VAR_DATA_TYPE(pLastCol->rowKey.pks[i].type)) {
      length += pLastCol->rowKey.pks[i].nData;
      *size += pLastCol->rowKey.pks[i].nData;
    }
  }
  if (IS_VAR_DATA_TYPE(pColVal->value.type)) {
    length += pColVal->value.nData;
  }

  *value = taosMemoryMalloc(length);
  *value = taosMemoryMalloc(*size);

  // copy last col
  SLastCol *pToLastCol = (SLastCol *)(*value);
  *pToLastCol = *pLastCol;
  char *currentPos = *value + sizeof(*pLastCol);
  int32_t offset = tsdbCacheSerializeV0(*value, pLastCol);

  // copy var data pks
  // version
  ((uint8_t *)(*value + offset))[0] = LAST_COL_VERSION;
  offset++;

  // numOfPKs
  ((uint8_t *)(*value + offset))[0] = pLastCol->rowKey.numOfPKs;
  offset++;

  // pks
  for (int8_t i = 0; i < pLastCol->rowKey.numOfPKs; i++) {
    SValue *pFromValue = &pLastCol->rowKey.pks[i];
    if (IS_VAR_DATA_TYPE(pFromValue->type)) {
      SValue *pToValue = &pToLastCol->rowKey.pks[i];
      pToValue->pData = (pFromValue->nData == 0) ? NULL : currentPos;
      currentPos += tsdbCacheCopyVarData(pFromValue, pToValue);
    ((SValue *)(*value + offset))[0] = pLastCol->rowKey.pks[i];
    offset += sizeof(SValue);
    if (IS_VAR_DATA_TYPE(pLastCol->rowKey.pks[i].type)) {
      if (pLastCol->rowKey.pks[i].nData > 0) {
        memcpy(*value + offset, pLastCol->rowKey.pks[i].pData, pLastCol->rowKey.pks[i].nData);
      }
      offset += pLastCol->rowKey.pks[i].nData;
    }
  }
  }

  // copy var data value
  if (IS_VAR_DATA_TYPE(pColVal->value.type)) {
    SValue *pFromValue = &pColVal->value;
    SValue *pToValue = &pToLastCol->colVal.value;
    pToValue->pData = (pFromValue->nData == 0) ? NULL : currentPos;
    currentPos += tsdbCacheCopyVarData(pFromValue, pToValue);
  }
  *size = length;
}

static void tsdbCachePutBatch(SLastCol *pLastCol, const void *key, size_t klen, SCacheFlushState *state) {
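The serialized record is the V0 header plus its var-length column payload, followed by a one-byte version, a one-byte primary-key count, and each primary-key SValue slot with its own var-length payload (the SLastColDisk comment above sketches the same layout). A standalone sketch of just the size arithmetic follows; the struct and the header/slot sizes below are made-up stand-ins for illustration, not the real tsdb definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins: up to two primary-key slots and one column value,
 * each either fixed-width or var-length. */
typedef struct {
  bool     isVar;
  uint32_t nData;   /* payload length when isVar is true */
} SValSketch;

typedef struct {
  uint8_t    numOfPKs;
  SValSketch pks[2];
  SValSketch colVal;
} SLastColSketch;

/* Mirrors the arithmetic in tsdbCacheSerialize above: fixed V0 header, the
 * var-length column payload, one byte of version, one byte of PK count, then
 * each PK slot plus its var-length payload. */
static size_t serializedSize(const SLastColSketch *c, size_t v0HeaderSize, size_t pkSlotSize) {
  size_t size = v0HeaderSize;
  if (c->colVal.isVar) size += c->colVal.nData;
  size += sizeof(uint8_t) + sizeof(uint8_t);  /* version + numOfPKs */
  for (uint8_t i = 0; i < c->numOfPKs; i++) {
    size += pkSlotSize;
    if (c->pks[i].isVar) size += c->pks[i].nData;
  }
  return size;
}

int main(void) {
  SLastColSketch c = {.numOfPKs = 1,
                      .pks = {{.isVar = true, .nData = 8}},
                      .colVal = {.isVar = true, .nData = 16}};
  /* 24 and 32 are invented header/slot sizes for the example only. */
  printf("serialized size: %zu bytes\n", serializedSize(&c, 24, 32));
  return 0;
}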
@@ -603,7 +618,7 @@ static int32_t tsdbCacheNewTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, i
    charge += pLastCol->colVal.value.nData;
  }

  SLastKey *pLastKey = &(SLastKey){.lflag = lflag | LFLAG_VERSION, .uid = uid, .cid = cid};
  SLastKey *pLastKey = &(SLastKey){.lflag = lflag, .uid = uid, .cid = cid};
  LRUStatus status = taosLRUCacheInsert(pCache, pLastKey, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, NULL,
                                        TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState);
  if (status != TAOS_LRU_STATUS_OK) {

@@ -653,11 +668,9 @@ static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid,
  size_t *keys_list_sizes = taosMemoryCalloc(2, sizeof(size_t));
  const size_t klen = ROCKS_KEY_LEN;

  int8_t lflag = hasPrimaryKey ? LFLAG_PRIMARY_KEY : 0;

  char *keys = taosMemoryCalloc(2, sizeof(SLastKey));
  ((SLastKey *)keys)[0] = (SLastKey){.lflag = lflag | LFLAG_LAST | LFLAG_VERSION, .uid = uid, .cid = cid};
  ((SLastKey *)keys)[1] = (SLastKey){.lflag = lflag | LFLAG_LAST_ROW | LFLAG_VERSION, .uid = uid, .cid = cid};
  ((SLastKey *)keys)[0] = (SLastKey){.lflag = LFLAG_LAST, .uid = uid, .cid = cid};
  ((SLastKey *)keys)[1] = (SLastKey){.lflag = LFLAG_LAST_ROW, .uid = uid, .cid = cid};

  keys_list[0] = keys;
  keys_list[1] = keys + sizeof(SLastKey);

@@ -681,13 +694,13 @@ static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid,

  rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
  {
    SLastCol *pLastCol = tsdbCacheDeserialize(values_list[0], ((SLastKey*)keys_list[0])->lflag);
    SLastCol *pLastCol = tsdbCacheDeserialize(values_list[0], values_list_sizes[0]);
    if (NULL != pLastCol) {
      rocksdb_writebatch_delete(wb, keys_list[0], klen);
    }
    taosMemoryFreeClear(pLastCol);

    pLastCol = tsdbCacheDeserialize(values_list[1], ((SLastKey*)keys_list[1])->lflag);
    pLastCol = tsdbCacheDeserialize(values_list[1], values_list_sizes[1]);
    if (NULL != pLastCol) {
      rocksdb_writebatch_delete(wb, keys_list[1], klen);
    }
@@ -733,18 +746,12 @@ int32_t tsdbCacheNewTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWrap
  taosThreadMutexLock(&pTsdb->lruMutex);

  if (suid < 0) {
    int8_t lflag = 0;
    int nCols = pSchemaRow->nCols;
    if (nCols >= 2) {
      lflag = (pSchemaRow->pSchema[1].flags & COL_IS_KEY) ? LFLAG_PRIMARY_KEY : 0;
    }

    for (int i = 0; i < nCols; ++i) {
    for (int i = 0; i < pSchemaRow->nCols; ++i) {
      int16_t cid = pSchemaRow->pSchema[i].colId;
      int8_t col_type = pSchemaRow->pSchema[i].type;

      (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, lflag | LFLAG_LAST_ROW);
      (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, lflag | LFLAG_LAST);
      (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST_ROW);
      (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST);
    }
  } else {
    STSchema *pTSchema = NULL;

@@ -754,18 +761,12 @@ int32_t tsdbCacheNewTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWrap
      return -1;
    }

    int8_t lflag = 0;
    int nCols = pTSchema->numOfCols;
    if (nCols >= 2) {
      lflag = (pTSchema->columns[1].flags & COL_IS_KEY) ? LFLAG_PRIMARY_KEY : 0;
    }

    for (int i = 0; i < nCols; ++i) {
    for (int i = 0; i < pTSchema->numOfCols; ++i) {
      int16_t cid = pTSchema->columns[i].colId;
      int8_t col_type = pTSchema->columns[i].type;

      (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, lflag | LFLAG_LAST_ROW);
      (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, lflag | LFLAG_LAST);
      (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST_ROW);
      (void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, LFLAG_LAST);
    }

    taosMemoryFree(pTSchema);
@@ -1024,14 +1025,13 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
  STsdbRowKey tsdbRowKey = {0};
  tsdbRowGetKey(pRow, &tsdbRowKey);
  SRowKey *pRowKey = &tsdbRowKey.key;
  int8_t lflag = (pRowKey->numOfPKs != 0) ? LFLAG_PRIMARY_KEY : 0;

  taosThreadMutexLock(&pTsdb->lruMutex);
  for (int i = 0; i < num_keys; ++i) {
    SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, i);
    int16_t cid = pColVal->cid;

    SLastKey *key = &(SLastKey){.lflag = lflag | LFLAG_LAST_ROW | LFLAG_VERSION, .uid = uid, .cid = cid};
    SLastKey *key = &(SLastKey){.lflag = LFLAG_LAST_ROW, .uid = uid, .cid = cid};
    size_t klen = ROCKS_KEY_LEN;
    LRUHandle *h = taosLRUCacheLookup(pCache, key, klen);
    if (h) {

@@ -1049,7 +1049,7 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
    }

    if (COL_VAL_IS_VALUE(pColVal)) {
      key->lflag = lflag | LFLAG_LAST | LFLAG_VERSION;
      key->lflag = LFLAG_LAST;
      LRUHandle *h = taosLRUCacheLookup(pCache, key, klen);
      if (h) {
        SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pCache, h);

@@ -1094,7 +1094,7 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
      SColVal *pColVal = (SColVal *)TARRAY_DATA(aColVal) + idxKey->idx;
      // SColVal *pColVal = (SColVal *)taosArrayGet(aColVal, idxKey->idx);

      SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i], ((SLastKey*)keys_list[i])->lflag);
      SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i], values_list_sizes[i]);
      SLastCol *PToFree = pLastCol;

      if (IS_LAST_ROW_KEY(idxKey->key)) {

@@ -1106,8 +1106,8 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
        if (NULL == pLastCol || cmp_res < 0 || (cmp_res == 0 && !COL_VAL_IS_NONE(pColVal))) {
          char *value = NULL;
          size_t vlen = 0;
          tsdbCacheSerialize(&(SLastCol){.rowKey = *pRowKey, .colVal = *pColVal}, &value,
                             &vlen);
          SLastCol lastColTmp = {.rowKey = *pRowKey, .colVal = *pColVal};
          tsdbCacheSerialize(&lastColTmp, &value, &vlen);

          taosThreadMutexLock(&pTsdb->rCache.rMutex);

@@ -1115,7 +1115,7 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow

          taosThreadMutexUnlock(&pTsdb->rCache.rMutex);

          pLastCol = (SLastCol *)value;
          pLastCol = &lastColTmp;
          SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
          *pTmpLastCol = *pLastCol;
          pLastCol = pTmpLastCol;

@@ -1147,7 +1147,8 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow
        if (NULL == pLastCol || (tRowKeyCompare(&pLastCol->rowKey, pRowKey) != 1)) {
          char *value = NULL;
          size_t vlen = 0;
          tsdbCacheSerialize(&(SLastCol){.rowKey = *pRowKey, .colVal = *pColVal}, &value, &vlen);
          SLastCol lastColTmp = {.rowKey = *pRowKey, .colVal = *pColVal};
          tsdbCacheSerialize(&lastColTmp, &value, &vlen);

          taosThreadMutexLock(&pTsdb->rCache.rMutex);

@@ -1155,7 +1156,7 @@ int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *pRow

          taosThreadMutexUnlock(&pTsdb->rCache.rMutex);

          pLastCol = (SLastCol *)value;
          pLastCol = &lastColTmp;
          SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
          *pTmpLastCol = *pLastCol;
          pLastCol = pTmpLastCol;
@@ -1401,7 +1402,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr

  SIdxKey *idxKey = taosArrayGet(remainCols, 0);
  if (idxKey->key.cid != PRIMARYKEY_TIMESTAMP_COL_ID) {
    SLastKey *key = &(SLastKey){.lflag = ltype | LFLAG_VERSION, .uid = uid, .cid = PRIMARYKEY_TIMESTAMP_COL_ID};
    SLastKey *key = &(SLastKey){.lflag = ltype, .uid = uid, .cid = PRIMARYKEY_TIMESTAMP_COL_ID};

    taosArrayInsert(remainCols, 0, &(SIdxKey){0, *key});
  }

@@ -1570,7 +1571,7 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA

  SLRUCache *pCache = pTsdb->lruCache;
  for (int i = 0, j = 0; i < num_keys && j < TARRAY_SIZE(remainCols); ++i) {
    SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i], ((SLastKey*)keys_list[i])->lflag);
    SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i], values_list_sizes[i]);
    SLastCol *PToFree = pLastCol;
    SIdxKey *idxKey = &((SIdxKey *)TARRAY_DATA(remainCols))[j];
    if (pLastCol) {

@@ -1637,7 +1638,7 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache
  for (int i = 0; i < num_keys; ++i) {
    int16_t cid = ((int16_t *)TARRAY_DATA(pCidList))[i];

    SLastKey key = {.lflag = ltype | LFLAG_VERSION, .uid = uid, .cid = cid};
    SLastKey key = {.lflag = ltype, .uid = uid, .cid = cid};
    // for select last_row, last case
    int32_t funcType = FUNCTION_TYPE_CACHE_LAST;
    if (pr->pFuncTypeList != NULL && taosArrayGetSize(pr->pFuncTypeList) > i) {
@@ -1726,17 +1727,12 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
  size_t *keys_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
  const size_t klen = ROCKS_KEY_LEN;

  int8_t lflag = 0;
  if (num_keys >= 2) {
    lflag = (pTSchema->columns[1].flags & COL_IS_KEY) ? LFLAG_PRIMARY_KEY : 0;
  }

  for (int i = 0; i < num_keys; ++i) {
    int16_t cid = pTSchema->columns[i].colId;

    char *keys = taosMemoryCalloc(2, sizeof(SLastKey));
    ((SLastKey *)keys)[0] = (SLastKey){.lflag = lflag | LFLAG_LAST | LFLAG_VERSION, .uid = uid, .cid = cid};
    ((SLastKey *)keys)[1] = (SLastKey){.lflag = lflag | LFLAG_LAST_ROW | LFLAG_VERSION, .uid = uid, .cid = cid};
    ((SLastKey *)keys)[0] = (SLastKey){.lflag = LFLAG_LAST, .uid = uid, .cid = cid};
    ((SLastKey *)keys)[1] = (SLastKey){.lflag = LFLAG_LAST_ROW, .uid = uid, .cid = cid};

    keys_list[i] = keys;
    keys_list[num_keys + i] = keys + sizeof(SLastKey);

@@ -1766,14 +1762,14 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE

  rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
  for (int i = 0; i < num_keys; ++i) {
    SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i], ((SLastKey *)keys_list[i])->lflag);
    SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i], values_list_sizes[i]);
    taosThreadMutexLock(&pTsdb->rCache.rMutex);
    if (NULL != pLastCol && (pLastCol->rowKey.ts <= eKey && pLastCol->rowKey.ts >= sKey)) {
      rocksdb_writebatch_delete(wb, keys_list[i], klen);
    }
    taosMemoryFreeClear(pLastCol);

    pLastCol = tsdbCacheDeserialize(values_list[i + num_keys], ((SLastKey *)keys_list[i + num_keys])->lflag);
    pLastCol = tsdbCacheDeserialize(values_list[i + num_keys], values_list_sizes[i + num_keys]);
    if (NULL != pLastCol && (pLastCol->rowKey.ts <= eKey && pLastCol->rowKey.ts >= sKey)) {
      rocksdb_writebatch_delete(wb, keys_list[num_keys + i], klen);
    }
@@ -385,9 +385,6 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
  }

  int8_t ltype = (pr->type & CACHESCAN_RETRIEVE_LAST) >> 3;
  if (pr->rowKey.numOfPKs > 0) {
    ltype |= CACHESCAN_RETRIEVE_PK;
  }

  STableKeyInfo* pTableList = pr->pTableList;
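The ltype computed above folds the LAST bit of the retrieve type down to bit 0 and then ORs in the primary-key flag, which is the form used for lflag in the cache keys built by tsdbCacheLoadFromRaw and tsdbCacheGetBatch. A standalone sketch of that mapping follows; the flag values are copied from the header hunk, and cacheKeyType() is an illustrative helper with numOfPKs passed as a plain parameter.

#include <stdint.h>
#include <stdio.h>

#define CACHESCAN_RETRIEVE_LAST_ROW 0x4
#define CACHESCAN_RETRIEVE_LAST     0x8
#define CACHESCAN_RETRIEVE_PK       0x10

/* Mirrors the ltype computation in the hunk above. */
static int8_t cacheKeyType(int32_t retrieveType, int32_t numOfPKs) {
  int8_t ltype = (retrieveType & CACHESCAN_RETRIEVE_LAST) >> 3;  /* 1 for last, 0 for last_row */
  if (numOfPKs > 0) {
    ltype |= CACHESCAN_RETRIEVE_PK;
  }
  return ltype;
}

int main(void) {
  printf("last, no pk  -> 0x%x\n", (unsigned)cacheKeyType(CACHESCAN_RETRIEVE_LAST, 0));
  printf("last_row, pk -> 0x%x\n", (unsigned)cacheKeyType(CACHESCAN_RETRIEVE_LAST_ROW, 1));
  return 0;
}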
@@ -327,6 +327,7 @@ static int32_t abort_edit(STFileSystem *fs) {
  } else if (fs->etype == TSDB_FEDIT_MERGE) {
    current_fname(fs->tsdb, fname, TSDB_FCURRENT_M);
  } else {
    tsdbError("vgId:%d %s failed since invalid etype:%d", TD_VID(fs->tsdb->pVnode), __func__, fs->etype);
    ASSERT(0);
  }

@@ -666,6 +666,7 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, SSnapshotParam *pParam, SVSnapWriter

_err:
  vError("vgId:%d, vnode snapshot writer open failed since %s", TD_VID(pVnode), tstrerror(code));
  if (pWriter) taosMemoryFreeClear(pWriter);
  *ppWriter = NULL;
  return code;
}
@@ -1144,7 +1144,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq,
    if (i < tbNames->size - 1) {
      taosStringBuilderAppendChar(&sb, ',');
    }
    taosMemoryFreeClear(*key);
    //taosMemoryFreeClear(*key);
  }

  size_t len = 0;

@@ -1168,7 +1168,7 @@ _exit:
  taosArrayDestroy(tbUids);
  tDecoderClear(&decoder);
  tEncoderClear(&encoder);
  taosArrayDestroy(tbNames);
  taosArrayDestroyP(tbNames, taosMemoryFree);
  return rcode;
}
@@ -160,9 +160,6 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe
  // partition by tbname
  if (oneTableForEachGroup(pTableListInfo) || (totalTables == 1)) {
    pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_ALL | SCAN_ROW_TYPE(pScanNode->ignoreNull);
    if (pInfo->numOfPks > 0) {
      pInfo->retrieveType |= CACHESCAN_RETRIEVE_PK;
    }

    STableKeyInfo* pList = tableListGetInfo(pTableListInfo, 0);
@@ -485,6 +485,30 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_and_last_row.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_and_last_row.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_and_last_row.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last+last_row.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last+last_row.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last+last_row.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last+last_row.py -Q 4
#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_1.py
#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_1.py -R
#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_1.py -Q 2
#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_1.py -Q 3
#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_1.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_2.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_2.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_2.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_2.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_2.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_3.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_3.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_3.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_3.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_3.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_4.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_4.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_4.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_4.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/primary_ts_base_4.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py
@@ -162,15 +162,15 @@ class TDTestCase:
                tdSql.checkData( i, 0, date_init_stamp)


        tdSql.query(f"select cast(c2 as timestamp) as b from {self.dbname}.t1")
        for i in range(len(data_t1_c2)):
            if data_t1_c2[i] is None:
                tdSql.checkData( i, 0 , None )
            elif i == 10:
                continue
            else:
                date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c2[i]) / 1000.0)
                tdSql.checkData( i, 0, date_init_stamp)
        # tdSql.query(f"select cast(c2 as timestamp) as b from {self.dbname}.t1")
        # for i in range(len(data_t1_c2)):
        #     if data_t1_c2[i] is None:
        #         tdSql.checkData( i, 0 , None )
        #     elif i == 10:
        #         continue
        #     else:
        #         date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c2[i]) / 1000.0)
        #         tdSql.checkData( i, 0, date_init_stamp)


        tdLog.printNoPrefix("==========step12: cast smallint to bigint, expect no changes")

@@ -389,15 +389,15 @@ class TDTestCase:
                date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c6[i]) / 1000.0)
                tdSql.checkData( i, 0, date_init_stamp)

        tdSql.query(f"select cast(c6 as timestamp) as b from {self.dbname}.t1")
        for i in range(len(data_t1_c6)):
            if data_t1_c6[i] is None:
                tdSql.checkData( i, 0 , None )
            elif i == 10:
                continue
            else:
                date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c6[i]) / 1000.0)
                tdSql.checkData( i, 0, date_init_stamp)
        # tdSql.query(f"select cast(c6 as timestamp) as b from {self.dbname}.t1")
        # for i in range(len(data_t1_c6)):
        #     if data_t1_c6[i] is None:
        #         tdSql.checkData( i, 0 , None )
        #     elif i == 10:
        #         continue
        #     else:
        #         date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c6[i]) / 1000.0)
        #         tdSql.checkData( i, 0, date_init_stamp)

        tdLog.printNoPrefix("==========step28: cast bool to bigint, expect no changes")
        tdSql.query(f"select c7 from {self.dbname}.ct4")
@@ -0,0 +1,353 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-
from util.cases import tdCases
#from .nestedQueryInterval import *
from .nestedQuery import *
from faker import Faker
import random

class TDTestCase(TDTestCase):

    def explain_scan_value(self,sql,cachemodel):
        tdLog.info(cachemodel)
        if cachemodel=='none':
            tdSql.query(sql)
            self.check_sql_result_include(sql,'Table Scan')
            self.check_sql_result_not_include(sql,'Last Row Scan')

        elif cachemodel=='last_row':
            tdSql.query(sql)
            self.check_sql_result_include(sql,'Last Row Scan')
            self.check_sql_result_not_include(sql,'Table Scan')
        elif cachemodel=='last_row_1':
            tdSql.query("reset query cache;")
            tdSql.query(sql)
            self.check_sql_result_include(sql,'Last Row')
            self.check_sql_result_include(sql,'Table')

        elif cachemodel=='last_value':
            tdSql.query(sql)
            self.check_sql_result_not_include(sql,'Last Row Scan')
            self.check_sql_result_include(sql,'Table Scan')
        elif cachemodel=='last_value_1':
            tdSql.query(sql)
            self.check_sql_result_include(sql,'Last Row Scan')
            self.check_sql_result_include(sql,'Table Scan')

        elif cachemodel=='both':
            tdSql.query(sql)
            self.check_sql_result_include(sql,'Last Row Scan')
            self.check_sql_result_not_include(sql,'Table Scan')

        else:
            tdSql.query(sql)
            tdLog.info(sql)
            tdLog.exit(f"explain_scan_value : checkEqual error")

    def check_sql_result_include(self, sql,include_result):
        result = os.popen("taos -s 'reset query cache; %s'" %sql)
        res = result.read()
        #tdLog.info(res)
        if (include_result in res):
            tdLog.info(f"check_sql_result_include : checkEqual success")
        else :
            tdLog.info(res)
            tdLog.info(sql)
            tdLog.exit(f"check_sql_result_include : checkEqual error")

    def check_sql_result_not_include(self, sql,not_include_result):
        result = os.popen("taos -s 'reset query cache; %s'" %sql)
        res = result.read()
        #tdLog.info(res)
        if (not_include_result in res):
            tdLog.info(res)
            tdLog.info(sql)
            tdLog.exit(f"check_sql_result_not_include : checkEqual error")
        else :
            tdLog.info(f"check_sql_result_not_include : checkEqual success")

    def cachemodel_none(self, dbname="nested"):

        sql = f"explain select last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")

        sql = f"explain select last(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")

        sql = f"explain select last(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")


        # last(id+1)
        sql = f"explain select last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")

        sql = f"explain select last(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")

        sql = f"explain select last(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")


        #last(id)+1
        sql = f"explain select last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"none")

        sql = f"explain select last(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"none")

        sql = f"explain select last(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"none")

    def cachemodel_last_row(self, dbname="nested"):

        sql = f"explain select last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_value")
        sql = f"explain select last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_row")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_row_1")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_value")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_row")

    def cachemodel_last_value(self, dbname="nested"):

        sql = f"explain select last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_row")
        sql = f"explain select last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_value")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_row_1")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_row")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_value")

        sql = f"explain select last(q_int)+1 from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_row")
        sql = f"explain select last_row(q_int)+1 from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_value")
        sql = f"explain select last(q_int)+1,last_row(q_int)+1 from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_row_1")
        sql = f"explain select last(q_int)+1,last(q_int)+1 from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_row")
        sql = f"explain select last_row(q_int)+1,last_row(q_int)+1 from {dbname}.stable_1 "
        self.explain_scan_value(sql,"last_value")

    def cachemodel_both(self, dbname="nested"):

        sql = f"explain select last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"both")
        sql = f"explain select last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"both")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"both")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"both")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 "
        self.explain_scan_value(sql,"both")

        sql = f"explain select last(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"both")
        sql = f"explain select last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"both")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"both")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"both")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 group by tbname"
        self.explain_scan_value(sql,"both")

        sql = f"explain select last(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"both")
        sql = f"explain select last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"both")
        sql = f"explain select last(*),last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"both")
        sql = f"explain select last(*),last(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"both")
        sql = f"explain select last_row(*),last_row(*) from {dbname}.stable_1 partition by tbname"
        self.explain_scan_value(sql,"both")

    def modify_tables(self):
        fake = Faker('zh_CN')
        tdSql.execute('delete from stable_1_3;')
        tdSql.execute('delete from stable_1_4;')
        tdSql.execute('''create table stable_1_5 using stable_1 tags('stable_1_5', '%d' , '%d', '%d' , '%d' , 1 , 'binary6.%s' , 'nchar6.%s' , '%f', '%f' ,'%d') ;'''
                      %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
                        fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
                        fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
        tdSql.execute('''create table stable_1_6 using stable_1 tags('stable_1_6', '%d' , '%d', '%d' , '%d' , 1 , 'binary6.%s' , 'nchar6.%s' , '%f', '%f' ,'%d') ;'''
                      %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
                        fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
                        fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
        tdSql.execute('''create table stable_1_7 using stable_1 tags('stable_1_7', '%d' , '%d', '%d' , '%d' , 1 , 'binary6.%s' , 'nchar6.%s' , '%f', '%f' ,'%d') ;'''
                      %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
                        fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
                        fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
        tdSql.execute('''create table stable_1_8 using stable_1 tags('stable_1_8', '%d' , '%d', '%d' , '%d' , 1 , 'binary6.%s' , 'nchar6.%s' , '%f', '%f' ,'%d') ;'''
                      %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
                        fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
                        fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
        tdSql.execute('''create table stable_1_9 using stable_1 tags('stable_1_9', '%d' , '%d', '%d' , '%d' , 1 , 'binary6.%s' , 'nchar6.%s' , '%f', '%f' ,'%d') ;'''
                      %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
                        fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
                        fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
        tdSql.execute('''create table stable_1_90 using stable_1 tags('stable_1_90', '%d' , '%d', '%d' , '%d' , 1 , 'binary6.%s' , 'nchar6.%s' , '%f', '%f' ,'%d') ;'''
                      %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
                        fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
                        fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
        tdSql.execute('''create table stable_1_91 using stable_1 tags('stable_1_91', '%d' , '%d', '%d' , '%d' , 1 , 'binary6.%s' , 'nchar6.%s' , '%f', '%f' ,'%d') ;'''
                      %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
                        fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
                        fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
        tdSql.execute('''create table stable_1_92 using stable_1 tags('stable_1_92', '%d' , '%d', '%d' , '%d' , 1 , 'binary6.%s' , 'nchar6.%s' , '%f', '%f' ,'%d') ;'''
                      %(fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
                        fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
                        fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))

        tdSql.execute('alter stable stable_1 add tag t_int_null INT;')
        tdSql.execute('alter stable stable_1 add tag t_bigint_null BIGINT;')
        tdSql.execute('alter stable stable_1 add tag t_smallint_null SMALLINT;')
        tdSql.execute('alter stable stable_1 add tag t_tinyint_null TINYINT;')
        tdSql.execute('alter stable stable_1 add tag t_bool_null BOOL;')
        tdSql.execute('alter stable stable_1 add tag t_binary_null VARCHAR(100);')
        tdSql.execute('alter stable stable_1 add tag t_nchar_null NCHAR(100);')
        tdSql.execute('alter stable stable_1 add tag t_float_null FLOAT;')
        tdSql.execute('alter stable stable_1 add tag t_double_null DOUBLE;')
        tdSql.execute('alter stable stable_1 add tag t_ts_null TIMESTAMP;')

        tdSql.execute('alter stable stable_1 drop column q_nchar8;')
        tdSql.execute('alter stable stable_1 drop column q_binary8;')
        tdSql.execute('alter stable stable_1 drop column q_nchar7;')
        tdSql.execute('alter stable stable_1 drop column q_binary7;')
        tdSql.execute('alter stable stable_1 drop column q_nchar6;')
        tdSql.execute('alter stable stable_1 drop column q_binary6;')
        tdSql.execute('alter stable stable_1 drop column q_nchar5;')
        tdSql.execute('alter stable stable_1 drop column q_binary5;')
        tdSql.execute('alter stable stable_1 drop column q_nchar4;')
        tdSql.execute('alter stable stable_1 drop column q_binary4;')

    def run(self):
        tdSql.prepare()

        startTime = time.time()

        self.dropandcreateDB_random("nested", 1)
        self.modify_tables()

        for i in range(2):
            self.cachemodel_none()
            tdSql.query("alter database nested cachemodel 'last_row' ")
            tdSql.query("reset query cache;")
            self.cachemodel_last_row()
            tdSql.query("alter database nested cachemodel 'last_value' ")
            tdSql.query("reset query cache;")
            self.cachemodel_last_value()
            tdSql.query("alter database nested cachemodel 'both' ")
            tdSql.query("reset query cache;")
            self.cachemodel_both()
            tdSql.query("alter database nested cachemodel 'none' ")
            tdSql.query("reset query cache;")

        endTime = time.time()
        print("total time %ds" % (endTime - startTime))

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
File diff suppressed because it is too large
@@ -0,0 +1,52 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-
from util.cases import tdCases
from .primary_ts_base import *
from faker import Faker
import random

class TDTestCase(TDTestCase):

    def run(self):
        startTime = time.time()
        self.dropandcreateDB_primary_key(self.database, 1 , 1 ,'yes','yes','no')

        self.fun_pk_interp(self.database,'interp','')
        self.multiple_agg_groupby(self.database,1)
        self.fun_pk_diff(self.database,'diff','')
        self.fun_pk_twa(self.database,'derivative',',1s,0')
        self.fun_pk_twa(self.database,'derivative',',1s,1')
        self.fun_pk_unique(self.database,'unique','')
        # self.fun_pk_last_init(self.database,'last','')
        # self.fun_pk_last(self.database,'last','')
        # self.fun_pk_last(self.database,'last_row','')
        # self.fun_pk_first(self.database,'first','')

        # self.query_pk_fun(self.database,'')

        # self.touying_pk_1(self.database,1)
        # self.touying_pk_where(self.database,'')
        # self.touying_pk_where(self.database,'tags')
        # self.touying_pk_where(self.database,'distinct')
        # self.count_pk(self.database,1)

        endTime = time.time()
        print("total time %ds" % (endTime - startTime))

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -0,0 +1,52 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-
from util.cases import tdCases
from .primary_ts_base import *
from faker import Faker
import random

class TDTestCase(TDTestCase):

    def run(self):
        startTime = time.time()
        self.dropandcreateDB_primary_key(self.database, 1 , 1 ,'yes','yes','no')

        # self.fun_pk_interp(self.database,'interp','')
        # self.multiple_agg_groupby(self.database,1)
        # self.fun_pk_diff(self.database,'diff','')
        # self.fun_pk_twa(self.database,'derivative',',1s,0')
        # self.fun_pk_twa(self.database,'derivative',',1s,1')
        # self.fun_pk_unique(self.database,'unique','')
        self.fun_pk_last_init(self.database,'last','')
        self.fun_pk_last(self.database,'last','')
        self.fun_pk_last(self.database,'last_row','')
        self.fun_pk_first(self.database,'first','')

        self.query_pk_fun(self.database,'')

        # self.touying_pk_1(self.database,1)
        # self.touying_pk_where(self.database,'')
        # self.touying_pk_where(self.database,'tags')
        # self.touying_pk_where(self.database,'distinct')
        # self.count_pk(self.database,1)

        endTime = time.time()
        print("total time %ds" % (endTime - startTime))

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -0,0 +1,52 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-
from util.cases import tdCases
from .primary_ts_base import *
from faker import Faker
import random

class TDTestCase(TDTestCase):

    def run(self):
        startTime = time.time()
        self.dropandcreateDB_primary_key(self.database, 1 , 1 ,'yes','yes','no')

        # self.fun_pk_interp(self.database,'interp','')
        # self.multiple_agg_groupby(self.database,1)
        # self.fun_pk_diff(self.database,'diff','')
        # self.fun_pk_twa(self.database,'derivative',',1s,0')
        # self.fun_pk_twa(self.database,'derivative',',1s,1')
        # self.fun_pk_unique(self.database,'unique','')
        # self.fun_pk_last_init(self.database,'last','')
        # self.fun_pk_last(self.database,'last','')
        # self.fun_pk_last(self.database,'last_row','')
        # self.fun_pk_first(self.database,'first','')

        # self.query_pk_fun(self.database,'')

        self.touying_pk_1(self.database,1)
        self.touying_pk_where(self.database,'')
        # self.touying_pk_where(self.database,'tags')
        # self.touying_pk_where(self.database,'distinct')
        # self.count_pk(self.database,1)

        endTime = time.time()
        print("total time %ds" % (endTime - startTime))

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
@@ -0,0 +1,52 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-
from util.cases import tdCases
from .primary_ts_base import *
from faker import Faker
import random

class TDTestCase(TDTestCase):

    def run(self):
        startTime = time.time()
        self.dropandcreateDB_primary_key(self.database, 1 , 1 ,'yes','yes','no')

        # self.fun_pk_interp(self.database,'interp','')
        # self.multiple_agg_groupby(self.database,1)
        # self.fun_pk_diff(self.database,'diff','')
        # self.fun_pk_twa(self.database,'derivative',',1s,0')
        # self.fun_pk_twa(self.database,'derivative',',1s,1')
        # self.fun_pk_unique(self.database,'unique','')
        # self.fun_pk_last_init(self.database,'last','')
        # self.fun_pk_last(self.database,'last','')
        # self.fun_pk_last(self.database,'last_row','')
        # self.fun_pk_first(self.database,'first','')

        # self.query_pk_fun(self.database,'')

        # self.touying_pk_1(self.database,1)
        # self.touying_pk_where(self.database,'')
        self.touying_pk_where(self.database,'tags')
        self.touying_pk_where(self.database,'distinct')
        self.count_pk(self.database,1)

        endTime = time.time()
        print("total time %ds" % (endTime - startTime))

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())