Merge branch '3.0' into fix/TD-17511

commit 58ba4f8cf7
@@ -28,8 +28,9 @@ static void msg_process(TAOS_RES* msg) {
   printf("db: %s\n", tmq_get_db_name(msg));
   printf("vg: %d\n", tmq_get_vgroup_id(msg));
   if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
-    tmq_raw_data* raw = tmq_get_raw_meta(msg);
-    if (raw) {
+    tmq_raw_data raw = {0};
+    int32_t code = tmq_get_raw_meta(msg, &raw);
+    if (code == 0) {
       TAOS* pConn = taos_connect("192.168.1.86", "root", "taosdata", NULL, 0);
       if (pConn == NULL) {
         return;
@@ -53,7 +54,6 @@ static void msg_process(TAOS_RES* msg) {
     printf("write raw data: %s\n", tmq_err2str(ret));
     taos_close(pConn);
   }
-  tmq_free_raw_meta(raw);
   char* result = tmq_get_json_meta(msg);
   if (result) {
     printf("meta result: %s\n", result);

@@ -259,13 +259,17 @@ enum tmq_res_t {
   TMQ_RES_TABLE_META = 2,
 };

+typedef struct {
+  void* raw_meta;
+  uint32_t raw_meta_len;
+  uint16_t raw_meta_type;
+} tmq_raw_data;
+
 typedef enum tmq_res_t tmq_res_t;
-typedef struct tmq_raw_data tmq_raw_data;

 DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
-DLL_EXPORT tmq_raw_data *tmq_get_raw_meta(TAOS_RES *res);
-DLL_EXPORT int32_t taos_write_raw_meta(TAOS *taos, tmq_raw_data *raw_meta);
-DLL_EXPORT void tmq_free_raw_meta(tmq_raw_data *rawMeta);
+DLL_EXPORT int32_t tmq_get_raw_meta(TAOS_RES *res, tmq_raw_data *raw_meta);
+DLL_EXPORT int32_t taos_write_raw_meta(TAOS *taos, tmq_raw_data raw_meta);
 DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res); // Returning null means error. Returned result need to be freed by tmq_free_json_meta
 DLL_EXPORT void tmq_free_json_meta(char* jsonMeta);
 DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
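
A minimal consumer-side sketch of how the reworked API above fits together, assuming an already-created tmq_t *tmq and a connected TAOS *pConn (both names are illustrative, not taken from this commit):

  TAOS_RES* msg = tmq_consumer_poll(tmq, 1000);
  if (msg != NULL && tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
    tmq_raw_data raw = {0};
    if (tmq_get_raw_meta(msg, &raw) == 0) {
      // raw now points into the message's own meta buffer, which is why the
      // old tmq_free_raw_meta() call disappears from the example above
      int32_t ret = taos_write_raw_meta(pConn, raw);
      printf("write raw meta: %s\n", tmq_err2str(ret));
    }
  }
  taos_free_result(msg);
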
@@ -268,7 +268,7 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
     *actionNeeded = true;
   }
   if (*actionNeeded) {
-    uDebug("SML:0x%" PRIx64 " generate schema action. column name: %s, action: %d", info->id, colField->name,
+    uDebug("SML:0x%" PRIx64 " generate schema action. kv->name: %s, action: %d", info->id, kv->key,
           action->action);
   }
   return 0;
@@ -436,6 +436,7 @@ static int32_t smlProcessSchemaAction(SSmlHandle *info, SSchema *schemaField, SH
                                       SSchemaAction *action, bool isTag) {
   int32_t code = TSDB_CODE_SUCCESS;
   for (int j = 0; j < taosArrayGetSize(cols); ++j) {
+    if(j == 0 && !isTag) continue;
     SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, j);
     bool actionNeeded = false;
     code = smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, action, &actionNeeded, info);

@@ -452,18 +453,25 @@ static int32_t smlProcessSchemaAction(SSmlHandle *info, SSchema *schemaField, SH
   return TSDB_CODE_SUCCESS;
 }

-static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols) {
+static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols, bool isTag) {
   SHashObj *hashTmp = taosHashInit(length, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
-  for (uint16_t i = 0; i < length; i++) {
+  int32_t i = 0;
+  for ( ;i < length; i++) {
     taosHashPut(hashTmp, schema[i].name, strlen(schema[i].name), &i, SHORT_BYTES);
   }

-  for (int32_t i = 0; i < taosArrayGetSize(cols); i++) {
+  if (isTag){
+    i = 0;
+  } else {
+    i = 1;
+  }
+  for (; i < taosArrayGetSize(cols); i++) {
     SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, i);
     if (taosHashGet(hashTmp, kv->key, kv->keyLen) == NULL) {
       return -1;
     }
   }
   taosHashCleanup(hashTmp);
   return 0;
 }
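
A simplified illustration of the check smlCheckMeta now performs (the helper name and exact wiring here are assumptions, not part of this commit): data columns are verified from index 1 because index 0 holds the timestamp, while tags are verified from index 0.

  static int32_t checkKvsAgainstSchema(SHashObj* schemaHash, SArray* kvs, bool isTag) {
    for (int32_t i = isTag ? 0 : 1; i < taosArrayGetSize(kvs); ++i) {
      SSmlKv* kv = (SSmlKv*)taosArrayGetP(kvs, i);
      if (taosHashGet(schemaHash, kv->key, kv->keyLen) == NULL) {
        return -1;  // parsed key has no matching column/tag in the table schema
      }
    }
    return 0;
  }
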
@ -523,7 +531,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
|||
}
|
||||
|
||||
taosHashClear(hashTmp);
|
||||
for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
|
||||
for (uint16_t i = 1; i < pTableMeta->tableInfo.numOfColumns; i++) {
|
||||
taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES);
|
||||
}
|
||||
code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->cols, &schemaAction, false);
|
||||
|
@ -551,12 +559,12 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
|||
|
||||
if (needCheckMeta) {
|
||||
code = smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags,
|
||||
sTableData->tags);
|
||||
sTableData->tags, true);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("SML:0x%" PRIx64 " check tag failed. super table name %s", info->id, (char *)superTable);
|
||||
goto end;
|
||||
}
|
||||
code = smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols);
|
||||
code = smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols, false);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("SML:0x%" PRIx64 " check cols failed. super table name %s", info->id, (char *)superTable);
|
||||
goto end;
|
||||
|
@ -832,6 +840,7 @@ static int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32_t
|
|||
static int32_t smlParseTS(SSmlHandle *info, const char *data, int32_t len, SArray *cols) {
|
||||
int64_t ts = 0;
|
||||
if (info->protocol == TSDB_SML_LINE_PROTOCOL) {
|
||||
// uError("SML:data:%s,len:%d", data, len);
|
||||
ts = smlParseInfluxTime(info, data, len);
|
||||
} else if (info->protocol == TSDB_SML_TELNET_PROTOCOL) {
|
||||
ts = smlParseOpenTsdbTime(info, data, len);
|
||||
|
@ -2031,6 +2040,8 @@ static int32_t smlParseJSONString(SSmlHandle *info, cJSON *root, SSmlTableInfo *
|
|||
|
||||
static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) {
|
||||
SSmlLineInfo elements = {0};
|
||||
uDebug("SML:0x%" PRIx64 " smlParseInfluxLine sql:%s, hello", info->id, sql);
|
||||
|
||||
int ret = smlParseInfluxString(sql, &elements, &info->msgBuf);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
uError("SML:0x%" PRIx64 " smlParseInfluxLine failed", info->id);
|
||||
|
|
|
@@ -113,12 +113,6 @@ struct tmq_t {
   tsem_t rspSem;
 };

-struct tmq_raw_data {
-  void* raw_meta;
-  int32_t raw_meta_len;
-  int16_t raw_meta_type;
-};
-
 enum {
   TMQ_VG_STATUS__IDLE = 0,
   TMQ_VG_STATUS__WAIT,

@@ -1918,16 +1912,15 @@ const char* tmq_get_table_name(TAOS_RES* res) {
   return NULL;
 }

-tmq_raw_data* tmq_get_raw_meta(TAOS_RES* res) {
-  if (TD_RES_TMQ_META(res)) {
-    tmq_raw_data* raw = taosMemoryCalloc(1, sizeof(tmq_raw_data));
+int32_t tmq_get_raw_meta(TAOS_RES* res, tmq_raw_data *raw) {
+  if (TD_RES_TMQ_META(res) && raw) {
     SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
     raw->raw_meta = pMetaRspObj->metaRsp.metaRsp;
     raw->raw_meta_len = pMetaRspObj->metaRsp.metaRspLen;
     raw->raw_meta_type = pMetaRspObj->metaRsp.resMsgType;
-    return raw;
+    return TSDB_CODE_SUCCESS;
   }
-  return NULL;
+  return TSDB_CODE_INVALID_PARA;
 }

 static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id,

@@ -2935,23 +2928,23 @@ end:
   return code;
 }

-int32_t taos_write_raw_meta(TAOS* taos, tmq_raw_data* raw_meta) {
-  if (!taos || !raw_meta) {
+int32_t taos_write_raw_meta(TAOS *taos, tmq_raw_data raw_meta){
+  if (!taos) {
     return TSDB_CODE_INVALID_PARA;
   }

-  if (raw_meta->raw_meta_type == TDMT_VND_CREATE_STB) {
-    return taosCreateStb(taos, raw_meta->raw_meta, raw_meta->raw_meta_len);
-  } else if (raw_meta->raw_meta_type == TDMT_VND_ALTER_STB) {
-    return taosCreateStb(taos, raw_meta->raw_meta, raw_meta->raw_meta_len);
-  } else if (raw_meta->raw_meta_type == TDMT_VND_DROP_STB) {
-    return taosDropStb(taos, raw_meta->raw_meta, raw_meta->raw_meta_len);
-  } else if (raw_meta->raw_meta_type == TDMT_VND_CREATE_TABLE) {
-    return taosCreateTable(taos, raw_meta->raw_meta, raw_meta->raw_meta_len);
-  } else if (raw_meta->raw_meta_type == TDMT_VND_ALTER_TABLE) {
-    return taosAlterTable(taos, raw_meta->raw_meta, raw_meta->raw_meta_len);
-  } else if (raw_meta->raw_meta_type == TDMT_VND_DROP_TABLE) {
-    return taosDropTable(taos, raw_meta->raw_meta, raw_meta->raw_meta_len);
+  if(raw_meta.raw_meta_type == TDMT_VND_CREATE_STB) {
+    return taosCreateStb(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
+  }else if(raw_meta.raw_meta_type == TDMT_VND_ALTER_STB){
+    return taosCreateStb(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
+  }else if(raw_meta.raw_meta_type == TDMT_VND_DROP_STB){
+    return taosDropStb(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
+  }else if(raw_meta.raw_meta_type == TDMT_VND_CREATE_TABLE){
+    return taosCreateTable(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
+  }else if(raw_meta.raw_meta_type == TDMT_VND_ALTER_TABLE){
+    return taosAlterTable(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
+  }else if(raw_meta.raw_meta_type == TDMT_VND_DROP_TABLE){
+    return taosDropTable(taos, raw_meta.raw_meta, raw_meta.raw_meta_len);
   }
   return TSDB_CODE_INVALID_PARA;
 }

@ -49,9 +49,9 @@ static void dmProcessStatusRsp(SDnodeMgmt *pMgmt, SRpcMsg *pRsp) {
|
|||
dmUpdateEps(pMgmt->pData, statusRsp.pDnodeEps);
|
||||
}
|
||||
}
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
tFreeSStatusRsp(&statusRsp);
|
||||
}
|
||||
rpcFreeCont(pRsp->pCont);
|
||||
}
|
||||
|
||||
void dmSendStatusReq(SDnodeMgmt *pMgmt) {
|
||||
|
|
|
@ -43,9 +43,9 @@ typedef struct SRSmaInfo SRSmaInfo;
|
|||
typedef struct SRSmaInfoItem SRSmaInfoItem;
|
||||
|
||||
struct SSmaEnv {
|
||||
SRWLatch lock;
|
||||
int8_t type;
|
||||
SSmaStat *pStat;
|
||||
SRWLatch lock;
|
||||
int8_t type;
|
||||
SSmaStat *pStat;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
|
@@ -103,10 +103,15 @@ struct SRSmaInfoItem {
 };

 struct SRSmaInfo {
-  STSchema *pTSchema;
-  int64_t suid;
+  STSchema     *pTSchema;
+  int64_t       suid;
+  int8_t        delFlag;
+  T_REF_DECLARE()
   SRSmaInfoItem items[TSDB_RETENTION_L2];
 };
+#define RSMA_INFO_HEAD_LEN 24
+#define RSMA_INFO_IS_DEL(r) ((r)->delFlag == 1)
+#define RSMA_INFO_SET_DEL(r) ((r)->delFlag = 1)

 enum {
   TASK_TRIGGER_STAT_INIT = 0,
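
A rough sketch of where the 24 in RSMA_INFO_HEAD_LEN comes from: it is the byte offset of items[] in the new SRSmaInfo layout (pointer + int64 + int8 + the T_REF counter, with padding). The stand-in types and the compile-time check below are assumptions added for illustration, not code from this commit.

  #include <assert.h>
  #include <stddef.h>
  #include <stdint.h>

  typedef struct { volatile int32_t val; } ref_t;   // stand-in for T_REF_DECLARE()

  typedef struct {
    void*   pTSchema;   // STSchema* in the real struct
    int64_t suid;
    int8_t  delFlag;
    ref_t   ref;
    // SRSmaInfoItem items[TSDB_RETENTION_L2]; would follow at offset 24
  } rsma_info_head;

  static_assert(offsetof(rsma_info_head, ref) + sizeof(ref_t) == 24,
                "head fields must match RSMA_INFO_HEAD_LEN");
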
@ -120,8 +125,8 @@ enum {
|
|||
enum {
|
||||
RSMA_ROLE_CREATE = 0,
|
||||
RSMA_ROLE_DROP = 1,
|
||||
RSMA_ROLE_FETCH = 2,
|
||||
RSMA_ROLE_SUBMIT = 3,
|
||||
RSMA_ROLE_SUBMIT = 2,
|
||||
RSMA_ROLE_FETCH = 3,
|
||||
RSMA_ROLE_ITERATE = 4,
|
||||
};
|
||||
|
||||
|
@ -134,6 +139,8 @@ int32_t tdInsertRSmaData(SSma *pSma, char *msg);
|
|||
|
||||
int32_t tdRefSmaStat(SSma *pSma, SSmaStat *pStat);
|
||||
int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat);
|
||||
int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo);
|
||||
int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo);
|
||||
|
||||
void *tdAcquireSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln);
|
||||
int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln);
|
||||
|
@ -193,6 +200,7 @@ void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t le
|
|||
static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
|
||||
void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType);
|
||||
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
|
||||
void tdRemoveRSmaInfoBySuid(SSma *pSma, int64_t suid);
|
||||
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);
|
||||
|
||||
int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
|
||||
|
@ -258,8 +266,9 @@ void tdUpdateTFileMagic(STFile *pTFile, void *pCksm);
|
|||
void tdCloseTFile(STFile *pTFile);
|
||||
void tdDestroyTFile(STFile *pTFile);
|
||||
|
||||
void tdGetVndFileName(int32_t vgId, const char *pdname, const char *dname, const char *fname, int64_t version, char *outputName);
|
||||
void tdGetVndDirName(int32_t vgId,const char *pdname, const char *dname, bool endWithSep, char *outputName);
|
||||
void tdGetVndFileName(int32_t vgId, const char *pdname, const char *dname, const char *fname, int64_t version,
|
||||
char *outputName);
|
||||
void tdGetVndDirName(int32_t vgId, const char *pdname, const char *dname, bool endWithSep, char *outputName);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -186,6 +186,7 @@ int32_t tsdbGetNRowsInTbData(STbData *pTbData);
|
|||
typedef enum { TSDB_HEAD_FILE = 0, TSDB_DATA_FILE, TSDB_LAST_FILE, TSDB_SMA_FILE } EDataFileT;
|
||||
void tsdbDataFileName(STsdb *pTsdb, SDFileSet *pDFileSet, EDataFileT ftype, char fname[]);
|
||||
bool tsdbFileIsSame(SDFileSet *pDFileSet1, SDFileSet *pDFileSet2, EDataFileT ftype);
|
||||
bool tsdbDelFileIsSame(SDelFile *pDelFile1, SDelFile *pDelFile2);
|
||||
int32_t tsdbUpdateDFileHdr(TdFilePtr pFD, SDFileSet *pSet, EDataFileT ftype);
|
||||
int32_t tsdbDFileRollback(STsdb *pTsdb, SDFileSet *pSet, EDataFileT ftype);
|
||||
int32_t tPutDataFileHdr(uint8_t *p, SDFileSet *pSet, EDataFileT ftype);
|
||||
|
|
|
@ -277,7 +277,7 @@ _drop_super_table:
|
|||
_exit:
|
||||
tdbFree(pKey);
|
||||
tdbFree(pData);
|
||||
metaDebug("vgId:%d, super table %s uid:%" PRId64 " is dropped", TD_VID(pMeta->pVnode), pReq->name, pReq->suid);
|
||||
metaDebug("vgId:%d, super table %s uid:%" PRId64 " is dropped", TD_VID(pMeta->pVnode), pReq->name, pReq->suid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -191,7 +191,7 @@ static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
|
|||
if ((pDir = taosOpenDir(dir)) == NULL) {
|
||||
regfree(®ex);
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
smaWarn("vgId:%d, rsma post commit, open dir %s failed since %s", TD_VID(pVnode), dir, terrstr());
|
||||
smaDebug("vgId:%d, rsma post commit, open dir %s failed since %s", TD_VID(pVnode), dir, terrstr());
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
|
@ -392,5 +392,7 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
|
|||
// step 2: cleanup outdated qtaskinfo files
|
||||
tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
|
||||
|
||||
atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
|
|
@ -169,6 +169,26 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
|
||||
if (!pRSmaInfo) return 0;
|
||||
|
||||
int ref = T_REF_INC(pRSmaInfo);
|
||||
smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
|
||||
if (!pRSmaInfo) return 0;
|
||||
|
||||
int ref = T_REF_DEC(pRSmaInfo);
|
||||
smaDebug("vgId:%d, unref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
|
||||
|
||||
if (ref == 0) {
|
||||
tdRemoveRSmaInfoBySuid(pSma, pRSmaInfo->suid);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma) {
|
||||
ASSERT(pSmaStat != NULL);
|
||||
|
||||
|
|
|
@ -24,6 +24,7 @@ SSmaMgmt smaMgmt = {
|
|||
};
|
||||
|
||||
#define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver"
|
||||
#define TD_RSMAINFO_DEL_FILE "rsmainfo.del"
|
||||
typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem;
|
||||
typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter;
|
||||
|
||||
|
@ -48,14 +49,11 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables);
|
|||
static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int64_t *committed);
|
||||
static int32_t tdRSmaRestoreTSDataReload(SSma *pSma, int64_t committed);
|
||||
|
||||
|
||||
|
||||
static SRSmaInfo *tdGetRSmaInfoByItem(SRSmaInfoItem *pItem) {
|
||||
// adapt accordingly if definition of SRSmaInfo update
|
||||
SRSmaInfo *pResult = NULL;
|
||||
int32_t rsmaInfoHeadLen = sizeof(int64_t) + sizeof(STSchema *);
|
||||
ASSERT(pItem->level == TSDB_RETENTION_L1 || pItem->level == TSDB_RETENTION_L2);
|
||||
pResult = (SRSmaInfo *)POINTER_SHIFT(pItem, -(sizeof(SRSmaInfoItem) * (pItem->level - 1) + rsmaInfoHeadLen));
|
||||
pResult = (SRSmaInfo *)POINTER_SHIFT(pItem, -(sizeof(SRSmaInfoItem) * (pItem->level - 1) + RSMA_INFO_HEAD_LEN));
|
||||
ASSERT(pResult->pTSchema->numOfCols > 1);
|
||||
return pResult;
|
||||
}
|
||||
|
@ -116,8 +114,8 @@ void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree) {
|
|||
SRSmaInfoItem *pItem = &pInfo->items[i];
|
||||
if (pItem->taskInfo) {
|
||||
if (isDeepFree && pItem->tmrId) {
|
||||
smaDebug("vgId:%d, table %" PRIi64 " stop fetch timer %p level %d", SMA_VID(pSma), pInfo->suid, pItem->tmrId,
|
||||
i + 1);
|
||||
smaDebug("vgId:%d, stop fetch timer %p for table %" PRIi64 " level %d", SMA_VID(pSma), pInfo->suid,
|
||||
pItem->tmrId, i + 1);
|
||||
taosTmrStopA(&pItem->tmrId);
|
||||
}
|
||||
tdFreeQTaskInfo(&pItem->taskInfo, SMA_VID(pSma), i + 1);
|
||||
|
@ -337,6 +335,7 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con
|
|||
}
|
||||
pRSmaInfo->pTSchema = pTSchema;
|
||||
pRSmaInfo->suid = suid;
|
||||
T_REF_INIT_VAL(pRSmaInfo, 1);
|
||||
|
||||
if (tdSetRSmaInfoItemParams(pSma, param, pStat, pRSmaInfo, 0) < 0) {
|
||||
goto _err;
|
||||
|
@ -392,11 +391,33 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SVCreateStbReq *pReq) {
|
|||
int32_t tdProcessRSmaDrop(SSma *pSma, SVDropStbReq *pReq) {
|
||||
SVnode *pVnode = pSma->pVnode;
|
||||
if (!VND_IS_RSMA(pVnode)) {
|
||||
smaTrace("vgId:%d, not create rsma for stable %s %" PRIi64 " since vnd is not rsma", TD_VID(pVnode), pReq->name,
|
||||
smaTrace("vgId:%d, not drop rsma for stable %s %" PRIi64 " since vnd is not rsma", TD_VID(pVnode), pReq->name,
|
||||
pReq->suid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
|
||||
if (!pSmaEnv) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
|
||||
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
|
||||
|
||||
SRSmaInfo *pRSmaInfo = tdGetRSmaInfoBySuid(pSma, pReq->suid);
|
||||
|
||||
if (!pRSmaInfo) {
|
||||
smaWarn("vgId:%d, drop rsma for stable %s %" PRIi64 " failed no rsma in hash", TD_VID(pVnode), pReq->name,
|
||||
pReq->suid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// set del flag for data in mem
|
||||
RSMA_INFO_SET_DEL(pRSmaInfo);
|
||||
tdUnRefRSmaInfo(pSma, pRSmaInfo);
|
||||
|
||||
// save to file
|
||||
|
||||
smaDebug("vgId:%d, drop rsma for table %" PRIi64 " succeed", TD_VID(pVnode), pReq->suid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -650,10 +671,10 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType
|
|||
|
||||
/**
|
||||
* @brief During async commit, the SRSmaInfo object would be COW from iRSmaInfoHash and write lock should be applied.
|
||||
*
|
||||
* @param pSma
|
||||
* @param suid
|
||||
* @return SRSmaInfo*
|
||||
*
|
||||
* @param pSma
|
||||
* @param suid
|
||||
* @return SRSmaInfo*
|
||||
*/
|
||||
static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) {
|
||||
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
|
||||
|
@ -661,7 +682,6 @@ static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) {
|
|||
SRSmaInfo *pRSmaInfo = NULL;
|
||||
|
||||
if (!pEnv) {
|
||||
// only applicable when rsma env exists
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -683,18 +703,21 @@ static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) {
|
|||
SRSmaInfo *pCowRSmaInfo = NULL;
|
||||
// lock
|
||||
taosWLockLatch(SMA_ENV_LOCK(pEnv));
|
||||
void *iRSmaInfo = taosHashGet(RSMA_IMU_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
|
||||
if (iRSmaInfo) {
|
||||
SRSmaInfo *pIRSmaInfo = *(SRSmaInfo **)iRSmaInfo;
|
||||
if (pIRSmaInfo) {
|
||||
if (tdCloneRSmaInfo(pSma, pCowRSmaInfo, pIRSmaInfo) < 0) {
|
||||
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
|
||||
smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr());
|
||||
return NULL;
|
||||
}
|
||||
if (taosHashPut(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t), &pCowRSmaInfo, sizeof(pCowRSmaInfo)) < 0) {
|
||||
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
|
||||
return NULL;
|
||||
if (!taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t))) { // 2-phase lock
|
||||
void *iRSmaInfo = taosHashGet(RSMA_IMU_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
|
||||
if (iRSmaInfo) {
|
||||
SRSmaInfo *pIRSmaInfo = *(SRSmaInfo **)iRSmaInfo;
|
||||
if (pIRSmaInfo) {
|
||||
if (tdCloneRSmaInfo(pSma, pCowRSmaInfo, pIRSmaInfo) < 0) {
|
||||
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
|
||||
smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr());
|
||||
return NULL;
|
||||
}
|
||||
smaDebug("vgId:%d, clone rsma info succeed for suid:%" PRIu64, SMA_VID(pSma), suid);
|
||||
if (taosHashPut(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t), &pCowRSmaInfo, sizeof(pCowRSmaInfo)) < 0) {
|
||||
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
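
The hunk above replaces a single probe with a double-checked ("2-phase lock") lookup. A rough, generic sketch of that pattern with simplified stand-in names (cloneRSmaInfo here is a hypothetical helper, not this commit's tdCloneRSmaInfo signature):

  static SRSmaInfo* getOrClone(SHashObj* live, SHashObj* snapshot, SRWLatch* lock, tb_uid_t suid) {
    void* hit = taosHashGet(live, &suid, sizeof(suid));
    if (hit) return *(SRSmaInfo**)hit;               // fast path, no latch taken
    taosWLockLatch(lock);
    if (!taosHashGet(live, &suid, sizeof(suid))) {   // phase 2: re-check under the latch
      void* src = taosHashGet(snapshot, &suid, sizeof(suid));
      if (src) {
        SRSmaInfo* clone = cloneRSmaInfo(*(SRSmaInfo**)src);  // hypothetical deep copy
        taosHashPut(live, &suid, sizeof(suid), &clone, sizeof(clone));
      }
    }
    taosWUnLockLatch(lock);
    hit = taosHashGet(live, &suid, sizeof(suid));
    return hit ? *(SRSmaInfo**)hit : NULL;
  }

Without the second check, two callers racing past the lock-free probe could both clone the same entry into the live hash.
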
@ -703,21 +726,56 @@ static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) {
|
|||
return pCowRSmaInfo;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief During the drop procedure, only need to delete the object in rsmaInfoHash.
|
||||
*
|
||||
* @param pSma
|
||||
* @param suid
|
||||
* @return SRSmaInfo*
|
||||
*/
|
||||
void tdRemoveRSmaInfoBySuid(SSma *pSma, int64_t suid) {
|
||||
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
|
||||
SRSmaStat *pStat = NULL;
|
||||
SRSmaInfo *pRSmaInfo = NULL;
|
||||
|
||||
if (!pEnv) {
|
||||
return;
|
||||
}
|
||||
|
||||
pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
|
||||
if (!pStat || !RSMA_INFO_HASH(pStat)) {
|
||||
return;
|
||||
}
|
||||
|
||||
pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
|
||||
if (pRSmaInfo) {
|
||||
if ((pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
|
||||
tdFreeRSmaInfo(pSma, pRSmaInfo, true);
|
||||
}
|
||||
taosHashRemove(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
|
||||
smaDebug("vgId:%d, remove from infoHash for table:%" PRIu64 " succeed", SMA_VID(pSma), suid);
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) {
|
||||
SRSmaInfo *pRSmaInfo = tdGetRSmaInfoBySuid(pSma, suid);
|
||||
if (!pRSmaInfo) {
|
||||
smaDebug("vgId:%d, return as no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid);
|
||||
smaDebug("vgId:%d, execute rsma, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (!pRSmaInfo->items[0].taskInfo) {
|
||||
smaDebug("vgId:%d, return as no rsma qTaskInfo for suid:%" PRIu64, SMA_VID(pSma), suid);
|
||||
smaDebug("vgId:%d, execute rsma, no rsma qTaskInfo for suid:%" PRIu64, SMA_VID(pSma), suid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (inputType == STREAM_INPUT__DATA_SUBMIT) {
|
||||
tdRefRSmaInfo(pSma, pRSmaInfo);
|
||||
|
||||
tdExecuteRSmaImpl(pSma, pMsg, inputType, &pRSmaInfo->items[0], pRSmaInfo->pTSchema, suid, TSDB_RETENTION_L1);
|
||||
tdExecuteRSmaImpl(pSma, pMsg, inputType, &pRSmaInfo->items[1], pRSmaInfo->pTSchema, suid, TSDB_RETENTION_L2);
|
||||
|
||||
tdUnRefRSmaInfo(pSma, pRSmaInfo);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -931,10 +989,10 @@ _err:
|
|||
|
||||
/**
|
||||
* @brief Restore from SRSmaQTaskInfoItem
|
||||
*
|
||||
* @param pSma
|
||||
* @param pItem
|
||||
* @return int32_t
|
||||
*
|
||||
* @param pSma
|
||||
* @param pItem
|
||||
* @return int32_t
|
||||
*/
|
||||
static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *pItem) {
|
||||
SRSmaInfo *pRSmaInfo = NULL;
|
||||
|
@ -1271,6 +1329,9 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
|
|||
}
|
||||
|
||||
SRSmaInfo *pRSmaInfo = tdGetRSmaInfoByItem(pItem);
|
||||
if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
int8_t fetchTriggerStat =
|
||||
atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE);
|
||||
|
@ -1279,13 +1340,14 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
|
|||
smaDebug("vgId:%d, fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is active", SMA_VID(pSma),
|
||||
pItem->level, pRSmaInfo->suid);
|
||||
|
||||
tdRefSmaStat(pSma, (SSmaStat *)pStat);
|
||||
// sync procedure => async process
|
||||
tdRefRSmaInfo(pSma, pRSmaInfo);
|
||||
|
||||
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
|
||||
qSetStreamInput(pItem->taskInfo, &dataBlock, STREAM_INPUT__DATA_BLOCK, false);
|
||||
tdRSmaFetchAndSubmitResult(pItem, pRSmaInfo->pTSchema, pRSmaInfo->suid, pStat, STREAM_INPUT__DATA_BLOCK);
|
||||
|
||||
tdUnRefSmaStat(pSma, (SSmaStat *)pStat);
|
||||
tdUnRefRSmaInfo(pSma, pRSmaInfo);
|
||||
} break;
|
||||
case TASK_TRIGGER_STAT_PAUSED: {
|
||||
smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is paused",
|
||||
|
|
|
@ -245,7 +245,7 @@ static int32_t tsdbApplyDelFileChange(STsdbFS *pFS, SDelFile *pFrom, SDelFile *p
|
|||
char fname[TSDB_FILENAME_LEN];
|
||||
|
||||
if (pFrom && pTo) {
|
||||
if (pFrom != pTo) {
|
||||
if (!tsdbDelFileIsSame(pFrom, pTo)) {
|
||||
tsdbDelFileName(pFS->pTsdb, pFrom, fname);
|
||||
if (taosRemoveFile(fname) < 0) {
|
||||
code = TAOS_SYSTEM_ERROR(errno);
|
||||
|
|
|
@ -140,6 +140,8 @@ bool tsdbFileIsSame(SDFileSet *pDFileSet1, SDFileSet *pDFileSet2, EDataFileT fty
|
|||
}
|
||||
}
|
||||
|
||||
bool tsdbDelFileIsSame(SDelFile *pDelFile1, SDelFile *pDelFile2) { return pDelFile1->commitID == pDelFile2->commitID; }
|
||||
|
||||
int32_t tsdbUpdateDFileHdr(TdFilePtr pFD, SDFileSet *pSet, EDataFileT ftype) {
|
||||
int32_t code = 0;
|
||||
int64_t n;
|
||||
|
|
|
@@ -246,7 +246,7 @@ int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb

   tsdbDelFileName(pTsdb, pFile, fname);
   pDelFReader->pReadH = taosOpenFile(fname, TD_FILE_READ);
-  if (pDelFReader == NULL) {
+  if (pDelFReader->pReadH == NULL) {
     code = TAOS_SYSTEM_ERROR(errno);
     taosMemoryFree(pDelFReader);
     goto _err;

@ -2161,7 +2161,7 @@ static void smlDestroyTableHandle(void* pHandle) {
|
|||
tdDestroySVCreateTbReq(&handle->createTblReq);
|
||||
}
|
||||
|
||||
static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SSchema* pSchema) {
|
||||
static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SSchema* pSchema, bool isTag) {
|
||||
col_id_t nCols = pColList->numOfCols;
|
||||
|
||||
pColList->numOfBound = 0;
|
||||
|
@ -2177,7 +2177,8 @@ static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SS
|
|||
SSmlKv* kv = taosArrayGetP(cols, i);
|
||||
SToken sToken = {.n = kv->keyLen, .z = (char*)kv->key};
|
||||
col_id_t t = lastColIdx + 1;
|
||||
col_id_t index = findCol(&sToken, t, nCols, pSchema);
|
||||
col_id_t index = ((t == 0 && !isTag) ? 0 : findCol(&sToken, t, nCols, pSchema));
|
||||
uDebug("SML, index:%d, t:%d, ncols:%d, kv->name:%s", index, t, nCols, kv->key);
|
||||
if (index < 0 && t > 0) {
|
||||
index = findCol(&sToken, 0, t, pSchema);
|
||||
isOrdered = false;
|
||||
|
@ -2312,7 +2313,7 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
|
|||
smlDestroyTableHandle(&smlHandle->tableExecHandle); // free for each table
|
||||
SSchema* pTagsSchema = getTableTagSchema(pTableMeta);
|
||||
setBoundColumnInfo(&smlHandle->tableExecHandle.tags, pTagsSchema, getNumOfTags(pTableMeta));
|
||||
int ret = smlBoundColumnData(tags, &smlHandle->tableExecHandle.tags, pTagsSchema);
|
||||
int ret = smlBoundColumnData(tags, &smlHandle->tableExecHandle.tags, pTagsSchema, true);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
buildInvalidOperationMsg(&pBuf, "bound tags error");
|
||||
return ret;
|
||||
|
@ -2343,7 +2344,7 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
|
|||
|
||||
SSchema* pSchema = getTableColumnSchema(pTableMeta);
|
||||
|
||||
ret = smlBoundColumnData(colsSchema, &pDataBlock->boundColumnInfo, pSchema);
|
||||
ret = smlBoundColumnData(colsSchema, &pDataBlock->boundColumnInfo, pSchema, false);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
buildInvalidOperationMsg(&pBuf, "bound cols error");
|
||||
return ret;
|
||||
|
@ -2401,7 +2402,9 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
|
|||
} else {
|
||||
int32_t colLen = kv->length;
|
||||
if (pColSchema->type == TSDB_DATA_TYPE_TIMESTAMP) {
|
||||
// uError("SML:data before:%ld, precision:%d", kv->i, pTableMeta->tableInfo.precision);
|
||||
kv->i = convertTimePrecision(kv->i, TSDB_TIME_PRECISION_NANO, pTableMeta->tableInfo.precision);
|
||||
// uError("SML:data after:%ld, precision:%d", kv->i, pTableMeta->tableInfo.precision);
|
||||
}
|
||||
|
||||
if (IS_VAR_DATA_TYPE(kv->type)) {
|
||||
|
|
|
@ -313,7 +313,7 @@ static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index,
|
|||
snprintf(logBuf, sizeof(logBuf), "wal read error, index:%" PRId64 ", err:%d %X, msg:%s, syserr:%d, sysmsg:%s",
|
||||
index, err, err, errStr, sysErr, sysErrStr);
|
||||
if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) {
|
||||
syncNodeEventLog(pData->pSyncNode, logBuf);
|
||||
// syncNodeEventLog(pData->pSyncNode, logBuf);
|
||||
} else {
|
||||
syncNodeErrorLog(pData->pSyncNode, logBuf);
|
||||
}
|
||||
|
@ -499,7 +499,7 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) {
|
|||
snprintf(logBuf, sizeof(logBuf), "wal read error, index:%" PRId64 ", err:%d %X, msg:%s, syserr:%d, sysmsg:%s",
|
||||
index, err, err, errStr, sysErr, sysErrStr);
|
||||
if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST) {
|
||||
syncNodeEventLog(pData->pSyncNode, logBuf);
|
||||
// syncNodeEventLog(pData->pSyncNode, logBuf);
|
||||
} else {
|
||||
syncNodeErrorLog(pData->pSyncNode, logBuf);
|
||||
}
|
||||
|
|
|
@ -30,6 +30,7 @@ import argparse
|
|||
import sys
|
||||
import os
|
||||
import io
|
||||
import datetime
|
||||
import signal
|
||||
import traceback
|
||||
import requests
|
||||
|
@ -1107,14 +1108,20 @@ class Database:
|
|||
# TODO: fix the error as result of above: "tsdb timestamp is out of range"
|
||||
@classmethod
|
||||
def setupLastTick(cls):
|
||||
t1 = datetime.datetime(2020, 6, 1)
|
||||
# start time will be auto generated , start at 10 years ago local time
|
||||
local_time = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16]
|
||||
local_epoch_time = [int(i) for i in local_time.split("-")]
|
||||
#local_epoch_time will be such as : [2022, 7, 18]
|
||||
|
||||
t1 = datetime.datetime(local_epoch_time[0]-5, local_epoch_time[1], local_epoch_time[2])
|
||||
t2 = datetime.datetime.now()
|
||||
# maybe a very large number, takes 69 years to exceed Python int range
|
||||
elSec = int(t2.timestamp() - t1.timestamp())
|
||||
elSec2 = (elSec % (8 * 12 * 30 * 24 * 60 * 60 / 500)) * \
|
||||
500 # a number representing seconds within 10 years
|
||||
# print("elSec = {}".format(elSec))
|
||||
t3 = datetime.datetime(2012, 1, 1) # default "keep" is 10 years
|
||||
|
||||
t3 = datetime.datetime(local_epoch_time[0]-10, local_epoch_time[1], local_epoch_time[2]) # default "keep" is 10 years
|
||||
t4 = datetime.datetime.fromtimestamp(
|
||||
t3.timestamp() + elSec2) # see explanation above
|
||||
Logging.debug("Setting up TICKS to start from: {}".format(t4))
|
||||
|
|
|
@ -56,6 +56,16 @@ class TDTestCase:
|
|||
}
|
||||
return numbers.get(value, 'other')
|
||||
|
||||
def getCacheModelNum(self,str):
|
||||
numbers = {
|
||||
"none" : 0,
|
||||
"last_row" : 1,
|
||||
"last_value" : 2,
|
||||
"both" : 3
|
||||
|
||||
}
|
||||
return numbers.get(str, 'other')
|
||||
|
||||
def prepare_datas(self):
|
||||
for i in range(4):
|
||||
str = self.getCacheModelStr(i)
|
||||
|
@ -69,7 +79,7 @@ class TDTestCase:
|
|||
tdSql.execute(" insert into tb1 values(now , %d, %f)" %(k,k*10) )
|
||||
tdSql.execute(" insert into tb2 values(now , %d, %f)" %(k,k*10) )
|
||||
|
||||
def check_cache_last_sets(self):
|
||||
def check_cachemodel_sets(self):
|
||||
|
||||
|
||||
# check cache_last value for database
|
||||
|
@ -84,52 +94,54 @@ class TDTestCase:
|
|||
# print(cache_last_value)
|
||||
if dbname in ["information_schema" , "performance_schema"]:
|
||||
continue
|
||||
cache_lasts[dbname]=cache_last_value
|
||||
cache_lasts[dbname]=self.getCacheModelNum(cache_last_value)
|
||||
|
||||
|
||||
# cache_last_set value
|
||||
for k , v in cache_lasts.items():
|
||||
|
||||
if k=="testdb_"+str(v):
|
||||
tdLog.info(" database %s cache_last value check pass, value is %s "%(k,v) )
|
||||
if k=="testdb_"+str(self.getCacheModelStr(v)):
|
||||
tdLog.info(" database %s cache_last value check pass, value is %s "%(k,self.getCacheModelStr(v)) )
|
||||
else:
|
||||
tdLog.exit(" database %s cache_last value check fail, value is %s "%(k,v) )
|
||||
tdLog.exit(" database %s cache_last value check fail, value is %s "%(k,self.getCacheModelStr(v)) )
|
||||
|
||||
# # check storage layer implementation
|
||||
|
||||
|
||||
# buildPath = self.getBuildPath()
|
||||
# if (buildPath == ""):
|
||||
# tdLog.exit("taosd not found!")
|
||||
# else:
|
||||
# tdLog.info("taosd found in %s" % buildPath)
|
||||
# dataPath = buildPath + "/../sim/dnode1/data"
|
||||
# abs_vnodePath = os.path.abspath(dataPath)+"/vnode/"
|
||||
# tdLog.info("abs_vnodePath: %s" % abs_vnodePath)
|
||||
buildPath = self.getBuildPath()
|
||||
if (buildPath == ""):
|
||||
tdLog.exit("taosd not found!")
|
||||
else:
|
||||
tdLog.info("taosd found in %s" % buildPath)
|
||||
dataPath = buildPath + "/../sim/dnode1/data"
|
||||
abs_vnodePath = os.path.abspath(dataPath)+"/vnode/"
|
||||
tdLog.info("abs_vnodePath: %s" % abs_vnodePath)
|
||||
|
||||
# tdSql.query(" show dnodes ")
|
||||
# dnode_id = tdSql.queryResult[0][0]
|
||||
tdSql.query(" show dnodes ")
|
||||
dnode_id = tdSql.queryResult[0][0]
|
||||
|
||||
# for dbname in cache_lasts.keys():
|
||||
# print(dbname)
|
||||
# tdSql.execute(" use %s" % dbname)
|
||||
# tdSql.query(" show vgroups ")
|
||||
# vgroups_infos = tdSql.queryResult
|
||||
# for vgroup_info in vgroups_infos:
|
||||
# vnode_json = abs_vnodePath + "/vnode" +f"{vgroup_info[0]}/" + "vnode.json"
|
||||
# vnode_info_of_db = f"cat {vnode_json}"
|
||||
# vnode_info = subprocess.check_output(vnode_info_of_db, shell=True).decode("utf-8")
|
||||
# infoDict = json.loads(vnode_info)
|
||||
# vnode_json_of_dbname = f"{dnode_id}."+ dbname
|
||||
# config = infoDict["config"]
|
||||
# if infoDict["config"]["dbname"] == vnode_json_of_dbname:
|
||||
# if "cachelast" in infoDict["config"]:
|
||||
# if int(infoDict["config"]["cachelast"]) != cache_lasts[dbname]:
|
||||
# tdLog.exit("cachelast value is error in vnode.json of vnode%d "%(vgroup_info[0]))
|
||||
# else:
|
||||
# tdLog.exit("cachelast not found in vnode.json of vnode%d "%(vgroup_info[0]))
|
||||
for dbname in cache_lasts.keys():
|
||||
# print(dbname)
|
||||
tdSql.execute(" use %s" % dbname)
|
||||
tdSql.query(" show vgroups ")
|
||||
vgroups_infos = tdSql.queryResult
|
||||
for vgroup_info in vgroups_infos:
|
||||
vnode_json = abs_vnodePath + "/vnode" +f"{vgroup_info[0]}/" + "vnode.json"
|
||||
vnode_info_of_db = f"cat {vnode_json}"
|
||||
vnode_info = subprocess.check_output(vnode_info_of_db, shell=True).decode("utf-8")
|
||||
infoDict = json.loads(vnode_info)
|
||||
vnode_json_of_dbname = f"{dnode_id}."+ dbname
|
||||
config = infoDict["config"]
|
||||
if infoDict["config"]["dbname"] == vnode_json_of_dbname:
|
||||
if "cacheLast" in infoDict["config"]:
|
||||
if int(infoDict["config"]["cacheLast"]) != cache_lasts[dbname]:
|
||||
tdLog.exit("cachemodel value is error in vnode.json of vnode%d "%(vgroup_info[0]))
|
||||
else:
|
||||
tdLog.info("cachemodel value is success in vnode.json of vnode%d "%(vgroup_info[0]))
|
||||
else:
|
||||
tdLog.exit("cacheLast not found in vnode.json of vnode%d "%(vgroup_info[0]))
|
||||
|
||||
def restart_check_cache_last_sets(self):
|
||||
def restart_check_cachemodel_sets(self):
|
||||
|
||||
for i in range(3):
|
||||
tdSql.query("show dnodes")
|
||||
|
@ -137,14 +149,14 @@ class TDTestCase:
|
|||
tdDnodes.stop(index)
|
||||
tdDnodes.start(index)
|
||||
time.sleep(3)
|
||||
self.check_cache_last_sets()
|
||||
self.check_cachemodel_sets()
|
||||
|
||||
|
||||
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
|
||||
self.illegal_params()
|
||||
self.prepare_datas()
|
||||
self.check_cache_last_sets()
|
||||
self.restart_check_cache_last_sets()
|
||||
self.check_cachemodel_sets()
|
||||
self.restart_check_cachemodel_sets()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
|
@ -0,0 +1,190 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import random
|
||||
import string
|
||||
|
||||
from numpy import logspace
|
||||
from util import constant
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.common import *
|
||||
from util.sqlset import TDSetSql
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(),logSql)
|
||||
self.dbname = 'db_test'
|
||||
self.setsql = TDSetSql()
|
||||
self.ntbname = 'ntb'
|
||||
self.rowNum = 10
|
||||
self.tbnum = 20
|
||||
self.ts = 1537146000000
|
||||
self.binary_str = 'taosdata'
|
||||
self.nchar_str = '涛思数据'
|
||||
self.str_length = 20
|
||||
self.column_dict = {
|
||||
'col1': 'tinyint',
|
||||
'col2': 'smallint',
|
||||
'col3': 'int',
|
||||
'col4': 'bigint',
|
||||
'col5': 'tinyint unsigned',
|
||||
'col6': 'smallint unsigned',
|
||||
'col7': 'int unsigned',
|
||||
'col8': 'bigint unsigned',
|
||||
'col9': 'float',
|
||||
'col10': 'double',
|
||||
'col11': 'bool',
|
||||
'col12': f'binary({self.str_length})',
|
||||
'col13': f'nchar({self.str_length})',
|
||||
|
||||
}
|
||||
self.tinyint_val = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX)
|
||||
self.smallint_val = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX)
|
||||
self.int_val = random.randint(constant.INT_MIN,constant.INT_MAX)
|
||||
self.bigint_val = random.randint(constant.BIGINT_MIN,constant.BIGINT_MAX)
|
||||
self.untingint_val = random.randint(constant.TINYINT_UN_MIN,constant.TINYINT_UN_MAX)
|
||||
self.unsmallint_val = random.randint(constant.SMALLINT_UN_MIN,constant.SMALLINT_UN_MAX)
|
||||
self.unint_val = random.randint(constant.INT_UN_MIN,constant.INT_MAX)
|
||||
self.unbigint_val = random.randint(constant.BIGINT_UN_MIN,constant.BIGINT_UN_MAX)
|
||||
self.float_val = random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX)
|
||||
self.double_val = random.uniform(constant.DOUBLE_MIN*(1E-300),constant.DOUBLE_MAX*(1E-300))
|
||||
self.bool_val = random.randint(0,100)%2
|
||||
self.binary_val = tdCom.getLongName(random.randint(0,self.str_length))
|
||||
self.nchar_val = tdCom.getLongName(random.randint(0,self.str_length))
|
||||
self.base_data = {
|
||||
'tinyint':self.tinyint_val,
|
||||
'smallint':self.smallint_val,
|
||||
'int':self.int_val,
|
||||
'bigint':self.bigint_val,
|
||||
'tinyint unsigned':self.untingint_val,
|
||||
'smallint unsigned':self.unsmallint_val,
|
||||
'int unsigned':self.unint_val,
|
||||
'bigint unsigned':self.unbigint_val,
|
||||
'bool':self.bool_val,
|
||||
'float':self.float_val,
|
||||
'double':self.double_val,
|
||||
'binary':self.binary_val,
|
||||
'nchar':self.nchar_val
|
||||
}
|
||||
def insert_base_data(self,col_type,tbname,rows,base_data):
|
||||
for i in range(rows):
|
||||
if col_type.lower() == 'tinyint':
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["tinyint"]})')
|
||||
elif col_type.lower() == 'smallint':
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["smallint"]})')
|
||||
elif col_type.lower() == 'int':
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["int"]})')
|
||||
elif col_type.lower() == 'bigint':
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bigint"]})')
|
||||
elif col_type.lower() == 'tinyint unsigned':
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["tinyint unsigned"]})')
|
||||
elif col_type.lower() == 'smallint unsigned':
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["smallint unsigned"]})')
|
||||
elif col_type.lower() == 'int unsigned':
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["int unsigned"]})')
|
||||
elif col_type.lower() == 'bigint unsigned':
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bigint unsigned"]})')
|
||||
elif col_type.lower() == 'bool':
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bool"]})')
|
||||
elif col_type.lower() == 'float':
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["float"]})')
|
||||
elif col_type.lower() == 'double':
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["double"]})')
|
||||
elif 'binary' in col_type.lower():
|
||||
tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['binary']}")''')
|
||||
elif 'nchar' in col_type.lower():
|
||||
tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['nchar']}")''')
|
||||
|
||||
def delete_all_data(self,tbname,col_type,row_num,base_data,dbname):
|
||||
tdSql.execute(f'delete from {tbname}')
|
||||
tdSql.execute(f'flush database {dbname}')
|
||||
tdSql.execute('reset query cache')
|
||||
tdSql.query(f'select * from {tbname}')
|
||||
tdSql.checkRows(0)
|
||||
self.insert_base_data(col_type,tbname,row_num,base_data)
|
||||
tdSql.execute(f'flush database {dbname}')
|
||||
tdSql.execute('reset query cache')
|
||||
tdSql.query(f'select * from {tbname}')
|
||||
tdSql.checkRows(row_num)
|
||||
def delete_one_row(self,tbname,column_type,column_name,base_data,dbname):
|
||||
tdSql.execute(f'delete from {tbname} where ts={self.ts}')
|
||||
tdSql.execute(f'flush database {dbname}')
|
||||
tdSql.execute('reset query cache')
|
||||
tdSql.query(f'select {column_name} from {tbname}')
|
||||
tdSql.checkRows(self.rowNum-1)
|
||||
tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}')
|
||||
tdSql.checkRows(0)
|
||||
if 'binary' in column_type.lower():
|
||||
tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['binary']}")''')
|
||||
elif 'nchar' in column_type.lower():
|
||||
tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['nchar']}")''')
|
||||
else:
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts},{base_data[column_type]})')
|
||||
tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}')
|
||||
if column_type.lower() == 'float' or column_type.lower() == 'double':
|
||||
if abs(tdSql.queryResult[0][0] - base_data[column_type]) / base_data[column_type] <= 0.0001:
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0])
|
||||
else:
|
||||
tdLog.exit(f'{column_type} data check failure')
|
||||
elif 'binary' in column_type.lower():
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],base_data['binary'])
|
||||
elif 'nchar' in column_type.lower():
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],base_data['nchar'])
|
||||
else:
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type])
|
||||
|
||||
def delete_rows(self):
|
||||
|
||||
|
||||
pass
|
||||
def delete_error(self,tbname,column_name,column_type,base_data):
|
||||
for error_list in ['',f'ts = {self.ts} and',f'ts = {self.ts} or']:
|
||||
if 'binary' in column_type.lower():
|
||||
tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['binary']}"''')
|
||||
elif 'nchar' in column_type.lower():
|
||||
tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['nchar']}"''')
|
||||
else:
|
||||
tdSql.error('delete from {tbname} where {error_list} {column_name} = {base_data[column_type]}')
|
||||
|
||||
def delete_data_ntb(self):
|
||||
tdSql.execute(f'create database if not exists {self.dbname}')
|
||||
tdSql.execute(f'use {self.dbname}')
|
||||
for col_name,col_type in self.column_dict.items():
|
||||
tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})')
|
||||
self.insert_base_data(col_type,self.ntbname,self.rowNum,self.base_data)
|
||||
self.delete_one_row(self.ntbname,col_type,col_name,self.base_data,self.dbname)
|
||||
self.delete_all_data(self.ntbname,col_type,self.rowNum,self.base_data,self.dbname)
|
||||
self.delete_error(self.ntbname,col_name,col_type,self.base_data)
|
||||
for i in range(self.rowNum):
|
||||
tdSql.execute(f'delete from {self.ntbname} where ts>{self.ts+i}')
|
||||
tdSql.execute(f'flush database {self.dbname}')
|
||||
tdSql.execute('reset query cache')
|
||||
tdSql.query(f'select {col_name} from {self.ntbname}')
|
||||
tdSql.checkRows(i+1)
|
||||
self.insert_base_data(col_type,self.ntbname,self.rowNum,self.base_data)
|
||||
|
||||
tdSql.execute(f'drop table {self.ntbname}')
|
||||
|
||||
def run(self):
|
||||
self.delete_data_ntb()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,49 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
import threading
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def genMultiThreadSeq(self, sql_list):
|
||||
tlist = list()
|
||||
for insert_sql in sql_list:
|
||||
t = threading.Thread(target=tdSql.execute, args=(insert_sql,))
|
||||
tlist.append(t)
|
||||
return tlist
|
||||
|
||||
def multiThreadRun(self, tlist):
|
||||
for t in tlist:
|
||||
t.start()
|
||||
for t in tlist:
|
||||
t.join()
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
tdSql.execute('create database if not exists test;')
|
||||
tdSql.execute('create table test.stb (ts timestamp, c11 int, c12 float ) TAGS(t11 int, t12 int );')
|
||||
tdSql.execute('create table test.tb using test.stb TAGS (1, 1);')
|
||||
sql_list = list()
|
||||
for i in range(5):
|
||||
sql = f'insert into test.tb values (now-{i}m, {i}, {i});'
|
||||
sql_list.append(sql)
|
||||
sql_list.append(f'drop database test;')
|
||||
tlist = self.genMultiThreadSeq(sql_list)
|
||||
self.multiThreadRun(tlist)
|
||||
tdSql.query(f'show databases')
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -0,0 +1,229 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import random
|
||||
import string
|
||||
from util import constant
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.common import *
|
||||
from util.sqlset import TDSetSql
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(),logSql)
|
||||
self.setsql = TDSetSql()
|
||||
self.dbname = 'db_test'
|
||||
self.ntbname = 'ntb'
|
||||
self.stbname = 'stb'
|
||||
self.ctbname = 'ctb'
|
||||
self.ts = 1537146000000
|
||||
self.str_length = 20
|
||||
self.column_dict = {
|
||||
'col1': 'tinyint',
|
||||
'col2': 'smallint',
|
||||
'col3': 'int',
|
||||
'col4': 'bigint',
|
||||
'col5': 'tinyint unsigned',
|
||||
'col6': 'smallint unsigned',
|
||||
'col7': 'int unsigned',
|
||||
'col8': 'bigint unsigned',
|
||||
'col9': 'float',
|
||||
'col10': 'double',
|
||||
'col11': 'bool',
|
||||
'col12': f'binary({self.str_length})',
|
||||
'col13': f'nchar({self.str_length})',
|
||||
'col_ts' : 'timestamp'
|
||||
}
|
||||
|
||||
def data_check(self,tbname,col_name,col_type,value):
|
||||
tdSql.query(f'select {col_name} from {tbname}')
|
||||
if col_type.lower() == 'float' or col_type.lower() == 'double':
|
||||
if abs(tdSql.queryResult[0][0] - value) / value <= 0.0001:
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0])
|
||||
else:
|
||||
tdLog.exit(f'{col_name} data check failure')
|
||||
elif col_type.lower() == 'timestamp':
|
||||
tdSql.checkEqual(str(tdSql.queryResult[0][0]),str(datetime.datetime.fromtimestamp(value/1000).strftime("%Y-%m-%d %H:%M:%S.%f")))
|
||||
else:
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],value)
|
||||
def update_and_check_data(self,tbname,col_name,col_type,value,dbname):
|
||||
if 'binary' in col_type.lower() or 'nchar' in col_type.lower():
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts},"{value}")')
|
||||
else:
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts},{value})')
|
||||
self.data_check(tbname,col_name,col_type,value)
|
||||
tdSql.execute(f'flush database {dbname}')
|
||||
tdSql.execute('reset query cache')
|
||||
self.data_check(tbname,col_name,col_type,value)
|
||||
for func in ['first','last']:
|
||||
tdSql.execute(f'select {func}({col_name}) from {tbname}')
|
||||
def error_check(self,tbname,column_dict,tb_type=None,stbname=None):
|
||||
str_length = self.str_length+1
|
||||
for col_name,col_type in column_dict.items():
|
||||
if tb_type == 'ntb':
|
||||
tdSql.execute(f'create table {tbname} (ts timestamp,{col_name} {col_type})')
|
||||
elif tb_type == 'ctb':
|
||||
tdSql.execute(f'create table {stbname} (ts timestamp,{col_name} {col_type}) tags(t0 int)')
|
||||
tdSql.execute(f'create table {tbname} using {stbname} tags(1)')
|
||||
tdSql.execute(f'insert into {tbname} values({self.ts},null)')
|
||||
if col_type.lower() == 'double':
|
||||
for error_value in [tdCom.getLongName(self.str_length),True,False,1.1*constant.DOUBLE_MIN,1.1*constant.DOUBLE_MAX]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
|
||||
elif col_type.lower() == 'float':
|
||||
for error_value in [tdCom.getLongName(self.str_length),True,False,1.1*constant.FLOAT_MIN,1.1*constant.FLOAT_MAX]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
|
||||
elif 'binary' in col_type.lower() or 'nchar' in col_type.lower():
|
||||
for error_value in [tdCom.getLongName(str_length)]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},"{error_value}")')
|
||||
elif col_type.lower() == 'bool':
|
||||
for error_value in [tdCom.getLongName(self.str_length)]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
|
||||
elif col_type.lower() == 'tinyint':
|
||||
for error_value in [constant.TINYINT_MIN-1,constant.TINYINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
|
||||
elif col_type.lower() == 'smallint':
|
||||
for error_value in [constant.SMALLINT_MIN-1,constant.SMALLINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
|
||||
elif col_type.lower() == 'int':
|
||||
for error_value in [constant.INT_MIN-1,constant.INT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
|
||||
elif col_type.lower() == 'bigint':
|
||||
for error_value in [constant.BIGINT_MIN-1,constant.BIGINT_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
|
||||
elif col_type.lower() == 'tinyint unsigned':
|
||||
for error_value in [constant.TINYINT_UN_MIN-1,constant.TINYINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
|
||||
elif col_type.lower() == 'smallint unsigned':
|
||||
for error_value in [constant.SMALLINT_UN_MIN-1,constant.SMALLINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
|
||||
elif col_type.lower() == 'int unsigned':
|
||||
for error_value in [constant.INT_UN_MIN-1,constant.INT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
|
||||
elif col_type.lower() == 'bigint unsigned':
|
||||
for error_value in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1,random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX),tdCom.getLongName(self.str_length),True,False]:
|
||||
tdSql.error(f'insert into {tbname} values({self.ts},{error_value})')
|
||||
tdSql.execute(f'drop table {tbname}')
|
||||
if tb_type == 'ctb':
|
||||
tdSql.execute(f'drop table {stbname}')
|
||||
def update_data_check(self,tbname,column_dict,dbname,tb_type=None,stbname=None):
|
||||
up_tinyint = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX)
|
||||
up_smallint = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX)
|
||||
up_int = random.randint(constant.INT_MIN,constant.INT_MAX)
|
||||
up_bigint = random.randint(constant.BIGINT_MIN,constant.BIGINT_MAX)
|
||||
up_untinyint = random.randint(constant.TINYINT_UN_MIN,constant.TINYINT_UN_MAX)
|
||||
up_unsmallint = random.randint(constant.SMALLINT_UN_MIN,constant.SMALLINT_UN_MAX)
|
||||
up_unint = random.randint(constant.INT_UN_MIN,constant.INT_MAX)
|
||||
up_unbigint = random.randint(constant.BIGINT_UN_MIN,constant.BIGINT_UN_MAX)
|
||||
up_bool = random.randint(0,100)%2
|
||||
up_float = random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX)
|
||||
up_double = random.uniform(constant.DOUBLE_MIN*(1E-300),constant.DOUBLE_MAX*(1E-300))
|
||||
binary_length = random.randint(0,self.str_length)
|
||||
nchar_length = random.randint(0,self.str_length)
|
||||
up_binary = tdCom.getLongName(binary_length)
|
||||
up_nchar = tdCom.getLongName(nchar_length)
|
||||
        for col_name,col_type in column_dict.items():
            if tb_type == 'ntb':
                tdSql.execute(f'create table {tbname} (ts timestamp,{col_name} {col_type})')
            elif tb_type == 'ctb':
                tdSql.execute(f'create table {stbname} (ts timestamp,{col_name} {col_type}) tags(t0 int)')
                tdSql.execute(f'create table {tbname} using {stbname} tags(1)')
            tdSql.execute(f'insert into {tbname} values({self.ts},null)')
            if col_type.lower() == 'tinyint':
                self.update_and_check_data(tbname,col_name,col_type,up_tinyint,dbname)
            elif col_type.lower() == 'smallint':
                self.update_and_check_data(tbname,col_name,col_type,up_smallint,dbname)
            elif col_type.lower() == 'int':
                self.update_and_check_data(tbname,col_name,col_type,up_int,dbname)
            elif col_type.lower() == 'bigint':
                self.update_and_check_data(tbname,col_name,col_type,up_bigint,dbname)
            elif col_type.lower() == 'tinyint unsigned':
                self.update_and_check_data(tbname,col_name,col_type,up_untinyint,dbname)
            elif col_type.lower() == 'smallint unsigned':
                self.update_and_check_data(tbname,col_name,col_type,up_unsmallint,dbname)
            elif col_type.lower() == 'int unsigned':
                self.update_and_check_data(tbname,col_name,col_type,up_unint,dbname)
            elif col_type.lower() == 'bigint unsigned':
                self.update_and_check_data(tbname,col_name,col_type,up_unbigint,dbname)
            elif col_type.lower() == 'bool':
                self.update_and_check_data(tbname,col_name,col_type,up_bool,dbname)
            elif col_type.lower() == 'float':
                self.update_and_check_data(tbname,col_name,col_type,up_float,dbname)
            elif col_type.lower() == 'double':
                self.update_and_check_data(tbname,col_name,col_type,up_double,dbname)
            elif 'binary' in col_type.lower():
                self.update_and_check_data(tbname,col_name,col_type,up_binary,dbname)
            elif 'nchar' in col_type.lower():
                self.update_and_check_data(tbname,col_name,col_type,up_nchar,dbname)
            elif col_type.lower() == 'timestamp':
                self.update_and_check_data(tbname,col_name,col_type,self.ts+1,dbname)
            tdSql.execute(f'insert into {tbname} values({self.ts},null)')
            tdSql.query(f'select {col_name} from {tbname}')
            tdSql.checkEqual(tdSql.queryResult[0][0],None)
            tdSql.execute(f'flush database {self.dbname}')
            tdSql.execute('reset query cache')
            tdSql.query(f'select {col_name} from {tbname}')
            tdSql.checkEqual(tdSql.queryResult[0][0],None)
            tdSql.execute(f'drop table {tbname}')
            if tb_type == 'ctb':
                tdSql.execute(f'drop table {stbname}')
    def update_check(self):
        tdSql.execute(f'drop database if exists {self.dbname}')
        tdSql.execute(f'create database {self.dbname}')
        tdSql.execute(f'use {self.dbname}')
        self.update_data_check(self.ntbname,self.column_dict,self.dbname,'ntb')
        for col_name,col_type in self.column_dict.items():
            tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})')
            tdSql.execute(f'insert into {self.ntbname} values({self.ts},null)')
            if 'binary' in col_type.lower():
                up_binary = tdCom.getLongName(self.str_length+1)
                tdSql.execute(f'alter table {self.ntbname} modify column {col_name} binary({self.str_length+1})')
                self.update_and_check_data(self.ntbname,col_name,col_type,up_binary,self.dbname)
            elif 'nchar' in col_type.lower():
                up_nchar = tdCom.getLongName(self.str_length+1)
                tdSql.execute(f'alter table {self.ntbname} modify column {col_name} nchar({self.str_length+1})')
                self.update_and_check_data(self.ntbname,col_name,col_type,up_nchar,self.dbname)
            tdSql.execute(f'drop table {self.ntbname}')

        self.update_data_check(self.ctbname,self.column_dict,self.dbname,'ctb',self.stbname)
        for col_name,col_type in self.column_dict.items():
            tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t0 int)')
            tdSql.execute(f'create table {self.ctbname} using {self.stbname} tags(1)')
            tdSql.execute(f'insert into {self.ctbname} values({self.ts},null)')
            if 'binary' in col_type.lower():
                up_binary = tdCom.getLongName(self.str_length+1)
                tdSql.execute(f'alter table {self.stbname} modify column {col_name} binary({self.str_length+1})')
                self.update_and_check_data(self.ctbname,col_name,col_type,up_binary,self.dbname)
            elif 'nchar' in col_type.lower():
                up_nchar = tdCom.getLongName(self.str_length+1)
                tdSql.execute(f'alter table {self.stbname} modify column {col_name} nchar({self.str_length+1})')
                self.update_and_check_data(self.ctbname,col_name,col_type,up_nchar,self.dbname)
            tdSql.execute(f'drop table {self.stbname}')

    def update_check_error(self):
        tdSql.execute(f'drop database if exists {self.dbname}')
        tdSql.execute(f'create database {self.dbname}')
        tdSql.execute(f'use {self.dbname}')
        self.error_check(self.ntbname,self.column_dict,'ntb')
        self.error_check(self.ctbname,self.column_dict,'ctb',self.stbname)

    def run(self):
        self.update_check()
        self.update_check_error()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
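update_and_check_data is called throughout this file but its body lies outside the hunks shown here. The sketch below only mirrors the insert / select / flush / re-check pattern visible above; the method name is suffixed and the quoting rule for string columns is an assumption.

    # Sketch only: the real update_and_check_data is defined elsewhere in this file.
    def update_and_check_data_sketch(self, tbname, col_name, col_type, value, dbname):
        if 'binary' in col_type.lower() or 'nchar' in col_type.lower():
            tdSql.execute(f'insert into {tbname} values({self.ts},"{value}")')  # assumed quoting for string columns
        else:
            tdSql.execute(f'insert into {tbname} values({self.ts},{value})')
        tdSql.query(f'select {col_name} from {tbname}')
        tdSql.checkEqual(tdSql.queryResult[0][0], value)
        # the updated value should survive a flush and a query-cache reset
        tdSql.execute(f'flush database {dbname}')
        tdSql.execute('reset query cache')
        tdSql.query(f'select {col_name} from {tbname}')
        tdSql.checkEqual(tdSql.queryResult[0][0], value)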

@@ -554,6 +554,9 @@ class TDTestCase:
        tdSql.query("select t1 from stb1 where abs(c1+t1)=1")
        tdSql.checkRows(1)
        tdSql.checkData(0,0,0)

        tdSql.query("select abs(c1) from (select ts , c1 ,t1 from stb1)")
        tdSql.checkRows(25)

        tdSql.query(
            "select abs(c1+t1)*t1 from stb1 where abs(c1)/floor(abs(ceil(t1))) ==1")

@@ -435,8 +435,8 @@ class TDTestCase:
        tdSql.checkRows(40)

        # # bug need fix
        # tdSql.query("select csum(st1+c1) from stb1 partition by tbname slimit 1 ")
        # tdSql.checkRows(4)
        tdSql.query("select csum(st1+c1) from stb1 partition by tbname slimit 1 ")
        tdSql.checkRows(4)
        # tdSql.error("select csum(st1+c1) from stb1 partition by tbname limit 1 ")

@@ -722,10 +722,18 @@ class TDTestCase:
        tdSql.query("select last_row(ceil(c1-2)) , abs(floor(t1+1)) ,floor(c2-c1) from testdb.stb1 partition by abs(floor(c1)) order by abs(c1)")
        tdSql.checkRows(11)

        tdSql.query("select max(c1) from stb1 interval(50s) sliding(30s)")
        tdSql.checkRows(13)

        tdSql.query("select unique(c1) from stb1 partition by tbname")

        # interval
        tdSql.query("select last_row(c1) from testdb.stb1 interval(50s) sliding(30s)")
        tdSql.checkRows(27)

        tdSql.query("select last_row(c1) from testdb.ct1 interval(50s) sliding(30s)")
        tdSql.checkRows(5)
        last_row_result = tdSql.queryResult

@@ -162,10 +162,45 @@ class TDTestCase:
        tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)")
        tdSql.checkRows(self.row_nums*2)

        tdSql.query("select unique(c1) from stb partition by tbname order by tbname")

        tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)")
        tdSql.checkData(0,0,'sub_stb_1')
        tdSql.checkData(0,1,self.row_nums)

        tdSql.query("select c1 , mavg(c1 ,2 ) from stb partition by c1")
        tdSql.checkRows(72)

        tdSql.query("select c1 , diff(c1 , 0) from stb partition by c1")
        tdSql.checkRows(72)

        tdSql.query("select c1 , csum(c1) from stb partition by c1")
        tdSql.checkRows(80)

        tdSql.query("select c1 , sample(c1,2) from stb partition by c1 order by c1")
        tdSql.checkRows(21)
        # bug need fix
        # tdSql.checkData(0,1,None)

        tdSql.query("select c1 , twa(c1) from stb partition by c1 order by c1")
        tdSql.checkRows(11)
        tdSql.checkData(0,1,0.000000000)

        tdSql.query("select c1 , irate(c1) from stb partition by c1 order by c1")
        tdSql.checkRows(11)
        tdSql.checkData(0,1,None)

        tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1")
        tdSql.checkRows(72)
        # bug need fix
        # tdSql.checkData(0,1,None)

        # bug need fix
        # tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
        # tdSql.checkRows(5)

@@ -870,7 +870,10 @@ class TDTestCase:
        tdSql.query("select sample(c1 ,1000) from st")
        tdSql.checkRows(1000)

        # bug need fix
        tdSql.query("select c1 ,t1, sample(c1,2) from db.stb1 partition by c1 ")
        tdSql.query("select sample(c1,2) from db.stb1 partition by c1 ")
        # tdSql.query("select c1 ,ind, sample(c1,2) from sample_db.st partition by c1 ")

    def run(self):
        import traceback

@@ -97,18 +97,18 @@ class TDTestCase:
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        # update to half tables
        paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
        # paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2)
        # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx",
        #                                       ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
        #                                       startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
        tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
                                               ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
                                               startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
        # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
        #                                        ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
        #                                        startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])

        tdLog.info("create topics from stb1")
        topicFromStb1 = 'topic_stb1'
        # queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
        queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
        queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
        # queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)

@@ -242,11 +242,11 @@ class TDTestCase:
        self.tmqCase1()
        # self.tmqCase2()

        self.prepareTestEnv()
        tdLog.printNoPrefix("====================================================================")
        tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
        self.snapshot = 1
        self.tmqCase1()
        # self.prepareTestEnv()
        # tdLog.printNoPrefix("====================================================================")
        # tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
        # self.snapshot = 1
        # self.tmqCase1()
        # self.tmqCase2()

@@ -205,6 +205,13 @@ class TMQCom:
        tdLog.debug("complete to create %d child tables by %s.%s" %(ctbNum, dbName, stbName))
        return

    def drop_ctable(self, tsql, dbname=None, count=1, default_ctbname_prefix="ctb",ctbStartIdx=0):
        for _ in range(count):
            create_ctable_sql = f'drop table {dbname}.{default_ctbname_prefix}{ctbStartIdx};'
            ctbStartIdx += 1
            tdLog.info("drop ctb sql: %s"%create_ctable_sql)
            tsql.execute(create_ctable_sql)

    # schema: (ts timestamp, c1 int, c2 binary(16))
    def insert_data(self,tsql,dbName,stbName,ctbNum,rowsPerTbl,batchNum,startTs=None):
        tdLog.debug("start to insert data ............")
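A possible call of the drop_ctable helper added above; the database name, child-table prefix, and start index below are made-up values, not taken from this diff.

# Hypothetical usage of drop_ctable: drops db0.ctb3 and db0.ctb4 in turn.
tmqCom.drop_ctable(tdSql, dbname='db0', count=2, default_ctbname_prefix='ctb', ctbStartIdx=3)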

@@ -116,7 +116,12 @@ class TDTestCase:
        # paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl
        consumerId = 0
        expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 3/2)

        if self.snapshot == 0:
            expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/2))
        elif self.snapshot == 1:
            expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1))

        topicList = topicFromStb1
        ifcheckdata = 1
        ifManualCommit = 1
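With made-up table counts, the branch above works out as follows; the interpretation (WAL consumption also replays the update inserts, while snapshot consumption returns each row once) is an assumption, not something stated in this diff.

# Illustration only -- rowsPerTbl and ctbNum values are invented.
rowsPerTbl, ctbNum = 1000, 10
expect_from_wal      = int(rowsPerTbl * ctbNum * (1 + 1/2))   # snapshot == 0 -> 15000
expect_from_snapshot = int(rowsPerTbl * ctbNum * (1))         # snapshot == 1 -> 10000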

@@ -199,7 +204,11 @@ class TDTestCase:
        # paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl
        consumerId = 1
        expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2)
        if self.snapshot == 0:
            expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2))
        elif self.snapshot == 1:
            expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1))

        topicList = topicFromStb1
        ifcheckdata = 1
        ifManualCommit = 1

@@ -10,7 +10,7 @@ python3 ./test.py -f 0-others/taosdMonitor.py
python3 ./test.py -f 0-others/udfTest.py
python3 ./test.py -f 0-others/udf_create.py
python3 ./test.py -f 0-others/udf_restart_taosd.py
python3 ./test.py -f 0-others/cachelast.py
python3 ./test.py -f 0-others/cachemodel.py
python3 ./test.py -f 0-others/udf_cfg1.py
python3 ./test.py -f 0-others/udf_cfg2.py

@@ -32,7 +32,10 @@ python3 ./test.py -f 1-insert/block_wise.py
python3 ./test.py -f 1-insert/create_retentions.py
python3 ./test.py -f 1-insert/table_param_ttl.py

python3 ./test.py -f 1-insert/update_data.py

python3 ./test.py -f 2-query/db.py

python3 ./test.py -f 2-query/between.py
python3 ./test.py -f 2-query/distinct.py
python3 ./test.py -f 2-query/varchar.py

@@ -182,7 +185,7 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
#python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
#python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py
python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py
python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb.py
#python3 ./test.py -f 7-tmq/tmqDelete-1ctb.py

@@ -45,10 +45,10 @@ typedef enum {
  NOTIFY_CMD_ID_BUTT,
} NOTIFY_CMD_ID;

typedef enum enumQUERY_TYPE {
  NO_INSERT_TYPE,
  INSERT_TYPE,
  QUERY_TYPE_BUT
typedef enum enumQUERY_TYPE {
  NO_INSERT_TYPE,
  INSERT_TYPE,
  QUERY_TYPE_BUT
} QUERY_TYPE;

typedef struct {

@@ -587,9 +587,10 @@ static int32_t meta_msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIn
                     tmq_get_topic_name(msg), vgroupId);

  {
    tmq_raw_data *raw = tmq_get_raw_meta(msg);
    tmq_raw_data raw = {0};
    int32_t code = tmq_get_raw_meta(msg, &raw);

    if(raw){
    if(code == TSDB_CODE_SUCCESS){
      TAOS_RES* pRes = taos_query(pInfo->taos, "use metadb");
      if (taos_errno(pRes) != 0) {
        pError("error when use metadb, reason:%s\n", taos_errstr(pRes));

@@ -599,10 +600,9 @@ static int32_t meta_msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIn
        exit(-1);
      }
      taos_free_result(pRes);
      taosFprintfFile(g_fp, "raw:%p\n", raw);
      taosFprintfFile(g_fp, "raw:%p\n", &raw);

      int32_t ret = taos_write_raw_meta(pInfo->taos, raw);
      taosMemoryFree(raw);
      taos_write_raw_meta(pInfo->taos, raw);
    }

    char* result = tmq_get_json_meta(msg);

@@ -1159,23 +1159,23 @@ void* ombConsumeThreadFunc(void* param) {

static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type) {
  TAOS_RES *res = taos_query(taos, command);
  int32_t code = taos_errno(res);

  if (code != 0) {
  TAOS_RES *res = taos_query(taos, command);
  int32_t code = taos_errno(res);

  if (code != 0) {
    pPrint("%s Failed to execute <%s>, reason: %s %s", GREEN, command, taos_errstr(res), NC);
    taos_free_result(res);
    return -1;
  }

  if (INSERT_TYPE == type) {
    int affectedRows = taos_affected_rows(res);
    taos_free_result(res);
    return affectedRows;
  }

  taos_free_result(res);
  return 0;
}

  if (INSERT_TYPE == type) {
    int affectedRows = taos_affected_rows(res);
    taos_free_result(res);
    return affectedRows;
  }

  taos_free_result(res);
  return 0;
}

void* ombProduceThreadFunc(void* param) {
|
@ -737,13 +737,6 @@ int32_t shellDumpResult(TAOS_RES *tres, char *fname, int32_t *error_no, bool ver
|
|||
|
||||
void shellReadHistory() {
|
||||
SShellHistory *pHistory = &shell.history;
|
||||
int64_t file_size;
|
||||
if (taosStatFile(pHistory->file, &file_size, NULL) != 0) {
|
||||
return;
|
||||
} else if (file_size > SHELL_MAX_COMMAND_SIZE) {
|
||||
taosRemoveFile(pHistory->file);
|
||||
return;
|
||||
}
|
||||
TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_READ | TD_FILE_STREAM);
|
||||
if (pFile == NULL) return;
|
||||
|
||||
|

@@ -763,10 +756,29 @@ void shellReadHistory() {

  if (line != NULL) taosMemoryFree(line);
  taosCloseFile(&pFile);
  int64_t file_size;
  if (taosStatFile(pHistory->file, &file_size, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) {
    fprintf(stdout,"%s(%d) %s %08" PRId64 "\n", __FILE__, __LINE__,__func__,taosGetSelfPthreadId());fflush(stdout);
    TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC);
    if (pFile == NULL) return;
    int32_t endIndex = pHistory->hstart;
    if (endIndex != 0) {
      endIndex = pHistory->hend;
    }
    for (int32_t i = (pHistory->hend + SHELL_MAX_HISTORY_SIZE - 1) % SHELL_MAX_HISTORY_SIZE; i != endIndex;) {
      taosFprintfFile(pFile, "%s\n", pHistory->hist[i]);
      i = (i + SHELL_MAX_HISTORY_SIZE - 1) % SHELL_MAX_HISTORY_SIZE;
    }
    taosFprintfFile(pFile, "%s\n", pHistory->hist[endIndex]);
    taosFsyncFile(pFile);
    taosCloseFile(&pFile);
  }
  pHistory->hend = pHistory->hstart;
}

void shellWriteHistory() {
  SShellHistory *pHistory = &shell.history;
  if (pHistory->hend == pHistory->hstart) return;
  TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_APPEND);
  if (pFile == NULL) return;