diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index d85c8e5871..a63225ab32 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,5 @@
 name: tdengine
 base: core18
-version: '2.1.1.0'
 icon: snap/gui/t-dengine.svg
 summary: an open-source big data platform designed and optimized for IoT.
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index acfc1b0cf5..61b659f96c 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -7187,6 +7187,11 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, int32_t index) {
   const char* msg1 = "point interpolation query needs timestamp";
   const char* msg2 = "too many tables in from clause";
   const char* msg3 = "start(end) time of query range required or time range too large";
+  // const char* msg5 = "too many columns in selection clause";
+  // const char* msg6 = "too many tables in from clause";
+  // const char* msg7 = "invalid table alias name";
+  // const char* msg8 = "alias name too long";
+  const char* msg9 = "only tag query not compatible with normal column filter";
 
   int32_t code = TSDB_CODE_SUCCESS;
 
@@ -7326,6 +7331,20 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, int32_t index) {
     }
   }
 
+  if (tscQueryTags(pQueryInfo)) {
+    SExprInfo* pExpr1 = tscSqlExprGet(pQueryInfo, 0);
+
+    if (pExpr1->base.functionId != TSDB_FUNC_TID_TAG) {
+      int32_t numOfCols = (int32_t)taosArrayGetSize(pQueryInfo->colList);
+      for (int32_t i = 0; i < numOfCols; ++i) {
+        SColumn* pCols = taosArrayGetP(pQueryInfo->colList, i);
+        if (pCols->info.flist.numOfFilters > 0) {
+          return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
+        }
+      }
+    }
+  }
+
   // parse the having clause in the first place
   if (validateHavingClause(pQueryInfo, pSqlNode->pHaving, pCmd, pSqlNode->pSelNodeList, joinQuery, timeWindowQuery) !=
       TSDB_CODE_SUCCESS) {
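Editor's note (not part of the patch): the new msg9 check makes the parser reject a tag-only projection that also filters on a normal column — the TD-4231 cases exercised in select_with_tags.sim at the end of this patch. A minimal client-side sketch of the expected behavior, assuming the standard C client API (taos_connect/taos_query/taos_errno/taos_errstr) and a hypothetical super table `st` with tag `t1` and normal column `c1`:

```c
#include <stdio.h>
#include <taos.h>

// The parser should now fail this statement up front with msg9 instead of
// running it: it selects only tags/tbname but filters on normal column c1.
static void demo_tag_filter_rejection(TAOS *conn) {
  TAOS_RES *res = taos_query(conn, "select t1, tbname from st where c1 < 0");
  if (taos_errno(res) != 0) {
    // expected: "only tag query not compatible with normal column filter"
    printf("rejected as expected: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}
```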
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 9b7d5c0c7f..a2afaf286f 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -1928,8 +1928,9 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
     }
   }
 
-  tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s", pSql->self, pTableMeta->id.uid, pTableMeta->id.tid,
-           tNameGetTableName(&pTableMetaInfo->name));
+  tscDebug("0x%"PRIx64" recv table meta, uid:%" PRIu64 ", tid:%d, name:%s, numOfCols:%d, numOfTags:%d", pSql->self,
+           pTableMeta->id.uid, pTableMeta->id.tid, tNameGetTableName(&pTableMetaInfo->name), pTableMeta->tableInfo.numOfColumns,
+           pTableMeta->tableInfo.numOfTags);
 
   free(pTableMeta);
   return TSDB_CODE_SUCCESS;
@@ -2072,7 +2073,7 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
     pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups;
     if (pInfo->vgroupList->numOfVgroups <= 0) {
-      tscDebug("0x%"PRIx64" empty vgroup info, no corresponding tables for stable", pSql->self);
+      tscDebug("0x%" PRIx64 " empty vgroup info, no corresponding tables for stable", pSql->self);
     } else {
       for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) {
         // just init, no need to lock
diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c
index 6928058f23..f02bd22540 100644
--- a/src/client/src/tscSub.c
+++ b/src/client/src/tscSub.c
@@ -215,7 +215,7 @@ static void tscProcessSubscriptionTimer(void *handle, void *tmrId) {
   taosTmrReset(tscProcessSubscriptionTimer, pSub->interval, pSub, tscTmr, &pSub->pTimer);
 }
 
-
+//TODO refactor: extract table list name not simply from the sql
 static SArray* getTableList( SSqlObj* pSql ) {
   const char* p = strstr( pSql->sqlstr, " from " );
   assert(p != NULL); // we are sure this is a 'select' statement
@@ -224,11 +224,11 @@ static SArray* getTableList( SSqlObj* pSql ) {
 
   SSqlObj* pNew = taos_query(pSql->pTscObj, sql);
   if (pNew == NULL) {
-    tscError("0x%"PRIx64"failed to retrieve table id: cannot create new sql object.", pSql->self);
+    tscError("0x%"PRIx64" failed to retrieve table id: cannot create new sql object.", pSql->self);
     return NULL;
 
   } else if (taos_errno(pNew) != TSDB_CODE_SUCCESS) {
-    tscError("0x%"PRIx64"failed to retrieve table id,error: %s", pSql->self, tstrerror(taos_errno(pNew)));
+    tscError("0x%"PRIx64" failed to retrieve table id, error: %s", pSql->self, tstrerror(taos_errno(pNew)));
     return NULL;
   }
diff --git a/src/connector/go b/src/connector/go
index 050667e5b4..8ce6d86558 160000
--- a/src/connector/go
+++ b/src/connector/go
@@ -1 +1 @@
-Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f
+Subproject commit 8ce6d86558afc8c0b50c10f990fd2b4270cf06fc
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index d3ac0ce280..0d32e17a4c 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -219,6 +219,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_VND_NO_WRITE_AUTH             TAOS_DEF_ERROR_CODE(0, 0x0512)  //"Database write operation denied")
 #define TSDB_CODE_VND_IS_SYNCING                TAOS_DEF_ERROR_CODE(0, 0x0513)  //"Database is syncing")
 #define TSDB_CODE_VND_INVALID_TSDB_STATE        TAOS_DEF_ERROR_CODE(0, 0x0514)  //"Invalid tsdb state")
+#define TSDB_CODE_VND_IS_CLOSING                TAOS_DEF_ERROR_CODE(0, 0x0515)  //"Database is closing")
 
 // tsdb
 #define TSDB_CODE_TDB_INVALID_TABLE_ID          TAOS_DEF_ERROR_CODE(0, 0x0600)  //"Invalid table ID")
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index d70ff9649b..52faf8d3a0 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -470,6 +470,7 @@ typedef struct SThreadInfo_S {
   // seq of query or subscribe
   uint64_t  querySeq;   // sequence number of sql command
+  TAOS_SUB* tsub;
 
 } threadInfo;
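Editor's note (not part of the patch): threadInfo's new TAOS_SUB* field lets each taosdemo worker keep its subscription handle across iterations instead of rebuilding it. A sketch of the lifecycle such a handle supports, assuming the C subscription API (taos_subscribe/taos_consume/taos_unsubscribe) and a hypothetical topic and query:

```c
#include <stddef.h>
#include <taos.h>

// Synchronous subscription: pass a NULL callback, then poll with taos_consume().
static void subscribe_poll_once(TAOS *conn) {
  TAOS_SUB *tsub = taos_subscribe(conn, 0 /* restart */, "demo_topic",
                                  "select * from st;", NULL, NULL, 1000 /* ms */);
  if (tsub == NULL) return;

  TAOS_RES *rows = taos_consume(tsub);  // returns only rows added since the last poll
  (void)rows;                           // ... process the result set here ...

  taos_unsubscribe(tsub, 1 /* keep progress so the next run resumes */);
}
```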
diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c
index 9e3b87671e..84e4f33ca7 100644
--- a/src/mnode/src/mnodeSdb.c
+++ b/src/mnode/src/mnodeSdb.c
@@ -719,13 +719,13 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
   if (action == SDB_ACTION_INSERT) {
     return sdbPerformInsertAction(pHead, pTable);
   } else if (action == SDB_ACTION_DELETE) {
-    if (qtype == TAOS_QTYPE_FWD) {
+    //if (qtype == TAOS_QTYPE_FWD) {
       // Drop database/stable may take a long time and cause a timeout, so we confirm first then reput it into queue
-      sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused);
-      return TSDB_CODE_SUCCESS;
-    } else {
+    //  sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused);
+    //  return TSDB_CODE_SUCCESS;
+    //} else {
       return sdbPerformDeleteAction(pHead, pTable);
-    }
+    //}
   } else if (action == SDB_ACTION_UPDATE) {
     return sdbPerformUpdateAction(pHead, pTable);
   } else {
diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c
index 4d3125a2d1..4e879537e4 100644
--- a/src/mnode/src/mnodeTable.c
+++ b/src/mnode/src/mnodeTable.c
@@ -1189,8 +1189,8 @@ static int32_t mnodeFindSuperTableTagIndex(SSTableObj *pStable, const char *tagN
 static int32_t mnodeAddSuperTableTagCb(SMnodeMsg *pMsg, int32_t code) {
   SSTableObj *pStable = (SSTableObj *)pMsg->pTable;
-  mLInfo("msg:%p, app:%p stable %s, add tag result:%s", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
-         tstrerror(code));
+  mLInfo("msg:%p, app:%p stable %s, add tag result:%s, numOfTags:%d", pMsg, pMsg->rpcMsg.ahandle, pStable->info.tableId,
+         tstrerror(code), pStable->numOfTags);
 
   return code;
 }
diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c
index 24ec19d665..493d5f117a 100644
--- a/src/sync/src/syncMain.c
+++ b/src/sync/src/syncMain.c
@@ -709,7 +709,7 @@ static void syncChooseMaster(SSyncNode *pNode) {
 }
 
 static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
-  int32_t onlineNum = 0;
+  int32_t onlineNum = 0, arbOnlineNum = 0;
   int32_t masterIndex = -1;
   int32_t replica = pNode->replica;
 
@@ -723,13 +723,15 @@ static SSyncPeer *syncCheckMaster(SSyncNode *pNode) {
   SSyncPeer *pArb = pNode->peerInfo[TAOS_SYNC_MAX_REPLICA];
   if (pArb && pArb->role != TAOS_SYNC_ROLE_OFFLINE) {
     onlineNum++;
+    ++arbOnlineNum;
     replica = pNode->replica + 1;
   }
 
   if (onlineNum <= replica * 0.5) {
     if (nodeRole != TAOS_SYNC_ROLE_UNSYNCED) {
-      if (nodeRole == TAOS_SYNC_ROLE_MASTER && onlineNum == replica * 0.5 && onlineNum >= 1) {
+      if (nodeRole == TAOS_SYNC_ROLE_MASTER && onlineNum == replica * 0.5 && ((replica > 2 && onlineNum - arbOnlineNum > 1) || pNode->replica < 3)) {
         sInfo("vgId:%d, self keep work as master, online:%d replica:%d", pNode->vgId, onlineNum, replica);
+        masterIndex = pNode->selfIndex;
       } else {
         nodeRole = TAOS_SYNC_ROLE_UNSYNCED;
         sInfo("vgId:%d, self change to unsynced state, online:%d replica:%d", pNode->vgId, onlineNum, replica);
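Editor's note (not part of the patch): syncCheckMaster() now counts an online arbitrator in onlineNum but subtracts it again (arbOnlineNum) when deciding whether a master on an exact half split may keep its role, so a master that can only reach the arbitrator — no data peers — steps down. A standalone restatement of the decision, using integer arithmetic in place of replica * 0.5:

```c
#include <stdbool.h>
#include <stdio.h>

// onlineNum includes the arbitrator when it is online, and replica is then
// pNode->replica + 1; configuredReplica mirrors pNode->replica.
static bool keepMaster(int onlineNum, int arbOnlineNum, int replica, int configuredReplica) {
  if (2 * onlineNum != replica) return false;              // not an exact half split
  return (replica > 2 && onlineNum - arbOnlineNum > 1) ||  // >= 2 data nodes still up
         configuredReplica < 3;                            // 1- or 2-replica vgroup
}

int main(void) {
  printf("%d\n", keepMaster(1, 0, 2, 2)); // replica 2, peer down           -> 1 (keep)
  printf("%d\n", keepMaster(2, 1, 4, 3)); // replica 3 + arb, only arb seen -> 0 (step down)
  printf("%d\n", keepMaster(2, 0, 4, 4)); // replica 4, two data peers down -> 1 (keep)
  return 0;
}
```

The second case is the one the change addresses: with the old `onlineNum >= 1` test, a master holding only its arbitrator connection kept accepting writes it could never replicate.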
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index 57d45cc8c0..569f9b01bd 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -613,7 +613,7 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
   // todo memory leak if there are object with refcount greater than 0 in hash table?
   taosHashCleanup(pCacheObj->pHashTable);
 
-  taosTrashcanEmpty(pCacheObj, true);
+  taosTrashcanEmpty(pCacheObj, false);
 
   __cache_lock_destroy(pCacheObj);
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index f133101cca..fdf215a966 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -454,7 +454,11 @@ void vnodeDestroy(SVnodeObj *pVnode) {
   }
 
   if (pVnode->tsdb) {
-    code = tsdbCloseRepo(pVnode->tsdb, 1);
+    // the deleted vnode does not need to commit, so as to speed up the deletion
+    int toCommit = 1;
+    if (pVnode->dropped) toCommit = 0;
+
+    code = tsdbCloseRepo(pVnode->tsdb, toCommit);
     pVnode->tsdb = NULL;
   }
diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c
index e5a1964915..4197428fec 100644
--- a/src/vnode/src/vnodeSync.c
+++ b/src/vnode/src/vnodeSync.c
@@ -126,11 +126,16 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion) {
 }
 
 void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code) {
-  void *pVnode = vnodeAcquire(vgId);
+  SVnodeObj *pVnode = vnodeAcquire(vgId);
   if (pVnode == NULL) {
     vError("vgId:%d, vnode not found while confirm forward", vgId);
   }
 
+  if (pVnode != NULL && code == TSDB_CODE_SYN_CONFIRM_EXPIRED && pVnode->status == TAOS_VN_STATUS_CLOSING) {
+    vDebug("vgId:%d, db:%s, vnode is closing while confirm forward", vgId, pVnode->db);
+    code = TSDB_CODE_VND_IS_CLOSING;
+  }
+
   dnodeSendRpcVWriteRsp(pVnode, wparam, code);
   vnodeRelease(pVnode);
 }
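Editor's note (not part of the patch): vnodeConfirmForard() above maps an expired sync confirmation on a closing vnode to the new TSDB_CODE_VND_IS_CLOSING from taoserror.h. Assuming TAOS_DEF_ERROR_CODE follows the usual TDengine encoding (high bit set, module bits, code in the low 16 bits — an assumption, not verified here), the numeric value can be previewed with a stand-in macro:

```c
#include <stdint.h>
#include <stdio.h>

// DEMO_DEF_ERROR_CODE is a stand-in for TAOS_DEF_ERROR_CODE, assuming the
// encoding 0x80000000 | (mod << 16) | code.
#define DEMO_DEF_ERROR_CODE(mod, code) \
  ((int32_t)(0x80000000u | ((uint32_t)(mod) << 16) | (uint32_t)(code)))

int main(void) {
  int32_t isClosing = DEMO_DEF_ERROR_CODE(0, 0x0515);  // "Database is closing"
  printf("TSDB_CODE_VND_IS_CLOSING ~ 0x%08X\n", (unsigned int)isClosing);  // 0x80000515
  return 0;
}
```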
diff --git a/tests/mas/Jenkinsfile b/tests/mas/Jenkinsfile
new file mode 100644
index 0000000000..b2a1a5e116
--- /dev/null
+++ b/tests/mas/Jenkinsfile
@@ -0,0 +1,309 @@
+def pre_test(){
+
+    sh '''
+    sudo rmtaos||echo 'no taosd installed'
+    '''
+    sh '''
+    cd ${WKC}
+    git reset --hard
+    git checkout $BRANCH_NAME
+    git pull
+    git submodule update
+    cd ${WK}
+    git reset --hard
+    git checkout $BRANCH_NAME
+    git pull
+    export TZ=Asia/Harbin
+    date
+    rm -rf ${WK}/debug
+    mkdir debug
+    cd debug
+    cmake .. > /dev/null
+    make > /dev/null
+    make install > /dev/null
+    pip3 install ${WKC}/src/connector/python/linux/python3/
+    '''
+    return 1
+}
+pipeline {
+    agent none
+    environment{
+        WK = '/var/lib/jenkins/workspace/TDinternal'
+        WKC = '/var/lib/jenkins/workspace/TDinternal/community'
+    }
+
+    stages {
+        stage('Parallel test stage') {
+            parallel {
+                stage('pytest') {
+                    agent{label 'slam1'}
+                    steps {
+                        pre_test()
+                        sh '''
+                        cd ${WKC}/tests
+                        find pytest -name '*'sql|xargs rm -rf
+                        ./test-all.sh pytest
+                        date'''
+                    }
+                }
+                stage('test_b1') {
+                    agent{label 'slam2'}
+                    steps {
+                        pre_test()
+                        sh '''
+                        cd ${WKC}/tests
+                        ./test-all.sh b1
+                        date'''
+                    }
+                }
+                stage('test_crash_gen') {
+                    agent{label "slam3"}
+                    steps {
+                        pre_test()
+                        sh '''
+                        cd ${WKC}/tests/pytest
+                        '''
+                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                            sh '''
+                            cd ${WKC}/tests/pytest
+                            ./crash_gen.sh -a -p -t 4 -s 2000
+                            '''
+                        }
+                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                            sh '''
+                            cd ${WKC}/tests/pytest
+                            rm -rf /var/lib/taos/*
+                            rm -rf /var/log/taos/*
+                            ./handle_crash_gen_val_log.sh
+                            '''
+                        }
+                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                            sh '''
+                            cd ${WKC}/tests/pytest
+                            rm -rf /var/lib/taos/*
+                            rm -rf /var/log/taos/*
+                            ./handle_taosd_val_log.sh
+                            '''
+                        }
+                        sh '''
+                        systemctl start taosd
+                        sleep 10
+                        '''
+                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                            sh '''
+                            cd ${WKC}/tests/gotest
+                            bash batchtest.sh
+                            '''
+                        }
+                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                            sh '''
+                            cd ${WKC}/tests/examples/python/PYTHONConnectorChecker
+                            python3 PythonChecker.py
+                            '''
+                        }
+                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                            sh '''
+                            cd ${WKC}/tests/examples/JDBC/JDBCDemo/
+                            mvn clean package assembly:single -DskipTests >/dev/null
+                            java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
+                            '''
+                        }
+                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                            sh '''
+                            cd ${WKC}/src/connector/jdbc
+                            mvn clean package -Dmaven.test.skip=true >/dev/null
+                            cd ${WKC}/tests/examples/JDBC/JDBCDemo/
+                            java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1
+                            '''
+                        }
+                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                            sh '''
+                            cp -rf ${WKC}/tests/examples/nodejs ${JENKINS_HOME}/workspace/
+                            cd ${JENKINS_HOME}/workspace/nodejs
+                            node nodejsChecker.js host=localhost
+                            '''
+                        }
+                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                            sh '''
+                            cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC#
+                            dotnet run
+                            '''
+                        }
+                        sh '''
+                        systemctl stop taosd
+                        cd ${WKC}/tests
+                        ./test-all.sh b2
+                        date
+                        '''
+                        sh '''
+                        cd ${WKC}/tests
+                        ./test-all.sh full unit
+                        date'''
+                    }
+                }
+                stage('test_valgrind') {
+                    agent{label "slam4"}
+                    steps {
+                        pre_test()
+                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                            sh '''
+                            cd ${WKC}/tests/pytest
+                            nohup taosd >/dev/null &
+                            sleep 10
+                            python3 concurrent_inquiry.py -c 1
+                            '''
+                        }
+                        sh '''
+                        cd ${WKC}/tests
+                        ./test-all.sh full jdbc
+                        date'''
+                        sh '''
+                        cd ${WKC}/tests/pytest
+                        ./valgrind-test.sh 2>&1 > mem-error-out.log
+                        ./handle_val_log.sh
+
+                        date
+                        cd ${WKC}/tests
+                        ./test-all.sh b3
+                        date'''
+                        sh '''
+                        date
+                        cd ${WKC}/tests
+                        ./test-all.sh full example
+                        date'''
+                    }
+                }
+                stage('arm64_build'){
+                    agent{label 'arm64'}
+                    steps{
+                        sh '''
+                        cd ${WK}
+                        git fetch
+                        git checkout develop
+                        git pull
+                        cd ${WKC}
+                        git fetch
+                        git checkout develop
+                        git pull
+                        git submodule update
+                        cd ${WKC}/packaging
+                        ./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0
+                        '''
+                    }
+                }
+                stage('arm32_build'){
+                    agent{label 'arm32'}
+                    steps{
+                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
+                            sh '''
+                            cd ${WK}
+                            git fetch
+                            git checkout develop
+                            git pull
+                            cd ${WKC}
+                            git fetch
+                            git checkout develop
+                            git pull
+                            git submodule update
+                            cd ${WKC}/packaging
+                            ./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0
+                            '''
+                        }
+                    }
+                }
+            }
+        }
+    }
+    post {
+        success {
+            emailext (
+                subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
+                body: '''<html>
+                <body>
+                    <h2>Build Information</h2>
+                    <ul>
+                        <li>Build name &gt;&gt; branch: ${PROJECT_NAME}</li>
+                        <li>Build result: Successful</li>
+                        <li>Build number: ${BUILD_NUMBER}</li>
+                        <li>Triggered by: ${CAUSE}</li>
+                        <li>Change summary: ${CHANGES}</li>
+                        <li>Build URL: ${BUILD_URL}</li>
+                        <li>Build log: ${BUILD_URL}console</li>
+                        <li>Change set: ${JELLY_SCRIPT}</li>
+                    </ul>
+                </body>
+                </html>
+                ''',
+                to: "yqliu@taosdata.com,pxiao@taosdata.com",
+                from: "support@taosdata.com"
+            )
+        }
+        failure {
+            emailext (
+                subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'",
+                body: '''<html>
+                <body>
+                    <h2>Build Information</h2>
+                    <ul>
+                        <li>Build name &gt;&gt; branch: ${PROJECT_NAME}</li>
+                        <li>Build result: Failed</li>
+                        <li>Build number: ${BUILD_NUMBER}</li>
+                        <li>Triggered by: ${CAUSE}</li>
+                        <li>Change summary: ${CHANGES}</li>
+                        <li>Build URL: ${BUILD_URL}</li>
+                        <li>Build log: ${BUILD_URL}console</li>
+                        <li>Change set: ${JELLY_SCRIPT}</li>
+                    </ul>
+                </body>
+                </html>
+                ''',
+                to: "yqliu@taosdata.com,pxiao@taosdata.com",
+                from: "support@taosdata.com"
+            )
+        }
+    }
+}
\ No newline at end of file
diff --git a/tests/pytest/crash_gen/service_manager.py b/tests/pytest/crash_gen/service_manager.py
index 95507f0142..1cd65c1dde 100644
--- a/tests/pytest/crash_gen/service_manager.py
+++ b/tests/pytest/crash_gen/service_manager.py
@@ -22,7 +22,7 @@ from queue import Queue, Empty
 from .shared.config import Config
 from .shared.db import DbTarget, DbConn
 from .shared.misc import Logging, Helper, CrashGenError, Status, Progress, Dice
-from .shared.types import DirPath
+from .shared.types import DirPath, IpcStream
 
 # from crash_gen.misc import CrashGenError, Dice, Helper, Logging, Progress, Status
 # from crash_gen.db import DbConn, DbTarget
@@ -177,13 +177,12 @@ quorum 2
         return "127.0.0.1"
 
     def getServiceCmdLine(self): # to start the instance
-        cmdLine = []
         if Config.getConfig().track_memory_leaks:
             Logging.info("Invoking VALGRIND on service...")
-            cmdLine = ['valgrind', '--leak-check=yes']
-        # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
-        cmdLine += ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
-        return cmdLine
+            return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
+        else:
+            # TODO: move "exec -c" into Popen(); we can both "use shell" and NOT fork, so as not to lose kill control
+            return ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subprocess.Popen()
 
     def _getDnodes(self, dbc):
         dbc.query("show dnodes")
@@ -281,16 +280,16 @@ class TdeSubProcess:
         return '[TdeSubProc: pid = {}, status = {}]'.format(
             self.getPid(), self.getStatus() )
 
-    def getStdOut(self) -> BinaryIO :
+    def getIpcStdOut(self) -> IpcStream :
         if self._popen.universal_newlines : # alias of text_mode
             raise CrashGenError("We need binary mode for STDOUT IPC")
         # Logging.info("Type of stdout is: {}".format(type(self._popen.stdout)))
-        return typing.cast(BinaryIO, self._popen.stdout)
+        return typing.cast(IpcStream, self._popen.stdout)
 
-    def getStdErr(self) -> BinaryIO :
+    def getIpcStdErr(self) -> IpcStream :
         if self._popen.universal_newlines : # alias of text_mode
             raise CrashGenError("We need binary mode for STDERR IPC")
-        return typing.cast(BinaryIO, self._popen.stderr)
+        return typing.cast(IpcStream, self._popen.stderr)
 
     # Now it's always running, since we matched the life cycle
     # def isRunning(self):
@@ -301,11 +300,6 @@ class TdeSubProcess:
     def _start(self, cmdLine) -> Popen :
         ON_POSIX = 'posix' in sys.builtin_module_names
-
-        # Sanity check
-        # if self.subProcess:  # already there
-        #     raise RuntimeError("Corrupt process state")
-
         # Prepare environment variables for coverage information
         # Ref: https://stackoverflow.com/questions/2231227/python-subprocess-popen-with-a-modified-environment
         myEnv = copy.deepcopy(os.environ)
@@ -314,9 +308,8 @@ class TdeSubProcess:
         # print(myEnv)
         # print("Starting TDengine with env: ", myEnv.items())
-        # print("Starting TDengine via Shell: {}".format(cmdLineStr))
+        print("Starting TDengine: {}".format(cmdLine))
 
-        # useShell = True # Needed to pass environments into it
         return Popen(
             ' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine,
             shell=True, # Always use shell, since we need to pass ENV vars
@@ -732,19 +725,19 @@ class ServiceManagerThread:
         self._ipcQueue = Queue() # type: Queue
         self._thread = threading.Thread( # First thread captures server OUTPUT
             target=self.svcOutputReader,
-            args=(subProc.getStdOut(), self._ipcQueue, logDir))
+            args=(subProc.getIpcStdOut(), self._ipcQueue, logDir))
         self._thread.daemon = True # thread dies with the program
         self._thread.start()
         time.sleep(0.01)
         if not self._thread.is_alive(): # What happened?
-            Logging.info("Failed to started process to monitor STDOUT")
+            Logging.info("Failed to start process to monitor STDOUT")
             self.stop()
             raise CrashGenError("Failed to start thread to monitor STDOUT")
         Logging.info("Successfully started process to monitor STDOUT")
         self._thread2 = threading.Thread( # 2nd thread captures server ERRORs
             target=self.svcErrorReader,
-            args=(subProc.getStdErr(), self._ipcQueue, logDir))
+            args=(subProc.getIpcStdErr(), self._ipcQueue, logDir))
         self._thread2.daemon = True # thread dies with the program
         self._thread2.start()
         time.sleep(0.01)
@@ -887,14 +880,19 @@ class ServiceManagerThread:
                 print("\nNon-UTF8 server output: {}\n".format(bChunk.decode('cp437')))
                 return None
 
-    def _textChunkGenerator(self, streamIn: BinaryIO, logDir: str, logFile: str
+    def _textChunkGenerator(self, streamIn: IpcStream, logDir: str, logFile: str
                             ) -> Generator[TextChunk, None, None]:
         '''
-        Take an input stream with binary data, produced a generator of decoded
-        "text chunks", and also save the original binary data in a log file.
+        Take an input stream with binary data (likely from Popen) and produce a
+        generator of decoded "text chunks".
+
+        Side effect: it also saves the original binary data in a log file.
         '''
         os.makedirs(logDir, exist_ok=True)
-        logF = open(os.path.join(logDir, logFile), 'wb')
+        try:
+            logF = open(os.path.join(logDir, logFile), 'wb')
+        except OSError:
+            # open() signals failure by raising, not by returning None
+            Logging.error("Failed to open log file (binary write): {}/{}".format(logDir, logFile))
+            return
         for bChunk in iter(streamIn.readline, b''):
             logF.write(bChunk) # Write to log file immediately
             tChunk = self._decodeBinaryChunk(bChunk) # decode
@@ -902,14 +900,14 @@ class ServiceManagerThread:
             yield tChunk # TODO: split into actual text lines
 
         # At the end...
-        streamIn.close() # Close the stream
-        logF.close() # Close the output file
+        streamIn.close() # Close the incoming stream
+        logF.close() # Close the log file
 
-    def svcOutputReader(self, stdOut: BinaryIO, queue, logDir: str):
+    def svcOutputReader(self, ipcStdOut: IpcStream, queue, logDir: str):
         '''
         The infinite routine that processes the STDOUT stream for the sub process being managed.
 
-        :param stdOut: the IO stream object used to fetch the data from
+        :param ipcStdOut: the IO stream object used to fetch the data from
         :param queue: the queue where we dump the roughly parsed chunk-by-chunk text data
         :param logDir: where we should dump a verbatim output file
         '''
 
         # Important Reference: https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
         # print("This is the svcOutput Reader...")
         # stdOut.readline() # Skip the first output? TODO: remove?
-        for tChunk in self._textChunkGenerator(stdOut, logDir, 'stdout.log') :
+        for tChunk in self._textChunkGenerator(ipcStdOut, logDir, 'stdout.log') :
             queue.put(tChunk) # tChunk garanteed not to be None
             self._printProgress("_i")
@@ -940,12 +938,12 @@ class ServiceManagerThread:
                 Logging.info("EOF found TDengine STDOUT, marking the process as terminated")
                 self.setStatus(Status.STATUS_STOPPED)
 
-    def svcErrorReader(self, stdErr: BinaryIO, queue, logDir: str):
+    def svcErrorReader(self, ipcStdErr: IpcStream, queue, logDir: str):
         # os.makedirs(logDir, exist_ok=True)
         # logFile = os.path.join(logDir,'stderr.log')
         # fErr = open(logFile, 'wb')
         # for line in iter(err.readline, b''):
-        for tChunk in self._textChunkGenerator(stdErr, logDir, 'stderr.log') :
+        for tChunk in self._textChunkGenerator(ipcStdErr, logDir, 'stderr.log') :
             queue.put(tChunk) # tChunk garanteed not to be None
             # fErr.write(line)
             Logging.info("TDengine STDERR: {}".format(tChunk))
diff --git a/tests/pytest/crash_gen/shared/types.py b/tests/pytest/crash_gen/shared/types.py
index 814a821917..42fd2a1617 100644
--- a/tests/pytest/crash_gen/shared/types.py
+++ b/tests/pytest/crash_gen/shared/types.py
@@ -1,4 +1,4 @@
-from typing import Any, List, Dict, NewType
+from typing import Any, BinaryIO, List, Dict, NewType
 from enum import Enum
 
 DirPath = NewType('DirPath', str)
@@ -26,3 +26,5 @@ class TdDataType(Enum):
 
 TdColumns = Dict[str, TdDataType]
 TdTags = Dict[str, TdDataType]
+
+IpcStream = NewType('IpcStream', BinaryIO)
\ No newline at end of file
diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim
index 7e5f31f759..9d5214bfaa 100644
--- a/tests/script/general/parser/select_with_tags.sim
+++ b/tests/script/general/parser/select_with_tags.sim
@@ -887,10 +887,16 @@ sql_error select tbname, t1 from select_tags_mt0 interval(1y);
 #valid sql: select first(c1), last(c2), count(*) from select_tags_mt0 group by tbname, t1;
 #valid sql: select first(c1), tbname, t1 from select_tags_mt0 group by t2;
 
+print ==================================>TD-4231
+sql_error select t1,tbname from select_tags_mt0 where c1<0
+sql_error select t1,tbname from select_tags_mt0 where c1<0 and tbname in ('select_tags_tb12')
+
+sql select tbname from select_tags_mt0 where tbname in ('select_tags_tb12');
+
 sql_error select first(c1), last(c2), t1 from select_tags_mt0 group by tbname;
 sql_error select first(c1), last(c2), tbname, t2 from select_tags_mt0 group by tbname;
 sql_error select first(c1), count(*), t2, t1, tbname from select_tags_mt0 group by tbname;
-# this sql is valid: select first(c1), t2 from select_tags_mt0 group by tbname;
+#valid sql: select first(c1), t2 from select_tags_mt0 group by tbname;
 
 #sql select first(ts), tbname from select_tags_mt0 group by tbname;
 #sql select count(c1) from select_tags_mt0 where c1=99 group by tbname;
diff --git a/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim b/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim
index 8e15c4f527..b9ee508f78 100644
--- a/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim
+++ b/tests/script/unique/arbitrator/dn3_mn1_vnode_nomaster.sim
@@ -158,7 +158,7 @@ if $dnode4Vtatus != offline then
   sleep 2000
   goto wait_dnode4_vgroup_offline
 endi
-if $dnode3Vtatus != master then
+if $dnode3Vtatus != unsynced then
   sleep 2000
   goto wait_dnode4_vgroup_offline
 endi