Merge remote-tracking branch 'origin/develop' into feature/wal

commit eede5a7711
Shengliang Guan, 2020-12-04 13:24:28 +08:00
33 changed files with 175 additions and 84 deletions

View File

@@ -101,7 +101,20 @@ Connection = DriverManager.getConnection(url, properties);
    <version>2.0.4</version>
</dependency>
```
-## 14. How do I report an issue?
+## 14. taos connect failed, reason: invalid timestamp
+The most common cause is that the server and client clocks are out of sync. Synchronize both against a time server (on Linux with the ntpdate command, on Windows by enabling automatic time synchronization in the system clock settings).
+## 15. Table names are displayed incompletely
+Because the taos shell has a limited display width in the terminal, long table names may be shown truncated, and using such a truncated name in later statements causes a "Table does not exist" error. To fix this, enlarge the display width by changing the maxBinaryDisplayWidth option in taos.cfg or by running the command set max_binary_display_width 100 in the shell; alternatively, end the statement with \G to change how the result is displayed.
+## 16. How do I report an issue?
If the information in this FAQ does not help and you need support from the TDengine technical team, please package the contents of the following two directories:
1. /var/log/taos
2. /etc/taos

View File

@@ -251,7 +251,7 @@
# cqDebugFlag          131

# enable/disable recording the SQL in taos client
-# tscEnableRecordSql   0
+# enableRecordSql      0

# generate core file when service crash
# enableCoreFile       1
@@ -264,3 +264,6 @@
# enable/disable stream (continuous query)
# stream               1
+
+# only 50% CPU resources will be used in query processing
+# halfCoresForQuery    0

View File

@@ -4540,11 +4540,11 @@ static void sumrate_finalizer(SQLFunctionCtx *pCtx) {
 *
 */
int32_t functionCompatList[] = {
  // count,    sum,    avg,    min,        max,   stddev,     percentile, apercentile, first,     last
  1,           1,      1,      1,          1,     1,          1,          1,           1,         1,
  // last_row, top,    bottom, spread,     twa,   leastsqr,   ts,         ts_dummy,    tag_dummy, ts_z
  4,          -1,     -1,      1,          1,     1,          1,          1,           1,        -1,
  // tag,      colprj, tagprj, arithmetic, diff,  first_dist, last_dist,  interp rate irate
  1,           1,      1,      1,         -1,     1,          1,          5,           1,         1,
  // sum_rate, sum_irate, avg_rate, avg_irate
  1,           1,      1,      1,

View File

@@ -630,11 +630,17 @@ int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int3
  return TSDB_CODE_SUCCESS;
}

-static void tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) {
+static int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows) {
  pBlocks->tid = pTableMeta->id.tid;
  pBlocks->uid = pTableMeta->id.uid;
  pBlocks->sversion = pTableMeta->sversion;
-  pBlocks->numOfRows += numOfRows;
+  if (pBlocks->numOfRows + numOfRows >= INT16_MAX) {
+    return TSDB_CODE_TSC_INVALID_SQL;
+  } else {
+    pBlocks->numOfRows += numOfRows;
+    return TSDB_CODE_SUCCESS;
+  }
}

// data block is disordered, sort it in ascending order
@@ -722,7 +728,11 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
  }

  SSubmitBlk *pBlocks = (SSubmitBlk *)(dataBuf->pData);
-  tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
+  code = tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
+  if (code != TSDB_CODE_SUCCESS) {
+    tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", *str);
+    return code;
+  }

  dataBuf->vgId = pTableMeta->vgroupInfo.vgId;
  dataBuf->numOfTables = 1;
@@ -1384,7 +1394,10 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock
  STableMeta *pTableMeta = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0)->pTableMeta;

  SSubmitBlk *pBlocks = (SSubmitBlk *)(pTableDataBlocks->pData);
-  tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
+  code = tsSetBlockInfo(pBlocks, pTableMeta, numOfRows);
+  if (code != TSDB_CODE_SUCCESS) {
+    return tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", NULL);
+  }

  if ((code = tscMergeTableDataBlocks(pSql, pCmd->pDataBlocks)) != TSDB_CODE_SUCCESS) {
    return code;
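
The row counter in SSubmitBlk ends up in a 16-bit field, so the patched tsSetBlockInfo now rejects a batch that would push the count past INT16_MAX instead of letting it wrap. A minimal standalone sketch of the same guard, using simplified stand-in types rather than the real TDengine structs:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for SSubmitBlk; the real struct serializes the row
 * count into 16 bits, which is why it must stay below INT16_MAX. */
typedef struct {
  int32_t numOfRows;
} SubmitBlock;

enum { CODE_SUCCESS = 0, CODE_INVALID_SQL = -1 };

/* Same guard as the patched tsSetBlockInfo: reject the batch instead of
 * silently overflowing the 16-bit row counter. */
static int set_block_rows(SubmitBlock *blk, int32_t numOfRows) {
  if (blk->numOfRows + numOfRows >= INT16_MAX) {
    return CODE_INVALID_SQL;
  }
  blk->numOfRows += numOfRows;
  return CODE_SUCCESS;
}

int main(void) {
  SubmitBlock blk = { .numOfRows = 32000 };
  if (set_block_rows(&blk, 1000) != CODE_SUCCESS) {
    printf("too many rows in sql, total number of rows should be less than 32767\n");
  }
  return 0;
}
```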

View File

@@ -39,6 +39,7 @@ void tscInitConnCb(void *param, TAOS_RES *result, int code) {
    tscSlowQueryConnInitialized = true;
    tscSaveSlowQueryFp(sql, NULL);
  }
+  taos_free_result(result);
}

void tscAddIntoSqlList(SSqlObj *pSql) {
@@ -69,6 +70,7 @@ void tscSaveSlowQueryFpCb(void *param, TAOS_RES *result, int code) {
  } else {
    tscDebug("success to save slow query, code:%d", code);
  }
+  taos_free_result(result);
}

void tscSaveSlowQueryFp(void *handle, void *tmrId) {
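
Both callbacks above receive the TAOS_RES of an internal asynchronous query, and the leak fix frees it before returning. A hedged sketch of the general rule for the C client (assuming the TDengine 2.x libtaos headers and a reachable server; connection parameters are placeholders):

```c
#include <stdio.h>
#include <taos.h>   /* TDengine C client header */

/* The TAOS_RES handed to an async query callback is owned by the callback
 * and must be released with taos_free_result(), on success and on failure
 * alike; the fix above adds exactly that to the two slow-query callbacks. */
static void on_query_done(void *param, TAOS_RES *result, int code) {
  (void)param;
  if (code < 0) {
    fprintf(stderr, "query failed, code:%d\n", code);
  }
  taos_free_result(result);  /* always free the result passed to the callback */
}

int main(void) {
  /* Placeholder connection parameters; adjust for your deployment. */
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (conn == NULL) return 1;

  taos_query_a(conn, "create database if not exists demo", on_query_done, NULL);

  getchar();                 /* crude wait so the callback can run before exit */
  taos_close(conn);
  return 0;
}
```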

View File

@@ -2835,6 +2835,10 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
    if (functionCompatList[functionId] != factor) {
      return false;
+    } else {
+      if (factor == -1) { // two functions with the same -1 flag
+        return false;
+      }
    }

    if (functionId == TSDB_FUNC_LAST_ROW && joinQuery) {
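
The extra branch tightens functionCompatibleCheck: functions in one SELECT list must share the same compatibility factor, and a factor of -1 now also rejects two such functions appearing together. A toy illustration of that rule (the table values are made up, not the real functionCompatList):

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy compatibility table: each function id maps to a "factor"; functions may
 * be combined in one SELECT list only if their factors match, and -1 marks
 * functions that must not be combined with another -1 function even when the
 * factors are equal. */
static const int compat[] = { 1, 1, 4, -1, -1, 5 };

static bool functions_compatible(const int *funcIds, int n) {
  int factor = compat[funcIds[0]];
  for (int i = 1; i < n; ++i) {
    if (compat[funcIds[i]] != factor) {
      return false;               /* mixed factors: incompatible */
    } else if (factor == -1) {
      return false;               /* two functions with the same -1 flag */
    }
  }
  return true;
}

int main(void) {
  int ok[]  = { 0, 1 };   /* both factor 1: allowed       */
  int bad[] = { 3, 4 };   /* both factor -1: now rejected */
  printf("%d %d\n", functions_compatible(ok, 2), functions_compatible(bad, 2));
  return 0;
}
```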

View File

@@ -144,7 +144,7 @@ void taos_init_imp(void) {
  int64_t refreshTime = 10; // 10 seconds by default
  if (tscMetaCache == NULL) {
    tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, tscFreeTableMetaHelper, "tableMeta");
-    tscObjRef = taosOpenRef(4096, tscFreeRegisteredSqlObj);
+    tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj);
  }

  tscRefId = taosOpenRef(200, tscCloseTscObj);

View File

@@ -56,6 +56,7 @@ extern char tsTempDir[];

//query buffer management
extern int32_t tsQueryBufferSize;      // maximum allowed usage buffer for each data node during query processing
+extern int32_t tsHalfCoresForQuery;    // only 50% will be used in query processing

// client
extern int32_t tsTableMetaKeepTimer;

View File

@@ -107,6 +107,9 @@ int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance
// positive value (in MB)
int32_t tsQueryBufferSize = -1;

+// only 50% cpu will be used in query processing in dnode
+int32_t tsHalfCoresForQuery = 0;
+
// db parameters
int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS;
@@ -884,6 +887,16 @@ static void doInitGlobalConfig(void) {
  cfg.unitType = TAOS_CFG_UTYPE_BYTE;
  taosInitConfigOption(cfg);

+  cfg.option = "halfCoresForQuery";
+  cfg.ptr = &tsHalfCoresForQuery;
+  cfg.valType = TAOS_CFG_VTYPE_INT32;
+  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
+  cfg.minValue = 0;
+  cfg.maxValue = 1;
+  cfg.ptrLength = 1;
+  cfg.unitType = TAOS_CFG_UTYPE_NONE;
+  taosInitConfigOption(cfg);
+
  // locale & charset
  cfg.option = "timezone";
  cfg.ptr = tsTimezone;
@@ -1290,7 +1303,7 @@ static void doInitGlobalConfig(void) {
  cfg.unitType = TAOS_CFG_UTYPE_NONE;
  taosInitConfigOption(cfg);

-  cfg.option = "tscEnableRecordSql";
+  cfg.option = "enableRecordSql";
  cfg.ptr = &tsTscEnableRecordSql;
  cfg.valType = TAOS_CFG_VTYPE_INT32;
  cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
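
The new option follows the same table-driven registration pattern as the surrounding entries: a name, a pointer to the global, a value type, and an allowed range of [0, 1]. A toy sketch of that idea, parsing a taos.cfg-style line and applying it only when the value is in range (illustrative only, not the real tconfig module):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Globals standing in for tsHalfCoresForQuery and tsQueryBufferSize. */
static int32_t halfCoresForQuery = 0;
static int32_t queryBufferSize   = -1;

typedef struct {
  const char *option;    /* option name as it appears in taos.cfg */
  int32_t    *ptr;       /* global variable the value is written to */
  int32_t     minValue;
  int32_t     maxValue;
} ConfigOption;

static const ConfigOption options[] = {
  { "halfCoresForQuery", &halfCoresForQuery, 0, 1 },
  { "queryBufferSize",   &queryBufferSize,  -1, 500000 },
};

/* Apply a "option value" line; reject comments and out-of-range values. */
static int apply_cfg_line(const char *line) {
  char name[64];
  int  value;
  if (line[0] == '#' || sscanf(line, "%63s %d", name, &value) != 2) return -1;
  for (size_t i = 0; i < sizeof(options) / sizeof(options[0]); ++i) {
    if (strcmp(options[i].option, name) == 0) {
      if (value < options[i].minValue || value > options[i].maxValue) return -1;
      *options[i].ptr = value;
      return 0;
    }
  }
  return -1;
}

int main(void) {
  apply_cfg_line("halfCoresForQuery 1");
  apply_cfg_line("halfCoresForQuery 5");                  /* outside [0,1], ignored */
  printf("halfCoresForQuery = %d\n", halfCoresForQuery);  /* prints 1 */
  return 0;
}
```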

View File

@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
    name="taos",
-    version="2.0.2",
+    version="2.0.3",
    author="Taosdata Inc.",
    author_email="support@taosdata.com",
    description="TDengine python client package",

View File

@@ -3,7 +3,7 @@ from .connection import TDengineConnection
from .cursor import TDengineCursor

# Globals
-apilevel = '2.0'
+apilevel = '2.0.3'
threadsafety = 0
paramstyle = 'pyformat'

View File

@@ -81,6 +81,7 @@ def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
    """Function to convert C binary row to python row
    """
+    assert(nbytes is not None)
    if num_of_rows > 0:
        return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
    else:
@@ -106,6 +107,7 @@ def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
    """Function to convert C binary row to python row
    """
+    assert(nbytes is not None)
    res=[]
    if num_of_rows > 0:
        for i in range(abs(num_of_rows)):

View File

@@ -26,7 +26,7 @@ class TDengineCursor(object):
    """

    def __init__(self, connection=None):
-        self._description = None
+        self._description = []
        self._rowcount = -1
        self._connection = None
        self._result = None
@@ -234,7 +234,7 @@
    def _reset_result(self):
        """Reset the result to unused version.
        """
-        self._description = None
+        self._description = []
        self._rowcount = -1
        if self._result is not None:
            CTaosInterface.freeResult(self._result)

View File

@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
    name="taos",
-    version="2.0.2",
+    version="2.0.3",
    author="Taosdata Inc.",
    author_email="support@taosdata.com",
    description="TDengine python client package",

View File

@@ -3,7 +3,7 @@ from .connection import TDengineConnection
from .cursor import TDengineCursor

# Globals
-apilevel = '2.0'
+apilevel = '2.0.3'
threadsafety = 0
paramstyle = 'pyformat'
@@ -21,4 +21,4 @@ def connect(*args, **kwargs):
    @rtype: TDengineConnector
    """
    return TDengineConnection(*args, **kwargs)

View File

@@ -81,6 +81,7 @@ def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
    """Function to convert C binary row to python row
    """
+    assert(nbytes is not None)
    if num_of_rows > 0:
        return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
    else:
@@ -106,6 +107,7 @@ def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
    """Function to convert C binary row to python row
    """
+    assert(nbytes is not None)
    res=[]
    if num_of_rows > 0:
        for i in range(abs(num_of_rows)):

View File

@@ -27,7 +27,7 @@ class TDengineCursor(object):
    """

    def __init__(self, connection=None):
-        self._description = None
+        self._description = []
        self._rowcount = -1
        self._connection = None
        self._result = None
@@ -242,7 +242,7 @@
    def _reset_result(self):
        """Reset the result to unused version.
        """
-        self._description = None
+        self._description = []
        self._rowcount = -1
        if self._result is not None:
            CTaosInterface.freeResult(self._result)

View File

@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
    name="taos",
-    version="2.0.2",
+    version="2.0.3",
    author="Taosdata Inc.",
    author_email="support@taosdata.com",
    description="TDengine python client package",

View File

@@ -3,7 +3,7 @@ from .connection import TDengineConnection
from .cursor import TDengineCursor

# Globals
-apilevel = '2.0'
+apilevel = '2.0.3'
threadsafety = 0
paramstyle = 'pyformat'

View File

@@ -81,6 +81,7 @@ def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
    """Function to convert C binary row to python row
    """
+    assert(nbytes is not None)
    if num_of_rows > 0:
        return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
    else:
@@ -106,6 +107,7 @@ def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
    """Function to convert C binary row to python row
    """
+    assert(nbytes is not None)
    res=[]
    if num_of_rows > 0:
        for i in range(abs(num_of_rows)):

View File

@@ -27,7 +27,7 @@ class TDengineCursor(object):
    """

    def __init__(self, connection=None):
-        self._description = None
+        self._description = []
        self._rowcount = -1
        self._connection = None
        self._result = None
@@ -193,7 +193,7 @@
    def _reset_result(self):
        """Reset the result to unused version.
        """
-        self._description = None
+        self._description = []
        self._rowcount = -1
        if self._result is not None:
            CTaosInterface.freeResult(self._result)

View File

@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
    name="taos",
-    version="2.0.2",
+    version="2.0.3",
    author="Taosdata Inc.",
    author_email="support@taosdata.com",
    description="TDengine python client package",

View File

@@ -3,7 +3,7 @@ from .connection import TDengineConnection
from .cursor import TDengineCursor

# Globals
-apilevel = '2.0'
+apilevel = '2.0.3'
threadsafety = 0
paramstyle = 'pyformat'

View File

@@ -81,6 +81,7 @@ def _crow_double_to_python(data, num_of_rows, nbytes=None, micro=False):
def _crow_binary_to_python(data, num_of_rows, nbytes=None, micro=False):
    """Function to convert C binary row to python row
    """
+    assert(nbytes is not None)
    if num_of_rows > 0:
        return [ None if ele.value[0:1] == FieldType.C_BINARY_NULL else ele.value.decode('utf-8') for ele in (ctypes.cast(data, ctypes.POINTER(ctypes.c_char * nbytes)))[:abs(num_of_rows)]]
    else:
@@ -108,6 +109,7 @@ def _crow_nchar_to_python(data, num_of_rows, nbytes=None, micro=False):
def _crow_binary_to_python_block(data, num_of_rows, nbytes=None, micro=False):
    """Function to convert C binary row to python row
    """
+    assert(nbytes is not None)
    res=[]
    if num_of_rows > 0:
        for i in range(abs(num_of_rows)):

View File

@@ -28,7 +28,7 @@ class TDengineCursor(object):
    """

    def __init__(self, connection=None):
-        self._description = None
+        self._description = []
        self._rowcount = -1
        self._connection = None
        self._result = None
@@ -194,7 +194,7 @@
    def _reset_result(self):
        """Reset the result to unused version.
        """
-        self._description = None
+        self._description = []
        self._rowcount = -1
        if self._result is not None:
            CTaosInterface.freeResult(self._result)

View File

@@ -1372,8 +1372,12 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat
  }

  if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
-    pResultRow->key = malloc(varDataTLen(pData));
-    varDataCopy(pResultRow->key, pData);
+    if (pResultRow->key == NULL) {
+      pResultRow->key = malloc(varDataTLen(pData));
+      varDataCopy(pResultRow->key, pData);
+    } else {
+      assert(memcmp(pResultRow->key, pData, varDataTLen(pData)) == 0);
+    }
  } else {
    pResultRow->win.skey = v;
    pResultRow->win.ekey = v;
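
The fix allocates the group key only the first time a result row for that group is seen; later visits merely assert that the incoming key matches, which avoids re-allocating (and leaking) the buffer on every row. A small standalone sketch of the allocate-once pattern with illustrative types:

```c
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Result row for one group: the key buffer is owned by the row. */
typedef struct {
  char *key;
} ResultRow;

/* Allocate the group key only on first use; on later visits just verify the
 * incoming key matches, instead of leaking the old buffer by re-allocating
 * it every time the group is touched. */
static void set_group_key(ResultRow *row, const char *key, size_t len) {
  if (row->key == NULL) {
    row->key = malloc(len);
    memcpy(row->key, key, len);
  } else {
    assert(memcmp(row->key, key, len) == 0);
  }
}

int main(void) {
  ResultRow row = { .key = NULL };
  set_group_key(&row, "group-A", 8);   /* first visit: allocates  */
  set_group_key(&row, "group-A", 8);   /* later visit: no allocation */
  printf("key=%s\n", row.key);
  free(row.key);
  return 0;
}
```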

View File

@@ -405,14 +405,29 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
      if (type->type == 0) {
        pField->bytes = 0;
      } else {
-        pField->bytes = (int16_t)(-(int32_t)type->type * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE);
+        int32_t bytes = -(int32_t)(type->type);
+        if (bytes > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
+          // we have to postpone reporting the error because it cannot be done here
+          // as pField->bytes is int16_t, use 'TSDB_MAX_NCHAR_LEN + 1' to avoid overflow
+          bytes = TSDB_MAX_NCHAR_LEN + 1;
+        } else {
+          bytes = bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
+        }
+        pField->bytes = (int16_t)bytes;
      }
    } else if (i == TSDB_DATA_TYPE_BINARY) {
      /* for binary, the TOKENTYPE is the length of binary */
      if (type->type == 0) {
        pField->bytes = 0;
      } else {
-        pField->bytes = (int16_t) (-(int32_t) type->type + VARSTR_HEADER_SIZE);
+        int32_t bytes = -(int32_t)(type->type);
+        if (bytes > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
+          // refer comment for NCHAR above
+          bytes = TSDB_MAX_BINARY_LEN + 1;
+        } else {
+          bytes += VARSTR_HEADER_SIZE;
+        }
+        pField->bytes = (int16_t)bytes;
      }
    }
    break;
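
Both branches now compute the declared column width in a 32-bit temporary and cap it at a sentinel of max length + 1 before narrowing to the int16_t field, so an over-long NCHAR or BINARY declaration can still be reported as an error later instead of wrapping into a bogus small width. A reduced sketch of the pattern (the constants are made up for illustration):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative limits, not the real TDengine values. */
#define MAX_BINARY_LEN     16374
#define VARSTR_HEADER_SIZE 2

/* Compute the stored width of a binary column. The result lands in a 16-bit
 * field, so an oversized declaration is capped at MAX_BINARY_LEN + 1: that
 * sentinel still fits in int16_t and lets a later validation step report
 * "length too long" instead of silently accepting an overflowed width. */
static int16_t binary_column_bytes(int32_t declared_len) {
  int32_t bytes = declared_len;
  if (bytes > MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
    bytes = MAX_BINARY_LEN + 1;          /* out-of-range marker, checked later */
  } else {
    bytes += VARSTR_HEADER_SIZE;
  }
  return (int16_t)bytes;
}

int main(void) {
  printf("%d\n", binary_column_bytes(100));     /* 102: valid width          */
  printf("%d\n", binary_column_bytes(70000));   /* 16375: flagged as too long */
  return 0;
}
```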

View File

@@ -786,9 +786,9 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT
    if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) {
      tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo),
                pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable));
+      terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
+      return -1;
    }
-    terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
-    return -1;
  }

  return 0;

View File

@@ -19,9 +19,7 @@
#include "taoserror.h"
#include "tconfig.h"
#include "tglobal.h"
-#include "tkey.h"
#include "tulog.h"
-#include "tsocket.h"
#include "tsystem.h"
#include "tutil.h"

View File

@@ -329,7 +329,7 @@ void *taosIterateRef(int rsetId, int64_t rid) {
      pNode->count++;  // acquire it
      newP = pNode->p;
      taosUnlockList(pSet->lockedBy+hash);
      uTrace("rsetId:%d p:%p rid:%" PRId64 " is returned", rsetId, newP, rid);
    } else {
      uTrace("rsetId:%d the list is over", rsetId);
    }
@@ -423,24 +423,25 @@ static int taosDecRefCount(int rsetId, int64_t rid, int remove) {
      if (pNode->next) {
        pNode->next->prev = pNode->prev;
      }

-      (*pSet->fp)(pNode->p);
-      uTrace("rsetId:%d p:%p rid:%" PRId64 " is removed, count:%d, free mem: %p", rsetId, pNode->p, rid, pSet->count, pNode);
-      free(pNode);
      released = 1;
    } else {
-      uTrace("rsetId:%d p:%p rid:%" PRId64 " is released, count:%d", rsetId, pNode->p, rid, pNode->count);
+      uTrace("rsetId:%d p:%p rid:%" PRId64 " is released", rsetId, pNode->p, rid);
    }
  } else {
    uTrace("rsetId:%d rid:%" PRId64 " is not there, failed to release/remove", rsetId, rid);
    terrno = TSDB_CODE_REF_NOT_EXIST;
    code = -1;
  }

  taosUnlockList(pSet->lockedBy+hash);

-  if (released) taosDecRsetCount(pSet);
+  if (released) {
+    uTrace("rsetId:%d p:%p rid:%" PRId64 " is removed, count:%d, free mem: %p", rsetId, pNode->p, rid, pSet->count, pNode);
+    (*pSet->fp)(pNode->p);
+    free(pNode);
+    taosDecRsetCount(pSet);
+  }

  return code;
}
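
The reordering in taosDecRefCount unlinks the node while the bucket lock is held but defers the destructor callback and free() until after taosUnlockList, so user-supplied callbacks never run under the lock. A minimal pthread sketch of that pattern (names are illustrative):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Doubly linked node holding a payload pointer. */
typedef struct Node {
  struct Node *prev, *next;
  void        *p;
} Node;

static pthread_mutex_t listLock = PTHREAD_MUTEX_INITIALIZER;

static void destroy_payload(void *p) { printf("destroy %p\n", p); }

/* Unlink under the lock, but run the destructor and free() only after the
 * lock is dropped, so arbitrary user code never executes with it held. */
static void remove_node(Node **head, Node *node) {
  int released = 0;

  pthread_mutex_lock(&listLock);
  if (node->prev) node->prev->next = node->next; else *head = node->next;
  if (node->next) node->next->prev = node->prev;
  released = 1;                       /* only record the decision under the lock */
  pthread_mutex_unlock(&listLock);

  if (released) {                     /* heavy work happens without the lock */
    destroy_payload(node->p);
    free(node);
  }
}

int main(void) {
  Node *head = calloc(1, sizeof(Node));
  remove_node(&head, head);           /* head becomes NULL after removal */
  return 0;
}
```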

View File

@@ -275,41 +275,40 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
    vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle);

-#if _NON_BLOCKING_RETRIEVE
-    bool freehandle = false;
-    bool buildRes = qTableQuery(*qhandle); // do execute query
-
-    // build query rsp, the retrieve request has reached here already
-    if (buildRes) {
-      // update the connection info according to the retrieve connection
-      pRead->rpcHandle = qGetResultRetrieveMsg(*qhandle);
-      assert(pRead->rpcHandle != NULL);
-
-      vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle,
-             pRead->rpcHandle);
-
-      // set the real rsp error code
-      pRead->code = vnodeDumpQueryResult(&pRead->rspRet, pVnode, qhandle, &freehandle, pRead->rpcHandle);
-
-      // NOTE: set return code to be TSDB_CODE_QRY_HAS_RSP to notify dnode to return msg to client
-      code = TSDB_CODE_QRY_HAS_RSP;
-    } else {
-      void* h1 = qGetResultRetrieveMsg(*qhandle);
-      assert(h1 == NULL);
-      freehandle = qQueryCompleted(*qhandle);
-    }
-
-    // NOTE: if the qhandle is not put into vread queue or query is completed, free the qhandle.
-    // If the building of result is not required, simply free it. Otherwise, mandatorily free the qhandle
-    if (freehandle || (!buildRes)) {
-      qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, freehandle);
-    }
-#else
-    qTableQuery(*qhandle); // do execute query
-    qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, false);
-#endif
+    // In the retrieve blocking model, only 50% CPU will be used in query processing
+    if (tsHalfCoresForQuery) {
+      qTableQuery(*qhandle);  // do execute query
+      qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, false);
+    } else {
+      bool freehandle = false;
+      bool buildRes = qTableQuery(*qhandle);  // do execute query
+
+      // build query rsp, the retrieve request has reached here already
+      if (buildRes) {
+        // update the connection info according to the retrieve connection
+        pRead->rpcHandle = qGetResultRetrieveMsg(*qhandle);
+        assert(pRead->rpcHandle != NULL);
+
+        vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle,
+               pRead->rpcHandle);
+
+        // set the real rsp error code
+        pRead->code = vnodeDumpQueryResult(&pRead->rspRet, pVnode, qhandle, &freehandle, pRead->rpcHandle);
+
+        // NOTE: set return code to be TSDB_CODE_QRY_HAS_RSP to notify dnode to return msg to client
+        code = TSDB_CODE_QRY_HAS_RSP;
+      } else {
+        void *h1 = qGetResultRetrieveMsg(*qhandle);
+        assert(h1 == NULL);
+        freehandle = qQueryCompleted(*qhandle);
+      }
+
+      // NOTE: if the qhandle is not put into vread queue or query is completed, free the qhandle.
+      // If the building of result is not required, simply free it. Otherwise, mandatorily free the qhandle
+      if (freehandle || (!buildRes)) {
+        qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, freehandle);
+      }
+    }
  }

  return code;
@@ -375,14 +374,16 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) {
    freeHandle = true;
  } else { // result is not ready, return immediately
    assert(buildRes == true);
-#if _NON_BLOCKING_RETRIEVE
-    if (!buildRes) {
-      assert(pRead->rpcHandle != NULL);
-
-      qReleaseQInfo(pVnode->qMgmt, (void **)&handle, false);
-      return TSDB_CODE_QRY_NOT_READY;
+    // Only effects in the non-blocking model
+    if (!tsHalfCoresForQuery) {
+      if (!buildRes) {
+        assert(pRead->rpcHandle != NULL);
+        qReleaseQInfo(pVnode->qMgmt, (void **)&handle, false);
+        return TSDB_CODE_QRY_NOT_READY;
+      }
    }
-#endif

    // ahandle is the sqlObj pointer
    code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle, pRead->rpcHandle);
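
The compile-time _NON_BLOCKING_RETRIEVE switch becomes a runtime check on tsHalfCoresForQuery: when the flag is set, the read thread runs the query to completion and releases the handle right away (the blocking retrieve model described by the new comment); otherwise the handle stays alive and the response is built once results are ready. A highly simplified sketch of selecting between the two models at runtime (stubs only, not the vnode API):

```c
#include <stdbool.h>
#include <stdio.h>

/* Runtime flag standing in for tsHalfCoresForQuery; the real value comes
 * from taos.cfg. Everything below is a stub to show control flow only. */
static int halfCoresForQuery = 0;

static bool execute_query_step(void)      { puts("query step executed"); return true; }
static void release_handle(bool force)    { printf("handle released (force=%d)\n", force); }
static void build_retrieve_response(void) { puts("retrieve response built"); }

/* One query message: with the flag on, run to completion and release the
 * handle immediately (blocking retrieve model); with the flag off, keep the
 * handle alive and only build the response once results are ready. */
static void process_query_msg(void) {
  if (halfCoresForQuery) {
    execute_query_step();
    release_handle(false);
  } else {
    bool resultReady = execute_query_step();
    if (resultReady) {
      build_retrieve_response();
    } else {
      release_handle(false);
    }
  }
}

int main(void) {
  process_query_msg();            /* non-blocking model */
  halfCoresForQuery = 1;
  process_query_msg();            /* blocking model */
  return 0;
}
```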

View File

@@ -159,7 +159,8 @@ python3 ./test.py -f query/bug1471.py
python3 ./test.py -f query/bug1874.py
python3 ./test.py -f query/bug1875.py
python3 ./test.py -f query/bug1876.py
python3 ./test.py -f query/bug2218.py
+python3 ./test.py -f query/sliding.py

#stream
python3 ./test.py -f stream/metric_1.py

View File

@@ -16,6 +16,7 @@ import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
+from util.dnodes import tdDnodes


class TDTestCase:
@@ -72,6 +73,19 @@
        tdSql.checkData(6, 0, "2020-09-16 00:00:00")
        tdSql.checkData(6, 1, 222.0)

+        # test case for https://jira.taosdata.com:18080/browse/TD-2298
+        tdSql.execute("create database test keep 36500")
+        tdSql.execute("use test")
+        tdSql.execute("create table t (ts timestamp, voltage int)")
+        for i in range(10000):
+            tdSql.execute("insert into t values(%d, 0)" % (1000000 + i * 6000))
+
+        tdDnodes.stop(1)
+        tdDnodes.start(1)
+
+        tdSql.query("select last(*) from t interval(1s)")
+        tdSql.checkRows(10000)
+
    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)