Merge branch 'main' of https://github.com/taosdata/TDengine into main
commit 5cfec7a9e3

@@ -683,7 +683,7 @@ static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols, bool
     SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i);
     if (taosHashGet(hashTmp, kv->key, kv->keyLen) == NULL) {
       taosHashCleanup(hashTmp);
-      return -1;
+      return TSDB_CODE_SML_INVALID_DATA;
     }
   }
   taosHashCleanup(hashTmp);
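Review note: the hunks in this commit follow one pattern — bare `return -1;` sites in the schemaless (sml) parsers are replaced with named TSDB error codes, so callers can report why parsing failed instead of a generic failure. Below is a minimal caller-side sketch of that benefit; the wrapper `parseOneJsonLine` is hypothetical, while `smlJsonParseObjFirst`, `uError`, `tstrerror`, and the types are assumed to come from the repo's usual headers.

```c
// Hypothetical caller illustrating why typed error codes beat a bare -1.
// Assumes TDengine headers providing SSmlLineInfo, uError, tstrerror,
// TSDB_CODE_SUCCESS and OTD_JSON_FIELDS_NUM.
static int32_t parseOneJsonLine(char *line) {
  SSmlLineInfo element = {0};
  int8_t       offset[OTD_JSON_FIELDS_NUM] = {0};

  int32_t code = smlJsonParseObjFirst(&line, &element, offset);
  if (code != TSDB_CODE_SUCCESS) {
    // With -1 every failure looked identical; with typed codes the log can
    // distinguish malformed JSON from an out-of-range timestamp, and so on.
    uError("sml json parse failed, code:0x%x, reason:%s", code, tstrerror(code));
  }
  return code;
}
```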
@@ -256,7 +256,8 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset) {
   }
 
   if (unlikely(index >= OTD_JSON_FIELDS_NUM)) {
-    uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start) return -1;
+    uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start);
+    return TSDB_CODE_TSC_INVALID_JSON;
   }
 
   char *sTmp = *start;
@@ -367,7 +368,8 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset) {
 
   if (unlikely(index != OTD_JSON_FIELDS_NUM) || element->tags == NULL || element->cols == NULL ||
       element->measure == NULL || element->timestamp == NULL) {
-    uError("elements != %d or element parse null", OTD_JSON_FIELDS_NUM) return -1;
+    uError("elements != %d or element parse null", OTD_JSON_FIELDS_NUM);
+    return TSDB_CODE_TSC_INVALID_JSON;
   }
   return 0;
 }
@@ -381,7 +383,8 @@ int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset) {
   }
 
   if (unlikely(index >= OTD_JSON_FIELDS_NUM)) {
-    uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start) return -1;
+    uError("index >= %d, %s", OTD_JSON_FIELDS_NUM, *start);
+    return TSDB_CODE_TSC_INVALID_JSON;
   }
 
   if ((*start)[1] == 'm') {
@@ -448,7 +451,8 @@ int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset) {
   }
 
   if (unlikely(index != 0 && index != OTD_JSON_FIELDS_NUM)) {
-    uError("elements != %d", OTD_JSON_FIELDS_NUM) return -1;
+    uError("elements != %d", OTD_JSON_FIELDS_NUM);
+    return TSDB_CODE_TSC_INVALID_JSON;
   }
   return 0;
 }
@@ -477,7 +481,7 @@ static int32_t smlGetJsonElements(cJSON *root, cJSON ***marks) {
     }
     if (*marks[i] == NULL) {
       uError("smlGetJsonElements error, not find mark:%d:%s", i, jsonName[i]);
-      return -1;
+      return TSDB_CODE_TSC_INVALID_JSON;
     }
   }
   return TSDB_CODE_SUCCESS;
@@ -816,25 +820,25 @@ static int64_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int32_t toPrecision) {
   int32_t size = cJSON_GetArraySize(root);
   if (unlikely(size != OTD_JSON_SUB_FIELDS_NUM)) {
     smlBuildInvalidDataMsg(&info->msgBuf, "invalidate json", NULL);
-    return -1;
+    return TSDB_CODE_TSC_INVALID_JSON;
   }
 
   cJSON *value = cJSON_GetObjectItem(root, "value");
   if (unlikely(!cJSON_IsNumber(value))) {
     smlBuildInvalidDataMsg(&info->msgBuf, "invalidate json", NULL);
-    return -1;
+    return TSDB_CODE_TSC_INVALID_JSON;
   }
 
   cJSON *type = cJSON_GetObjectItem(root, "type");
   if (unlikely(!cJSON_IsString(type))) {
     smlBuildInvalidDataMsg(&info->msgBuf, "invalidate json", NULL);
-    return -1;
+    return TSDB_CODE_TSC_INVALID_JSON;
   }
 
   double timeDouble = value->valuedouble;
   if (unlikely(smlDoubleToInt64OverFlow(timeDouble))) {
     smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL);
-    return -1;
+    return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
   }
 
   if (timeDouble == 0) {
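Review note: for context, `smlParseTSFromJSONObj` handles an OpenTSDB-style timestamp written as an object rather than a plain number. A minimal sketch of the shape it accepts, using the same cJSON calls the function itself uses; it assumes `OTD_JSON_SUB_FIELDS_NUM` is 2 (a numeric "value" plus a string "type"), and the helper function name is illustrative.

```c
#include "cJSON.h"

// Sketch only: the object form of a timestamp, e.g. {"value": 1626006833, "type": "ms"}.
// smlParseTSFromJSONObj rejects the object unless all three checks below hold.
static void timestampObjectShape(void) {
  cJSON *root = cJSON_Parse("{\"value\": 1626006833, \"type\": \"ms\"}");
  if (root == NULL) return;

  cJSON *value = cJSON_GetObjectItem(root, "value");
  cJSON *type  = cJSON_GetObjectItem(root, "type");
  if (cJSON_GetArraySize(root) == 2 &&  // exactly "value" and "type"
      cJSON_IsNumber(value) &&          // value must be numeric
      cJSON_IsString(type)) {           // type must be a unit string: s, ms, us or ns
    // value->valuedouble and type->valuestring are what the hunks above consume.
  }
  cJSON_Delete(root);
}
```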
@@ -849,32 +853,29 @@ static int64_t smlParseTSFromJSONObj(SSmlHandle *info, cJSON *root, int32_t toPrecision) {
   size_t typeLen = strlen(type->valuestring);
   if (typeLen == 1 && (type->valuestring[0] == 's' || type->valuestring[0] == 'S')) {
     // seconds
-    int8_t fromPrecision = TSDB_TIME_PRECISION_SECONDS;
+    // int8_t fromPrecision = TSDB_TIME_PRECISION_SECONDS;
     if (smlFactorS[toPrecision] < INT64_MAX / tsInt64) {
       return tsInt64 * smlFactorS[toPrecision];
     }
-    return -1;
+    return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
   } else if (typeLen == 2 && (type->valuestring[1] == 's' || type->valuestring[1] == 'S')) {
     switch (type->valuestring[0]) {
       case 'm':
       case 'M':
         // milliseconds
         return convertTimePrecision(tsInt64, TSDB_TIME_PRECISION_MILLI, toPrecision);
-        break;
       case 'u':
       case 'U':
         // microseconds
         return convertTimePrecision(tsInt64, TSDB_TIME_PRECISION_MICRO, toPrecision);
-        break;
       case 'n':
       case 'N':
         return convertTimePrecision(tsInt64, TSDB_TIME_PRECISION_NANO, toPrecision);
-        break;
       default:
-        return -1;
+        return TSDB_CODE_TSC_INVALID_JSON_TYPE;
     }
   } else {
-    return -1;
+    return TSDB_CODE_TSC_INVALID_JSON_TYPE;
   }
 }
 
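Review note: the seconds branch multiplies by `smlFactorS[toPrecision]` (1e3/1e6/1e9 depending on the target precision), and the guard `smlFactorS[toPrecision] < INT64_MAX / tsInt64` proves the product fits in int64 before performing it. A standalone sketch of the same idea; `secondsToPrecision` and `factor` are illustrative names, not the repo's.

```c
#include <stdbool.h>
#include <stdint.h>

// Illustrative helper (not from the codebase): scale a positive seconds value to a
// finer precision only when the product provably fits in int64. Dividing INT64_MAX
// by the value first avoids performing an overflowing multiplication.
static bool secondsToPrecision(int64_t seconds, int64_t factor, int64_t *out) {
  if (seconds > 0 && factor < INT64_MAX / seconds) {
    *out = seconds * factor;  // e.g. factor = 1000000000 when the target is nanoseconds
    return true;
  }
  return false;  // would overflow; the parser now maps this to TSDB_CODE_TSC_VALUE_OUT_OF_RANGE
}
```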
@@ -895,7 +896,7 @@ static int64_t smlParseTSFromJSON(SSmlHandle *info, cJSON *timestamp) {
   double timeDouble = timestamp->valuedouble;
   if (unlikely(smlDoubleToInt64OverFlow(timeDouble))) {
     smlBuildInvalidDataMsg(&info->msgBuf, "timestamp is too large", NULL);
-    return -1;
+    return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
   }
 
   if (unlikely(timeDouble < 0)) {
@@ -911,14 +912,14 @@ static int64_t smlParseTSFromJSON(SSmlHandle *info, cJSON *timestamp) {
     if (unlikely(fromPrecision == -1)) {
       smlBuildInvalidDataMsg(&info->msgBuf,
                              "timestamp precision can only be seconds(10 digits) or milli seconds(13 digits)", NULL);
-      return -1;
+      return TSDB_CODE_SML_INVALID_DATA;
     }
     int64_t tsInt64 = timeDouble;
     if (fromPrecision == TSDB_TIME_PRECISION_SECONDS) {
       if (smlFactorS[toPrecision] < INT64_MAX / tsInt64) {
         return tsInt64 * smlFactorS[toPrecision];
       }
-      return -1;
+      return TSDB_CODE_TSC_VALUE_OUT_OF_RANGE;
     } else {
       return convertTimePrecision(timeDouble, fromPrecision, toPrecision);
     }
@@ -926,7 +927,7 @@ static int64_t smlParseTSFromJSON(SSmlHandle *info, cJSON *timestamp) {
     return smlParseTSFromJSONObj(info, timestamp, toPrecision);
   } else {
     smlBuildInvalidDataMsg(&info->msgBuf, "invalidate json", NULL);
-    return -1;
+    return TSDB_CODE_TSC_INVALID_JSON;
   }
 }
 
@@ -70,7 +70,7 @@ static int64_t smlParseInfluxTime(SSmlHandle *info, const char *data, int32_t len) {
   int64_t ts = smlGetTimeValue(data, len, fromPrecision, toPrecision);
   if (unlikely(ts == -1)) {
     smlBuildInvalidDataMsg(&info->msgBuf, "invalid timestamp", data);
-    return -1;
+    return TSDB_CODE_SML_INVALID_DATA;
   }
   return ts;
 }
@@ -516,10 +516,13 @@ int32_t tGetDelData(uint8_t *p, void *ph) {
 }
 
 int32_t tsdbKeyFid(TSKEY key, int32_t minutes, int8_t precision) {
+  int64_t fid;
   if (key < 0) {
-    return (int)((key + 1) / tsTickPerMin[precision] / minutes - 1);
+    fid = ((key + 1) / tsTickPerMin[precision] / minutes - 1);
+    return (fid < INT32_MIN) ? INT32_MIN : (int32_t)fid;
   } else {
-    return (int)((key / tsTickPerMin[precision] / minutes));
+    fid = ((key / tsTickPerMin[precision] / minutes));
+    return (fid > INT32_MAX) ? INT32_MAX : (int32_t)fid;
   }
 }
 
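Review note: `tsdbKeyFid` previously cast the 64-bit file-group computation straight to `int`, so an extreme `TSKEY` could silently wrap; the new version keeps the intermediate in `int64_t` and saturates at the `int32_t` range. A self-contained sketch of the same clamp; the names below are illustrative, not the repo's.

```c
#include <stdint.h>

// Illustrative version of the clamp added above: keep the division in 64 bits,
// then saturate at the int32_t range instead of truncating on the cast.
static int32_t clampToInt32(int64_t v) {
  if (v < INT32_MIN) return INT32_MIN;
  if (v > INT32_MAX) return INT32_MAX;
  return (int32_t)v;
}

// Hypothetical stand-in for tsdbKeyFid: ticksPerFid plays the role of
// tsTickPerMin[precision] * minutes.
static int32_t keyToFid(int64_t key, int64_t ticksPerFid) {
  int64_t fid = (key < 0) ? (key + 1) / ticksPerFid - 1
                          : key / ticksPerFid;
  return clampToInt32(fid);
}
```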
@@ -197,6 +197,7 @@ int32_t syncLogBufferInitWithoutLock(SSyncLogBuffer* pBuf, SSyncNode* pNode) {
   SyncIndex index = toIndex;
   SSyncRaftEntry* pEntry = NULL;
   bool takeDummy = false;
+  int emptySize = (TSDB_SYNC_LOG_BUFFER_SIZE >> 1);
 
   while (true) {
     if (index <= pBuf->commitIndex) {
@@ -210,7 +211,6 @@ int32_t syncLogBufferInitWithoutLock(SSyncLogBuffer* pBuf, SSyncNode* pNode) {
     }
 
     bool taken = false;
-    int emptySize = 5;
     if (toIndex - index + 1 <= pBuf->size - emptySize) {
       SSyncLogBufEntry tmp = {.pItem = pEntry, .prevLogIndex = -1, .prevLogTerm = -1};
       pBuf->entries[index % pBuf->size] = tmp;
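Review note: the sync change computes `emptySize` once as half of `TSDB_SYNC_LOG_BUFFER_SIZE` instead of a hard-coded 5 inside the loop, so the back-fill keeps headroom proportional to the buffer size. A simplified sketch of that capacity check; the struct and function below are illustrative stand-ins, not the repo's types.

```c
#include <stdbool.h>
#include <stdint.h>

// Simplified illustration of the check in the hunk above: only back-fill entries
// [index, toIndex] when they fit in the ring buffer while leaving emptySize free
// slots as headroom for new appends.
typedef struct {
  int64_t size;  // ring-buffer capacity (TSDB_SYNC_LOG_BUFFER_SIZE in the repo)
} SLogBufSketch;

static bool canBackfill(const SLogBufSketch *buf, int64_t index, int64_t toIndex) {
  int64_t emptySize = buf->size >> 1;  // reserve half the buffer
  return (toIndex - index + 1) <= buf->size - emptySize;
}
```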
@@ -30,7 +30,7 @@ class TDTestCase:
         self.replicaVar = int(replicaVar)
         tdLog.debug(f"start to excute {__file__}")
         tdSql.init(conn.cursor())
-        self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300;use deldata;
+        self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300 stt_trigger 4; ;use deldata;
         create table deldata.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int);
         create table deldata.ct1 using deldata.stb1 tags ( 1 );
         insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );
|
@ -38,7 +38,9 @@ class TDTestCase:
|
|||
delete from deldata.stb1;
|
||||
flush database deldata;
|
||||
insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );
|
||||
delete from deldata.ct1;'''
|
||||
delete from deldata.ct1;
|
||||
insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a );
|
||||
flush database deldata;'''
|
||||
def checkProcessPid(self,processName):
|
||||
i=0
|
||||
while i<60:
|
||||
|
@ -262,7 +264,7 @@ class TDTestCase:
|
|||
if self.is_list_same_as_ordered_list(resultList,expectList):
|
||||
print("The unordered list is the same as the ordered list.")
|
||||
else:
|
||||
tdlog.error("The unordered list is not the same as the ordered list.")
|
||||
tdLog.exit("The unordered list is not the same as the ordered list.")
|
||||
tdsql.execute("insert into test.d80 values (now+1s, 11, 103, 0.21);")
|
||||
tdsql.execute("insert into test.d9 values (now+5s, 4.3, 104, 0.4);")
|
||||
|
||||
|
|
|
@@ -18,7 +18,7 @@ python3 .\test.py -f 1-insert\influxdb_line_taosc_insert.py
 @REM #python3 .\test.py -f 1-insert\test_stmt_muti_insert_query.py
 @REM python3 .\test.py -f 1-insert\alter_stable.py
 @REM python3 .\test.py -f 1-insert\alter_table.py
-@REM python3 .\test.py -f 2-query\between.py
+python3 .\test.py -f 2-query\between.py
 @REM python3 .\test.py -f 2-query\distinct.py
 @REM python3 .\test.py -f 2-query\varchar.py
 @REM python3 .\test.py -f 2-query\ltrim.py
@@ -101,3 +101,4 @@ python3 .\test.py -f 7-tmq\subscribeStb.py
 @REM python3 .\test.py -f 7-tmq\subscribeStb3.py
 @REM python3 .\test.py -f 7-tmq\subscribeStb4.py
 @REM python3 .\test.py -f 7-tmq\db.py
+python3 .\test.py -f 6-cluster\5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py -N 6 -M 3