Merge branch '3.0' into test/TD-22889-3.0
commit 1f953a422d
@@ -18,14 +18,8 @@ To achieve absolutely no data loss, set wal_level to 2 and wal_fsync_period to 0
## Disaster Recovery
TDengine uses replication to provide high availability.
TDengine provides disaster recovery by using taosX to replicate data between two TDengine clusters deployed in two distant data centers. Assume there are two clusters, A and B, where A is the source and takes the workload of writing and querying, and B is the target. You can deploy `taosX` in the data center where cluster A resides; it consumes the data written into cluster A and writes it into cluster B. If the data center of cluster A is disrupted by a disaster, you can switch to cluster B to take the workload of data writing and querying, then deploy a `taosX` in the data center of cluster B to replicate data back to cluster A once it has been recovered, or to another cluster C if cluster A has not been recovered.
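To illustrate the mechanism, the sketch below builds a toy consumer on TDengine's data subscription feature using the `taospy` connector. It is a conceptual stand-in for what taosX automates, not taosX itself; the topic name, host addresses, and target table are placeholder assumptions.

```python
import taos
from taos.tmq import Consumer

# Target side: cluster B (placeholder address and schema).
target = taos.connect(host="cluster-b.example.com")

# Source side: a topic assumed to already exist on cluster A, e.g.
#   CREATE TOPIC meters_topic AS SELECT ts, current FROM power.meters;
consumer = Consumer({
    "group.id": "dr_replicator",
    "td.connect.ip": "cluster-a.example.com",
})
consumer.subscribe(["meters_topic"])

while True:
    msg = consumer.poll(1)  # wait up to one second for newly written data
    if msg is None:
        continue
    for block in msg.value():
        for ts, current in block.fetchall():
            # Re-apply each consumed row on cluster B (placeholder table).
            target.execute(f"INSERT INTO power.meters_copy VALUES ('{ts}', {current})")
```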
A TDengine cluster is managed by mnodes. You can configure up to three mnodes to ensure high availability. Data replication between mnode replicas is performed synchronously to guarantee metadata consistency.
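For reference, additional mnodes are created on specific dnodes. A minimal sketch using the `taospy` connector; the dnode IDs are assumptions for a three-node deployment:

```python
import taos

conn = taos.connect()  # assumes a local dnode with default credentials
# Promote two more dnodes to host mnode replicas (IDs 2 and 3 assumed).
conn.execute("CREATE MNODE ON DNODE 2")
conn.execute("CREATE MNODE ON DNODE 3")
```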
You can use the data replication feature of `taosX` to build more complex disaster recovery solutions.
The number of replicas for time series data in TDengine is associated with each database. A cluster can contain many databases, and each database can be configured with a different number of replicas. When creating a database, use the `replica` parameter to specify the number of replicas. To achieve high availability, set `replica` to 3.
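For example, a three-replica database can be created as follows. This is a minimal sketch using the `taospy` connector; the database name and credentials are placeholders:

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
# `replica` is fixed at database creation time; three copies give HA.
conn.execute("CREATE DATABASE IF NOT EXISTS power REPLICA 3")
conn.close()
```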
The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas of any database; otherwise, creating a table will fail.
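A quick way to see the constraint (a hedged sketch; the exact error code and message vary by version):

```python
import taos

conn = taos.connect()
try:
    # On a cluster with fewer than three dnodes this should be rejected.
    conn.execute("CREATE DATABASE bad_db REPLICA 3")
except Exception as err:  # taospy raises its own error subclasses
    print("create rejected:", err)
```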
As long as the dnodes of a TDengine cluster are deployed on different physical machines and the number of replicas is higher than 1, high availability can be achieved without any additional software. For disaster recovery, the dnodes of a TDengine cluster should be deployed in geographically distant data centers.
Alternatively, you can use taosX to synchronize the data from one TDengine cluster to another cluster in a remote location. However, taosX is only available in TDengine Enterprise; for more information, please contact tdengine.com.
taosX is only provided in TDengine Enterprise; for more details, please contact business@tdengine.com.
@@ -19,12 +19,8 @@ When TDengine receives a request data packet from an application, it first writes the original request packet
## Disaster Recovery
A TDengine cluster uses multiple replicas to provide high availability, which also offers a degree of disaster recovery capability.

TDengine implements disaster recovery by deploying two TDengine clusters in data centers at two different sites and using the data replication capability of taosX. Assume the two clusters are cluster A and cluster B, where cluster A is the source, taking write requests and providing query services. In the data center where cluster A resides, taosX can be configured to use TDengine's data subscription capability to consume newly written data in cluster A in real time and replicate it to cluster B. If a disaster makes the data center of cluster A unavailable, cluster B can be enabled as the primary site for data writing and querying, and taosX can be configured in cluster B's data center to replicate data to the recovered cluster A or to a newly built cluster C.

A TDengine cluster is managed by mnodes. To keep the mnode highly reliable, three mnode replicas can be configured; replication between mnode replicas is performed synchronously to guarantee strong metadata consistency.

More complex disaster recovery solutions can also be built on the data replication capability of taosX.

The number of replicas for time series data in a TDengine cluster is associated with each database: a cluster can contain multiple databases, and each database can be configured with its own number of replicas. When creating a database, specify the number of replicas with the `replica` parameter. To achieve high reliability, set the number of replicas to 3.

The number of nodes in a TDengine cluster must be greater than or equal to the number of replicas; otherwise, creating a table will fail.

When the dnodes of a TDengine cluster are deployed on different physical machines and multiple replicas are configured, the system is highly reliable without any additional software or tools. TDengine Enterprise can additionally deploy replicas in different data centers to achieve geo-redundant disaster recovery.

taosX is only provided in TDengine Enterprise; for details, please contact business@taosdata.com
@@ -251,7 +251,7 @@ class TDSql:
        if self.queryResult[row][col] != data:
            if self.cursor.istype(col, "TIMESTAMP"):
                # suppose the user wants to check a nanosecond timestamp if longer data is passed
                if isinstance(data,str) :
                    if (len(data) >= 28):
                        if self.queryResult[row][col] == _parse_ns_timestamp(data):
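`_parse_ns_timestamp` is a helper defined elsewhere in util/sql.py; the `len(data) >= 28` guard works because a timestamp string carrying nanosecond digits, such as '2021-12-01 08:56:12.345678912', is at least 28 characters long. A rough sketch of what such a parser does, written here as an assumption rather than the repo's actual implementation:

```python
import datetime

def parse_ns_timestamp(ts: str) -> int:
    """Convert 'YYYY-mm-dd HH:MM:SS.fffffffff' to nanoseconds since the epoch."""
    head, frac = ts.split(".")
    dt = datetime.datetime.strptime(head, "%Y-%m-%d %H:%M:%S")
    # Whole seconds in local time, plus the fractional part padded to 9 digits.
    return int(dt.timestamp()) * 1_000_000_000 + int(frac.ljust(9, "0"))

print(parse_ns_timestamp("2021-12-01 08:56:12.345678912"))
```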
@@ -260,7 +260,7 @@ class TDSql:
                        else:
                            caller = inspect.getframeinfo(inspect.stack()[1][0])
                            args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                            tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    else:
                        if self.queryResult[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc):
                            # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
@@ -270,12 +270,12 @@ class TDSql:
                    args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                    tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    return
                elif isinstance(data,int) :
                    if len(str(data)) == 16 :
                elif isinstance(data,int):
                    if len(str(data)) == 16:
                        precision = 'us'
                    elif len(str(data)) == 13 :
                    elif len(str(data)) == 13:
                        precision = 'ms'
                    elif len(str(data)) == 19 :
                    elif len(str(data)) == 19:
                        precision = 'ns'
                    else:
                        caller = inspect.getframeinfo(inspect.stack()[1][0])
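The integer branch above infers timestamp precision from digit count: a 13-digit epoch value is milliseconds, 16 digits microseconds, 19 digits nanoseconds. A standalone illustration of the heuristic:

```python
def infer_precision(epoch: int) -> str:
    # For present-day dates: 13 digits ~ ms, 16 ~ us, 19 ~ ns.
    return {13: "ms", 16: "us", 19: "ns"}.get(len(str(epoch)), "unknown")

print(infer_precision(1638319572345))        # 13 digits -> 'ms'
print(infer_precision(1638319572345678))     # 16 digits -> 'us'
print(infer_precision(1638319572345678912))  # 19 digits -> 'ns'
```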
@@ -303,11 +303,21 @@ class TDSql:
                    args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                    tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    return
                elif isinstance(data,datetime.datetime):
                    dt_obj = self.queryResult[row][col]
                    delt_data = data-datetime.datetime.fromtimestamp(0,data.tzinfo)
                    delt_result = self.queryResult[row][col] - datetime.datetime.fromtimestamp(0,self.queryResult[row][col].tzinfo)
                    if delt_data == delt_result:
                        tdLog.info("check successfully")
                    else:
                        caller = inspect.getframeinfo(inspect.stack()[1][0])
                        args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                        tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
                    return
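The datetime branch above compares each value's offset from its own epoch rather than the datetimes themselves, which makes the check independent of how the timezone is attached. A small demonstration of the idea:

```python
import datetime

t = 1638319572
utc = datetime.datetime.fromtimestamp(t, datetime.timezone.utc)
cst = datetime.datetime.fromtimestamp(t, datetime.timezone(datetime.timedelta(hours=8)))

# Same instant rendered in two zones; the offsets from each zone's epoch agree.
d_utc = utc - datetime.datetime.fromtimestamp(0, utc.tzinfo)
d_cst = cst - datetime.datetime.fromtimestamp(0, cst.tzinfo)
print(d_utc == d_cst)  # True: both equal 1638319572 seconds

# The same subtraction also works for a naive datetime (tzinfo=None), assuming
# the local UTC offset is the same now as it was at the epoch.
naive = datetime.datetime.fromtimestamp(t)
d_naive = naive - datetime.datetime.fromtimestamp(0, naive.tzinfo)
```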
                else:
                    caller = inspect.getframeinfo(inspect.stack()[1][0])
                    args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
                    tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)

        if str(self.queryResult[row][col]) == str(data):
            # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
@@ -38,11 +38,8 @@ class TDTestCase:
        if data_tb_col[i] is None:
            tdSql.checkData( i, 0 , None )
        if col_name not in ["c2", "double"] or tbname != f"{self.dbname}.t1" or i != 10:
            utc_zone = datetime.timezone.utc
            utc_8 = datetime.timezone(datetime.timedelta(hours=8))
            date_init_stamp = datetime.datetime.utcfromtimestamp(data_tb_col[i]/1000)
            date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
            tdSql.checkData( i, 0, date_data)
            date_init_stamp = datetime.datetime.fromtimestamp(data_tb_col[i]/1000)
            tdSql.checkData( i, 0, date_init_stamp)

    def __range_to_timestamp(self, cols, tables):
        for col in cols:
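The test change above is the heart of this commit: the old expectation hard-coded a UTC+8 rendering of the stored millisecond value, which fails on hosts in other timezones, while the new expectation derives the local rendering directly. A side-by-side sketch of the two constructions:

```python
import datetime

ms = 1638319572000  # a millisecond epoch value like those stored by the tests

# Old expectation: pin the rendering to UTC+8 (breaks on non-UTC+8 hosts).
utc_8 = datetime.timezone(datetime.timedelta(hours=8))
old = (datetime.datetime.utcfromtimestamp(ms / 1000)
       .replace(tzinfo=datetime.timezone.utc)
       .astimezone(utc_8)
       .strftime("%Y-%m-%d %H:%M:%S.%f"))

# New expectation: epoch in local time plus an offset, valid in any timezone.
_datetime_epoch = datetime.datetime.fromtimestamp(0)
new = _datetime_epoch + datetime.timedelta(seconds=int(ms) / 1000.0)

print(old)  # fixed UTC+8 rendering
print(new)  # local rendering, which the tests assume matches the server output
```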
@@ -60,7 +57,7 @@ class TDTestCase:
        self.__range_to_timestamp(cols=__col_list, tables=__table_list)

    def all_test(self):
        _datetime_epoch = datetime.datetime.fromtimestamp(0)
        tdSql.query(f"select c1 from {self.dbname}.ct4")
        data_ct4_c1 = [tdSql.getData(i,0) for i in range(tdSql.queryRows)]
        tdSql.query(f"select c1 from {self.dbname}.t1")
@@ -99,22 +96,16 @@ class TDTestCase:
        if data_ct4_c1[i] is None:
            tdSql.checkData( i, 0 , None )
        else:
            utc_zone = datetime.timezone.utc
            utc_8 = datetime.timezone(datetime.timedelta(hours=8))
            date_init_stamp = datetime.datetime.utcfromtimestamp(data_ct4_c1[i]/1000)
            date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
            tdSql.checkData( i, 0, date_data)
            date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c1[i]) / 1000.0)
            tdSql.checkData( i, 0, date_init_stamp)

        tdSql.query(f"select cast(c1 as timestamp) as b from {self.dbname}.t1")
        for i in range(len(data_t1_c1)):
            if data_t1_c1[i] is None:
                tdSql.checkData( i, 0 , None )
            else:
                utc_zone = datetime.timezone.utc
                utc_8 = datetime.timezone(datetime.timedelta(hours=8))
                date_init_stamp = datetime.datetime.utcfromtimestamp(data_t1_c1[i]/1000)
                date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
                tdSql.checkData( i, 0, date_data)
                date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c1[i]) / 1000.0)
                tdSql.checkData( i, 0, date_init_stamp)

        tdLog.printNoPrefix("==========step8: cast bigint to bigint, expect no changes")
@@ -156,11 +147,8 @@ class TDTestCase:
        if data_ct4_c2[i] is None:
            tdSql.checkData( i, 0 , None )
        else:
            utc_zone = datetime.timezone.utc
            utc_8 = datetime.timezone(datetime.timedelta(hours=8))
            date_init_stamp = datetime.datetime.utcfromtimestamp(data_ct4_c2[i]/1000)
            date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
            tdSql.checkData( i, 0, date_data)
            date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c2[i]) / 1000.0)
            tdSql.checkData( i, 0, date_init_stamp)

        tdSql.query(f"select cast(c2 as timestamp) as b from {self.dbname}.t1")
@@ -170,11 +158,8 @@ class TDTestCase:
            elif i == 10:
                continue
            else:
                utc_zone = datetime.timezone.utc
                utc_8 = datetime.timezone(datetime.timedelta(hours=8))
                date_init_stamp = datetime.datetime.utcfromtimestamp(data_t1_c2[i]/1000)
                date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
                tdSql.checkData( i, 0, date_data)
                date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c2[i]) / 1000.0)
                tdSql.checkData( i, 0, date_init_stamp)

        tdLog.printNoPrefix("==========step12: cast smallint to bigint, expect no changes")
@@ -216,22 +201,16 @@ class TDTestCase:
        if data_ct4_c3[i] is None:
            tdSql.checkData( i, 0 , None )
        else:
            utc_zone = datetime.timezone.utc
            utc_8 = datetime.timezone(datetime.timedelta(hours=8))
            date_init_stamp = datetime.datetime.utcfromtimestamp(data_ct4_c3[i]/1000)
            date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
            tdSql.checkData( i, 0, date_data)
            date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c3[i]) / 1000.0)
            tdSql.checkData( i, 0, date_init_stamp)

        tdSql.query(f"select cast(c3 as timestamp) as b from {self.dbname}.t1")
        for i in range(len(data_t1_c3)):
            if data_t1_c3[i] is None:
                tdSql.checkData( i, 0 , None )
            else:
                utc_zone = datetime.timezone.utc
                utc_8 = datetime.timezone(datetime.timedelta(hours=8))
                date_init_stamp = datetime.datetime.utcfromtimestamp(data_t1_c3[i]/1000)
                date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
                tdSql.checkData( i, 0, date_data)
                date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c3[i]) / 1000.0)
                tdSql.checkData( i, 0, date_init_stamp)

        tdLog.printNoPrefix("==========step16: cast tinyint to bigint, expect no changes")
@@ -273,22 +252,16 @@ class TDTestCase:
        if data_ct4_c4[i] is None:
            tdSql.checkData( i, 0 , None )
        else:
            utc_zone = datetime.timezone.utc
            utc_8 = datetime.timezone(datetime.timedelta(hours=8))
            date_init_stamp = datetime.datetime.utcfromtimestamp(data_ct4_c4[i]/1000)
            date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
            tdSql.checkData( i, 0, date_data)
            date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c4[i]) / 1000.0)
            tdSql.checkData( i, 0, date_init_stamp)

        tdSql.query(f"select cast(c4 as timestamp) as b from {self.dbname}.t1")
        for i in range(len(data_t1_c4)):
            if data_t1_c4[i] is None:
                tdSql.checkData( i, 0 , None )
            else:
                utc_zone = datetime.timezone.utc
                utc_8 = datetime.timezone(datetime.timedelta(hours=8))
                date_init_stamp = datetime.datetime.utcfromtimestamp(data_t1_c4[i]/1000)
                date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
                tdSql.checkData( i, 0, date_data)
                date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c4[i]) / 1000.0)
                tdSql.checkData( i, 0, date_init_stamp)

        tdLog.printNoPrefix("==========step20: cast float to bigint, expect no changes")
@@ -326,21 +299,15 @@ class TDTestCase:
        if data_ct4_c5[i] is None:
            tdSql.checkData( i, 0 , None )
        else:
            utc_zone = datetime.timezone.utc
            utc_8 = datetime.timezone(datetime.timedelta(hours=8))
            date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_ct4_c5[i])/1000)
            date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
            tdSql.checkData( i, 0, date_data)
            date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c5[i]) / 1000.0)
            tdSql.checkData( i, 0, date_init_stamp)
        tdSql.query(f"select cast(c5 as timestamp) as b from {self.dbname}.t1")
        for i in range(len(data_t1_c5)):
            if data_t1_c5[i] is None:
                tdSql.checkData( i, 0 , None )
            else:
                utc_zone = datetime.timezone.utc
                utc_8 = datetime.timezone(datetime.timedelta(hours=8))
                date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_t1_c5[i])/1000)
                date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
                tdSql.checkData( i, 0, date_data)
                date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c5[i]) / 1000.0)
                tdSql.checkData( i, 0, date_init_stamp)

        tdLog.printNoPrefix("==========step24: cast double to bigint, expect no changes")
        tdSql.query(f"select c6 from {self.dbname}.ct4")
@@ -382,11 +349,8 @@ class TDTestCase:
        if data_ct4_c6[i] is None:
            tdSql.checkData( i, 0 , None )
        else:
            utc_zone = datetime.timezone.utc
            utc_8 = datetime.timezone(datetime.timedelta(hours=8))
            date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_ct4_c6[i])/1000)
            date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
            tdSql.checkData( i, 0, date_data)
            date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c6[i]) / 1000.0)
            tdSql.checkData( i, 0, date_init_stamp)

        tdSql.query(f"select cast(c6 as timestamp) as b from {self.dbname}.t1")
        for i in range(len(data_t1_c6)):
@@ -395,11 +359,8 @@ class TDTestCase:
            elif i == 10:
                continue
            else:
                utc_zone = datetime.timezone.utc
                utc_8 = datetime.timezone(datetime.timedelta(hours=8))
                date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_t1_c6[i])/1000)
                date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
                tdSql.checkData( i, 0, date_data)
                date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c6[i]) / 1000.0)
                tdSql.checkData( i, 0, date_init_stamp)

        tdLog.printNoPrefix("==========step28: cast bool to bigint, expect no changes")
        tdSql.query(f"select c7 from {self.dbname}.ct4")
@@ -436,21 +397,15 @@ class TDTestCase:
        if data_ct4_c7[i] is None:
            tdSql.checkData( i, 0 , None )
        else:
            utc_zone = datetime.timezone.utc
            utc_8 = datetime.timezone(datetime.timedelta(hours=8))
            date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_ct4_c7[i])/1000)
            date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
            tdSql.checkData( i, 0, date_data)
            date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_ct4_c7[i]) / 1000.0)
            tdSql.checkData( i, 0, date_init_stamp)
        tdSql.query(f"select cast(c7 as timestamp) as b from {self.dbname}.t1")
        for i in range(len(data_t1_c7)):
            if data_t1_c7[i] is None:
                tdSql.checkData( i, 0 , None )
            else:
                utc_zone = datetime.timezone.utc
                utc_8 = datetime.timezone(datetime.timedelta(hours=8))
                date_init_stamp = datetime.datetime.utcfromtimestamp(int(data_t1_c7[i])/1000)
                date_data = date_init_stamp.replace(tzinfo=utc_zone).astimezone(utc_8).strftime("%Y-%m-%d %H:%M:%S.%f")
                tdSql.checkData( i, 0, date_data)
                date_init_stamp = _datetime_epoch+datetime.timedelta(seconds=int(data_t1_c7[i]) / 1000.0)
                tdSql.checkData( i, 0, date_init_stamp)

        tdSql.query(f"select c8 from {self.dbname}.ct4")
@@ -694,7 +649,7 @@ class TDTestCase:
tdSql.query(f"select cast('123' as tinyint unsigned) as b from {self.dbname}.stb1 partition by tbname")
|
||||
|
||||
# uion with cast and common cols
|
||||
|
||||
|
||||
tdSql.query(f"select cast(c2 as int) as b from {self.dbname}.stb1 union all select c1 from {self.dbname}.stb1 ")
|
||||
tdSql.query(f"select cast(c3 as bool) as b from {self.dbname}.stb1 union all select c7 from {self.dbname}.ct1 ")
|
||||
tdSql.query(f"select cast(c4 as tinyint) as b from {self.dbname}.stb1 union all select c4 from {self.dbname}.stb1")
|
||||