Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/vnode_compact
commit da40f5ccb9
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 261fcca
GIT_TAG 11b60a4
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

@@ -178,6 +178,77 @@ SHOW TABLE DISTRIBUTED table_name;

Shows how the data of a table is distributed.

Example: `show table distributed d0\G;` displays the block distribution of table `d0` in detailed (vertical) format.

*************************** 1.row ***************************
_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]

Total_Blocks: Table `d0` contains a total of 5 blocks
Total_Size: The total size of all the data blocks in table `d0` is 93.65 KB
Average_size: The average size of a block is 18.73 KB
Compression_Ratio: The data compression ratio is 23.98%

*************************** 2.row ***************************
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]

Total_Rows: Table `d0` contains 20,000 rows
Inmem_Rows: The number of rows still in memory, i.e. not yet committed to disk, is 0
MinRows: The minimum number of rows in a block is 3,616
MaxRows: The maximum number of rows in a block is 4,096
Average_Rows: The average number of rows in a block is 4,000

*************************** 3.row ***************************
_block_dist: Total_Tables=[1] Total_Files=[2]

Total_Tables: The number of child tables, 1 in this example
Total_Files: The number of files storing the table's data, 2 in this example

*************************** 4.row ***************************
_block_dist: --------------------------------------------------------------------------------
*************************** 5.row ***************************
_block_dist: 0100 |
*************************** 6.row ***************************
_block_dist: 0299 |
*************************** 7.row ***************************
_block_dist: 0498 |
*************************** 8.row ***************************
_block_dist: 0697 |
*************************** 9.row ***************************
_block_dist: 0896 |
*************************** 10.row ***************************
_block_dist: 1095 |
*************************** 11.row ***************************
_block_dist: 1294 |
*************************** 12.row ***************************
_block_dist: 1493 |
*************************** 13.row ***************************
_block_dist: 1692 |
*************************** 14.row ***************************
_block_dist: 1891 |
*************************** 15.row ***************************
_block_dist: 2090 |
*************************** 16.row ***************************
_block_dist: 2289 |
*************************** 17.row ***************************
_block_dist: 2488 |
*************************** 18.row ***************************
_block_dist: 2687 |
*************************** 19.row ***************************
_block_dist: 2886 |
*************************** 20.row ***************************
_block_dist: 3085 |
*************************** 21.row ***************************
_block_dist: 3284 |
*************************** 22.row ***************************
_block_dist: 3483 ||||||||||||||||| 1 (20.00%)
*************************** 23.row ***************************
_block_dist: 3682 |
*************************** 24.row ***************************
_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)
Query OK, 24 row(s) in set (0.002444s)

The rows above form a histogram of the blocks by row count: the leading number of each row (0100, 0299, 0498, ...) is the lower bound of a row-count bucket. In this example, `_block_dist: 3483 ||||||||||||||||| 1 (20.00%)` means there is one block whose row count is between 3,483 and 3,681, and `_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)` means there are four blocks whose row count is between 3,881 and 4,096. The number of blocks in every other bucket is zero.
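
As a quick sanity check on the figures above, here is a minimal sketch in plain Python (not part of TDengine) that recomputes the derived values, `Average_size` and the bucket percentages, from the reported raw numbers:

```python
# Recompute the derived values shown in the SHOW TABLE DISTRIBUTED output above.
total_blocks = 5
total_size_kb = 93.65                 # Total_Size reported for table d0
bucket_counts = {3483: 1, 3881: 4}    # blocks per row-count bucket (rows 22 and 24)

average_size_kb = total_size_kb / total_blocks
print(f"Average_size = {average_size_kb:.2f} Kb")           # 18.73 Kb

for lower_bound, count in sorted(bucket_counts.items()):
    pct = 100.0 * count / total_blocks
    print(f"{lower_bound}: {count} block(s), {pct:.2f}%")   # 20.00% and 80.00%
```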

## SHOW TAGS

```sql

@@ -179,6 +179,75 @@ SHOW TABLE DISTRIBUTED table_name;

Shows how the data of a table is distributed.

Example:

Statement: show table distributed d0\G; displays the block distribution of table d0 in detailed (vertical) format.

*************************** 1.row ***************************

_block_dist: Total_Blocks=[5] Total_Size=[93.65 Kb] Average_size=[18.73 Kb] Compression_Ratio=[23.98 %]

Total_Blocks: table d0 occupies 5 blocks
Total_Size: all blocks of table d0 occupy 93.65 KB in the data files
Average_size: on average each block occupies 18.73 KB in the files
Compression_Ratio: the data compression ratio is 23.98%

*************************** 2.row ***************************

_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]

Total_Rows: table d0 contains 20,000 rows in total
Inmem_Rows: the number of rows still in memory, i.e. not yet flushed to disk, is 0
MinRows: the minimum number of rows in a block is 3,616
MaxRows: the maximum number of rows in a block is 4,096
Average_Rows: the average number of rows in a block is 4,000

*************************** 3.row ***************************

_block_dist: Total_Tables=[1] Total_Files=[2]

Total_Tables: the number of child tables, 1 in this example
Total_Files: the number of files in which the table's data is stored, 2 in this example

*************************** 5.row ***************************

_block_dist: 0100 |

*************************** 6.row ***************************

_block_dist: 0299 |

......

*************************** 22.row ***************************

_block_dist: 3483 ||||||||||||||||| 1 (20.00%)

*************************** 23.row ***************************

_block_dist: 3682 |

*************************** 24.row ***************************

_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)

Query OK, 24 row(s) in set (0.002444s)

The rows above form a histogram of the blocks by row count: the leading numbers 0100, 0299, 0498, ... are the lower bounds of the row-count buckets. Of this table's 5 blocks, 1 block (20%) falls in the 3,483 to 3,681 bucket, 4 blocks (80%) fall in the 3,881 to 4,096 (maximum row count) bucket, and every other bucket contains 0 blocks.
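
If the histogram needs to be consumed programmatically, a small sketch in plain Python (illustrative only; it assumes the text output shown above as input) that extracts (lower bound, block count, percentage) from the `_block_dist` bucket rows:

```python
import re

# Match bucket rows such as "_block_dist: 3881 |||||| 4 (80.00%)".
# Rows that end with a bare "|" are empty buckets.
BUCKET_RE = re.compile(r"_block_dist:\s+(\d{4})\s+\|*\s*(?:(\d+)\s+\(([\d.]+)%\))?")

def parse_block_dist(text):
    buckets = []
    for line in text.splitlines():
        m = BUCKET_RE.search(line)
        if m:
            lower = int(m.group(1))
            count = int(m.group(2)) if m.group(2) else 0
            pct = float(m.group(3)) if m.group(3) else 0.0
            buckets.append((lower, count, pct))
    return buckets

sample = """_block_dist: 3483 ||||||||||||||||| 1 (20.00%)
_block_dist: 3682 |
_block_dist: 3881 ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| 4 (80.00%)"""
print(parse_block_dist(sample))   # [(3483, 1, 20.0), (3682, 0, 0.0), (3881, 4, 80.0)]
```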

## SHOW TAGS

```sql

@@ -153,6 +153,8 @@ class TDTestCase:
        tdSql.query("select count(*) from db.`stb4-2`")
        tdSql.checkData(0, 0, 160)

        tAdapter.stop()

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

@@ -320,7 +320,8 @@ class TDTestCase:
        tdSql.checkData(0, 0, 160)
        tdSql.query("select count(*) from db.stb where t13 like 'b1%' or t13 like 'b2%'")
        tdSql.checkData(0, 0, 160)


        tAdapter.stop()

    def stop(self):
        tdSql.close()

@@ -116,11 +116,11 @@ class TDTestCase:

        assert times == 1, "result is %s != expect: 1" % times


        tAdapter.stop()


    def stop(self):
        tdSql.close()
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)

@@ -480,8 +480,8 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/countAlwaysReturnValue.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/countAlwaysReturnValue.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/db.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/db.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/db.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/db.py -N 3 -n 3 -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distinct.py

@@ -227,7 +227,7 @@ class TAdapter:
            time.sleep(0.1)

    def stop(self, force_kill=False):
        signal = "-SIGKILL" if force_kill else "-SIGTERM"
        signal = "-9" if force_kill else "-15"

        if self.remoteIP:
            self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.running=1\ntAdapter.stop()")

@@ -238,16 +238,13 @@ class TAdapter:

        if self.running != 0:
            psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'"
            # psCmd = f"pgrep {toBeKilled}"
            processID = subprocess.check_output(
                psCmd, shell=True)

            while(processID):
                killCmd = f"pkill {signal} {processID} > /dev/null "
            # psCmd = f"pgrep {toBeKilled}"
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
            while(processID):
                killCmd = "kill %s %s > /dev/null 2>&1" % (signal, processID)
                os.system(killCmd)
                time.sleep(1)
                processID = subprocess.check_output(
                    psCmd, shell=True).decode("utf-8")
                processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
        if not platform.system().lower() == 'windows':
            port = 6041
            fuserCmd = f"fuser -k -n tcp {port} > /dev/null"

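For context, the stop logic the hunk above appears to converge on is: look up the PID(s) of the target process with ps/grep/awk, send SIGTERM (or SIGKILL when force_kill is set) in a loop until nothing is left, then free port 6041 with fuser on non-Windows hosts. A minimal standalone sketch of that flow (the helper name stop_by_name is illustrative, not part of the test framework):

```python
import os
import platform
import subprocess
import time

def stop_by_name(name: str, force_kill: bool = False) -> None:
    """Illustrative helper mirroring the TAdapter.stop() flow shown above."""
    sig = "-9" if force_kill else "-15"          # SIGKILL vs SIGTERM
    ps_cmd = f"ps -ef | grep -w {name} | grep -v grep | awk '{{print $2}}'"
    pids = subprocess.check_output(ps_cmd, shell=True).decode("utf-8").strip()
    while pids:
        # kill every PID found, then re-check until none are left
        for pid in pids.split():
            os.system(f"kill {sig} {pid} > /dev/null 2>&1")
        time.sleep(1)
        pids = subprocess.check_output(ps_cmd, shell=True).decode("utf-8").strip()
    if platform.system().lower() != "windows":
        # make sure nothing is still bound to the taosAdapter port
        os.system("fuser -k -n tcp 6041 > /dev/null 2>&1")
```
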
@@ -42,16 +42,36 @@ class TDTestCase:

        tdSql.query("select count(c1) from dbns.ntb interval(1b)")
        tdSql.checkRows(2)

    def case2(self):
        tdSql.query("show variables")
        tdSql.checkRows(4)

        for i in range(self.replicaVar):
            tdSql.query("show dnode %d variables like 'debugFlag'" % (i + 1))
            tdSql.checkRows(1)
            tdSql.checkData(0, 0, i + 1)
            tdSql.checkData(0, 1, 'debugFlag')
            tdSql.checkData(0, 2, 0)

        tdSql.query("show dnode 1 variables like '%debugFlag'")
        tdSql.checkRows(21)

        tdSql.query("show dnode 1 variables like '____debugFlag'")
        tdSql.checkRows(2)

    def run(self):  # sourcery skip: extract-duplicate-method, remove-redundant-fstring
        tdSql.prepare()
        tdSql.prepare(replica = self.replicaVar)


        tdLog.printNoPrefix("==========start case1 run ...............")

        self.case1()

        tdLog.printNoPrefix("==========end case1 run ...............")

        tdLog.printNoPrefix("==========start case2 run ...............")
        self.case2()
        tdLog.printNoPrefix("==========end case2 run ...............")

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")