diff --git a/Jenkinsfile b/Jenkinsfile index e6e8a1df32..b073c32e13 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -5,7 +5,7 @@ node { git url: 'https://github.com/taosdata/TDengine.git' } -def skipstage=0 +def skipbuild=0 def abortPreviousBuilds() { def currentJobName = env.JOB_NAME @@ -33,8 +33,7 @@ def abort_previous(){ milestone(buildNumber) } def pre_test(){ - - + sh'hostname' sh ''' sudo rmtaos || echo "taosd has not installed" ''' @@ -52,12 +51,18 @@ def pre_test(){ git checkout master ''' } - else { + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + cd ${WKC} + git checkout 2.0 + ''' + } + else{ sh ''' cd ${WKC} git checkout develop ''' - } + } } sh''' cd ${WKC} @@ -75,7 +80,13 @@ def pre_test(){ git checkout master ''' } - else { + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + cd ${WK} + git checkout 2.0 + ''' + } + else{ sh ''' cd ${WK} git checkout develop @@ -95,19 +106,17 @@ def pre_test(){ make > /dev/null make install > /dev/null cd ${WKC}/tests - pip3 install ${WKC}/src/connector/python + pip3 install ${WKC}/src/connector/python/ ''' return 1 } pipeline { agent none - environment{ WK = '/var/lib/jenkins/workspace/TDinternal' WKC= '/var/lib/jenkins/workspace/TDinternal/community' } - stages { stage('pre_build'){ agent{label 'master'} @@ -123,19 +132,22 @@ pipeline { rm -rf ${WORKSPACE}.tes cp -r ${WORKSPACE} ${WORKSPACE}.tes cd ${WORKSPACE}.tes - + git fetch ''' script { if (env.CHANGE_TARGET == 'master') { sh ''' git checkout master - git pull origin master ''' } - else { + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + git checkout 2.0 + ''' + } + else{ sh ''' git checkout develop - git pull origin develop ''' } } @@ -143,32 +155,34 @@ pipeline { git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD ''' - - script{ - env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true) + + script{ + skipbuild='2' + skipbuild=sh(script: "git log -2 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]' && echo 1 || echo 2", returnStdout:true) + println skipbuild } - println env.skipstage sh''' rm -rf ${WORKSPACE}.tes ''' } } - stage('Parallel test stage') { //only build pr when { + allOf{ changeRequest() - expression { - env.skipstage != 0 + expression{ + return skipbuild.trim() == '2' } + } } parallel { stage('python_1_s1') { - agent{label 'p1'} + agent{label " slave1 || slave11 "} steps { pre_test() - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -179,11 +193,11 @@ pipeline { } } stage('python_2_s5') { - agent{label 'p2'} + agent{label " slave5 || slave15 "} steps { pre_test() - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -193,9 +207,9 @@ pipeline { } } stage('python_3_s6') { - agent{label 'p3'} + agent{label " slave6 || slave16 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -206,9 +220,9 @@ pipeline { } } stage('test_b1_s2') { - agent{label 'b1'} + agent{label " slave2 || slave12 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' cd ${WKC}/tests @@ -217,9 +231,8 @@ pipeline { } } } - stage('test_crash_gen_s3') { - agent{label "b2"} + agent{label " slave3 || slave13 "} steps { pre_test() @@ -245,20 +258,18 @@ pipeline { ./handle_taosd_val_log.sh ''' } - timeout(time: 45, unit: 'MINUTES'){ 
+ timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests ./test-all.sh b2fq date ''' - } - + } } } - stage('test_valgrind_s4') { - agent{label "b3"} + agent{label " slave4 || slave14 "} steps { pre_test() @@ -269,7 +280,7 @@ pipeline { ./handle_val_log.sh ''' } - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ sh ''' date cd ${WKC}/tests @@ -284,9 +295,9 @@ pipeline { } } stage('test_b4_s7') { - agent{label 'b4'} + agent{label " slave7 || slave17 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -303,9 +314,9 @@ pipeline { } } stage('test_b5_s8') { - agent{label 'b5'} + agent{label " slave8 || slave18 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -316,9 +327,9 @@ pipeline { } } stage('test_b6_s9') { - agent{label 'b6'} + agent{label " slave9 || slave19 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -329,9 +340,9 @@ pipeline { } } stage('test_b7_s10') { - agent{label 'b7'} + agent{label " slave10 || slave20 "} steps { - timeout(time: 45, unit: 'MINUTES'){ + timeout(time: 55, unit: 'MINUTES'){ pre_test() sh ''' date @@ -421,6 +432,5 @@ pipeline { from: "support@taosdata.com" ) } - } - -} + } +} \ No newline at end of file diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index db20ca4edb..ecc9352ba6 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -1,6 +1,6 @@ # TDengine 集群安装、管理 -多个TDengine服务器,也就是多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看TDengine 2.0整体架构一章。而且在安装集群之前,先请按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章安装并体验单节点功能。 +多个TDengine服务器,也就是多个taosd的运行实例可以组成一个集群,以保证TDengine的高可靠运行,并提供水平扩展能力。要了解TDengine 2.0的集群管理,需要对集群的基本概念有所了解,请看《TDengine整体架构》一章。而且在安装集群之前,建议先按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章安装并体验单节点功能。 集群的每个数据节点是由End Point来唯一标识的,End Point是由FQDN(Fully Qualified Domain Name)外加Port组成,比如 h1.taosdata.com:6030。一般FQDN就是服务器的hostname,可通过Linux命令`hostname -f`获取(如何配置FQDN,请参考:[一篇文章说清楚TDengine的FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html))。端口是这个数据节点对外服务的端口号,缺省是6030,但可以通过taos.cfg里配置参数serverPort进行修改。一个物理节点可能配置了多个hostname, TDengine会自动获取第一个,但也可以通过taos.cfg里配置参数fqdn进行指定。如果习惯IP地址直接访问,可以将参数fqdn设置为本节点的IP地址。 @@ -12,7 +12,7 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预 **第零步**:规划集群所有物理节点的FQDN,将规划好的FQDN分别添加到每个物理节点的/etc/hostname;修改每个物理节点的/etc/hosts,将所有集群物理节点的IP与FQDN的对应添加好。【如部署了DNS,请联系网络管理员在DNS上做好相关配置】 -**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据,具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html ) +**第一步**:如果搭建集群的物理节点中,存有之前的测试数据、装过1.X的版本,或者装过其他版本的TDengine,请先将其删除,并清空所有数据(如果需要保留原有数据,请联系涛思交付团队进行旧版本升级、数据迁移),具体步骤请参考博客[《TDengine多种安装包的安装和卸载》](https://www.taosdata.com/blog/2019/08/09/566.html ) **注意1:**因为FQDN的信息会写进文件,如果之前没有配置或者更改FQDN,且启动了TDengine。请一定在确保数据无用或者备份的前提下,清理一下之前的数据(`rm -rf /var/lib/taos/*`); **注意2:**客户端也需要配置,确保它可以正确解析每个节点的FQDN配置,不管是通过DNS服务,还是 Host 文件。 @@ -23,23 +23,23 @@ TDengine的集群管理极其简单,除添加和删除节点需要人工干预 **第四步**:检查所有数据节点,以及应用程序所在物理节点的网络设置: 1. 每个物理节点上执行命令`hostname -f`,查看和确认所有节点的hostname是不相同的(应用驱动所在节点无需做此项检查); -2. 每个物理节点上执行`ping host`, 其中host是其他物理节点的hostname, 看能否ping通其它物理节点; 如果不能ping通,需要检查网络设置, 或/etc/hosts文件(Windows系统默认路径为C:\Windows\system32\drivers\etc\hosts),或DNS的配置。如果无法ping通,是无法组成集群的; +2. 
每个物理节点上执行`ping host`,其中host是其他物理节点的hostname,看能否ping通其它物理节点;如果不能ping通,需要检查网络设置,或/etc/hosts文件(Windows系统默认路径为C:\Windows\system32\drivers\etc\hosts),或DNS的配置。如果无法ping通,是无法组成集群的; 3. 从应用运行的物理节点,ping taosd运行的数据节点,如果无法ping通,应用是无法连接taosd的,请检查应用所在物理节点的DNS设置或hosts文件; 4. 每个数据节点的End Point就是输出的hostname外加端口号,比如h1.taosdata.com:6030 -**第五步**:修改TDengine的配置文件(所有节点的文件/etc/taos/taos.cfg都需要修改)。假设准备启动的第一个数据节点End Point为 h1.taosdata.com:6030, 其与集群配置相关参数如下: +**第五步**:修改TDengine的配置文件(所有节点的文件/etc/taos/taos.cfg都需要修改)。假设准备启动的第一个数据节点End Point为 h1.taosdata.com:6030,其与集群配置相关参数如下: ``` // firstEp 是每个数据节点首次启动后连接的第一个数据节点 firstEp h1.taosdata.com:6030 -// 必须配置为本数据节点的FQDN,如果本机只有一个hostname, 可注释掉本配置 +// 必须配置为本数据节点的FQDN,如果本机只有一个hostname, 可注释掉本项 fqdn h1.taosdata.com // 配置本数据节点的端口号,缺省是6030 serverPort 6030 -// 使用场景,请参考《Arbitrator的使用》的部分 +// 副本数为偶数的时候,需要配置,请参考《Arbitrator的使用》的部分 arbitrator ha.taosdata.com:6042 ``` @@ -53,7 +53,7 @@ arbitrator ha.taosdata.com:6042 | 2 | mnodeEqualVnodeNum | 一个mnode等同于vnode消耗的个数 | | 3 | offlineThreshold | dnode离线阈值,超过该时间将导致Dnode离线 | | 4 | statusInterval | dnode向mnode报告状态时长 | -| 5 | arbitrator | 系统中裁决器的end point | +| 5 | arbitrator | 系统中裁决器的End Point | | 6 | timezone | 时区 | | 7 | balance | 是否启动负载均衡 | | 8 | maxTablesPerVnode | 每个vnode中能够创建的最大表个数 | @@ -87,7 +87,7 @@ taos> 1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEP 参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030) -2. 在第一个数据节点,使用CLI程序taos, 登录进TDengine系统, 执行命令: +2. 在第一个数据节点,使用CLI程序taos,登录进TDengine系统,执行命令: ``` CREATE DNODE "h2.taos.com:6030"; @@ -101,7 +101,7 @@ taos> SHOW DNODES; ``` - 查看新节点是否被成功加入。如果该被加入的数据节点处于离线状态,请做两个检查 + 查看新节点是否被成功加入。如果该被加入的数据节点处于离线状态,请做两个检查: - 查看该数据节点的taosd是否正常工作,如果没有正常运行,需要先检查为什么 - 查看该数据节点taosd日志文件taosdlog.0里前面几行日志(一般在/var/log/taos目录),看日志里输出的该数据节点fqdn以及端口号是否为刚添加的End Point。如果不一致,需要将正确的End Point添加进去。 @@ -121,7 +121,7 @@ taos> ### 添加数据节点 -执行CLI程序taos, 使用root账号登录进系统, 执行: +执行CLI程序taos,使用root账号登录进系统,执行: ``` CREATE DNODE "fqdn:port"; @@ -131,13 +131,13 @@ CREATE DNODE "fqdn:port"; ### 删除数据节点 -执行CLI程序taos, 使用root账号登录进TDengine系统,执行: +执行CLI程序taos,使用root账号登录进TDengine系统,执行: -``` -DROP DNODE "fqdn:port"; +```mysql +DROP DNODE "fqdn:port | dnodeID"; ``` -其中fqdn是被删除的节点的FQDN,port是其对外服务器的端口号 +通过"fqdn:port"或"dnodeID"来指定一个具体的节点都是可以的。其中fqdn是被删除的节点的FQDN,port是其对外服务器的端口号;dnodeID可以通过SHOW DNODES获得。 **【注意】** @@ -147,25 +147,41 @@ DROP DNODE "fqdn:port"; - 一个数据节点被drop之后,其他节点都会感知到这个dnodeID的删除操作,任何集群中的节点都不会再接收此dnodeID的请求。 - - dnodeID的是集群自动分配的,不得人工指定。它在生成时递增的,不会重复。 + - dnodeID是集群自动分配的,不得人工指定。它在生成时是递增的,不会重复。 + +### 手动迁移数据节点 + +手动将某个vnode迁移到指定的dnode。 + +执行CLI程序taos,使用root账号登录进TDengine系统,执行: + +```mysql +ALTER DNODE BALANCE "VNODE:-DNODE:"; +``` + +其中:source-dnodeId是源dnodeId,也就是待迁移的vnode所在的dnodeID;vgId可以通过SHOW VGROUPS获得,列表的第一列;dest-dnodeId是目标dnodeId。 + +**【注意】** + + - 只有在集群的自动负载均衡选项关闭时(balance设置为0),才允许手动迁移。 + - 只有处于正常工作状态的vnode才能被迁移:master/slave,当处于offline/unsynced/syncing状态时,是不能迁移的。 + - 迁移前,务必核实目标dnode的资源足够:CPU、内存、硬盘。 ### 查看数据节点 -执行CLI程序taos,使用root账号登录进TDengine系统,执行: - -``` +执行CLI程序taos,使用root账号登录进TDengine系统,执行: +```mysql SHOW DNODES; ``` -它将列出集群中所有的dnode,每个dnode的fqdn:port, 状态(ready, offline等),vnode数目,还未使用的vnode数目等信息。在添加或删除一个数据节点后,可以使用该命令查看。 +它将列出集群中所有的dnode,每个dnode的ID,end_point(fqdn:port),状态(ready, offline等),vnode数目,还未使用的vnode数目等信息。在添加或删除一个数据节点后,可以使用该命令查看。 ### 查看虚拟节点组 为充分利用多核技术,并提供scalability,数据需要分片处理。因此TDengine会将一个DB的数据切分成多份,存放在多个vnode里。这些vnode可能分布在多个数据节点dnode里,这样就实现了水平扩展。一个vnode仅仅属于一个DB,但一个DB可以有多个vnode。vnode的是mnode根据当前系统资源的情况,自动进行分配的,无需任何人工干预。 -执行CLI程序taos,使用root账号登录进TDengine系统,执行: - 
-``` +执行CLI程序taos,使用root账号登录进TDengine系统,执行: +```mysql SHOW VGROUPS; ``` @@ -173,9 +189,9 @@ SHOW VGROUPS; TDengine通过多副本的机制来提供系统的高可用性,包括vnode和mnode的高可用性。 -vnode的副本数是与DB关联的,一个集群里可以有多个DB,根据运营的需求,每个DB可以配置不同的副本数。创建数据库时,通过参数replica 指定副本数(缺省为1)。如果副本数为1,系统的可靠性无法保证,只要数据所在的节点宕机,就将无法提供服务。集群的节点数必须大于等于副本数,否则创建表时将返回错误“more dnodes are needed"。比如下面的命令将创建副本数为3的数据库demo: +vnode的副本数是与DB关联的,一个集群里可以有多个DB,根据运营的需求,每个DB可以配置不同的副本数。创建数据库时,通过参数replica 指定副本数(缺省为1)。如果副本数为1,系统的可靠性无法保证,只要数据所在的节点宕机,就将无法提供服务。集群的节点数必须大于等于副本数,否则创建表时将返回错误"more dnodes are needed"。比如下面的命令将创建副本数为3的数据库demo: -``` +```mysql CREATE DATABASE demo replica 3; ``` @@ -183,20 +199,19 @@ CREATE DATABASE demo replica 3; 一个数据节点dnode里可能有多个DB的数据,因此一个dnode离线时,可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作,那么该vnode group就无法对外服务,无法插入或读取数据,这样会影响到它所属的DB的一部分表的读写操作。 -因为vnode的引入,无法简单的给出结论:“集群中过半数据节点dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个数据节点不工作,那整个集群就无法正常工作了。 +因为vnode的引入,无法简单地给出结论:“集群中过半数据节点dnode工作,集群就应该工作”。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个数据节点不工作,那整个集群就无法正常工作了。 ## Mnode的高可用性 TDengine集群是由mnode (taosd的一个模块,管理节点) 负责管理的,为保证mnode的高可用,可以配置多个mnode副本,副本数由系统配置参数numOfMnodes决定,有效范围为1-3。为保证元数据的强一致性,mnode副本之间是通过同步的方式进行数据复制的。 -一个集群有多个数据节点dnode, 但一个dnode至多运行一个mnode实例。多个dnode情况下,哪个dnode可以作为mnode呢?这是完全由系统根据整个系统资源情况,自动指定的。用户可通过CLI程序taos,在TDengine的console里,执行如下命令: +一个集群有多个数据节点dnode,但一个dnode至多运行一个mnode实例。多个dnode情况下,哪个dnode可以作为mnode呢?这是完全由系统根据整个系统资源情况,自动指定的。用户可通过CLI程序taos,在TDengine的console里,执行如下命令: -``` +```mysql SHOW MNODES; ``` -来查看mnode列表,该列表将列出mnode所处的dnode的End Point和角色(master, slave, unsynced 或offline)。 -当集群中第一个数据节点启动时,该数据节点一定会运行一个mnode实例,否则该数据节点dnode无法正常工作,因为一个系统是必须有至少一个mnode的。如果numOfMnodes配置为2,启动第二个dnode时,该dnode也将运行一个mnode实例。 +来查看mnode列表,该列表将列出mnode所处的dnode的End Point和角色(master, slave, unsynced 或offline)。当集群中第一个数据节点启动时,该数据节点一定会运行一个mnode实例,否则该数据节点dnode无法正常工作,因为一个系统是必须有至少一个mnode的。如果numOfMnodes配置为2,启动第二个dnode时,该dnode也将运行一个mnode实例。 为保证mnode服务的高可用性,numOfMnodes必须设置为2或更大。因为mnode保存的元数据必须是强一致的,如果numOfMnodes大于2,复制参数quorum自动设为2,也就是说,至少要保证有两个副本写入数据成功,才通知客户端应用写入成功。 @@ -210,7 +225,7 @@ SHOW MNODES; - 当一个数据节点从集群中移除时,系统将自动把该数据节点上的数据转移到其他数据节点,无需任何人工干预。 - 如果一个数据节点过热(数据量过大),系统将自动进行负载均衡,将该数据节点的一些vnode自动挪到其他节点。 -当上述三种情况发生时,系统将启动一各个数据节点的负载计算,从而决定如何挪动。 +当上述三种情况发生时,系统将启动各个数据节点的负载计算,从而决定如何挪动。 **【提示】负载均衡由参数balance控制,它决定是否启动自动负载均衡。** @@ -225,7 +240,7 @@ SHOW MNODES; ## Arbitrator的使用 -如果副本数为偶数,当一个 vnode group 里一半 vnode 不工作时,是无法从中选出 master 的。同理,一半 mnode 不工作时,是无法选出 mnode 的 master 的,因为存在“split brain”问题。为解决这个问题,TDengine 引入了 Arbitrator 的概念。Arbitrator 模拟一个 vnode 或 mnode 在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含 Arbitrator 在内,超过半数的 vnode 或 mnode 工作,那么该 vnode group 或 mnode 组就可以正常的提供数据插入或查询服务。比如对于副本数为 2 的情形,如果一个节点 A 离线,但另外一个节点 B 正常,而且能连接到 Arbitrator,那么节点 B 就能正常工作。 +如果副本数为偶数,当一个 vnode group 里一半或超过一半的 vnode 不工作时,是无法从中选出 master 的。同理,一半或超过一半的 mnode 不工作时,是无法选出 mnode 的 master 的,因为存在“split brain”问题。为解决这个问题,TDengine 引入了 Arbitrator 的概念。Arbitrator 模拟一个 vnode 或 mnode 在工作,但只简单的负责网络连接,不处理任何数据插入或访问。只要包含 Arbitrator 在内,超过半数的 vnode 或 mnode 工作,那么该 vnode group 或 mnode 组就可以正常的提供数据插入或查询服务。比如对于副本数为 2 的情形,如果一个节点 A 离线,但另外一个节点 B 正常,而且能连接到 Arbitrator,那么节点 B 就能正常工作。 总之,在目前版本下,TDengine 建议在双副本环境要配置 Arbitrator,以提升系统的可用性。 @@ -235,3 +250,9 @@ Arbitrator 的执行程序名为 tarbitrator。该程序对系统资源几乎没 3. 修改每个 taosd 实例的配置文件,在 taos.cfg 里将参数 arbitrator 设置为 tarbitrator 程序所对应的 End Point。(如果该参数配置了,当副本数为偶数时,系统将自动连接配置的 Arbitrator。如果副本数为奇数,即使配置了 Arbitrator,系统也不会去建立连接。) 4. 
在配置文件中配置了的 Arbitrator,会出现在 `SHOW DNODES;` 指令的返回结果中,对应的 role 列的值会是“arb”。 + +查看集群 Arbitrator 的状态【2.0.14.0 以后支持】 + +```mysql +SHOW DNODES; +``` diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 2eb4ac50cc..4a6eca4bb3 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -1,4 +1,4 @@ -# TDengine的运营与维护 +# TDengine的运营与运维 ## 容量规划 @@ -28,12 +28,28 @@ taosd 内存总量 = vnode 内存 + mnode 内存 + 查询内存 最后,如果内存充裕,可以考虑加大 Blocks 的配置,这样更多数据将保存在内存里,提高查询速度。 +#### 客户端内存需求 + +客户端应用采用 taosc 客户端驱动连接服务端,会有内存需求的开销。 + +客户端的内存开销主要由写入过程中的 SQL 语句、表的元数据信息缓存、以及结构性开销构成。系统最大容纳的表数量为 N(每个通过超级表创建的表的 meta data 开销约 256 字节),最大并行写入线程数量 T,最大 SQL 语句长度 S(通常是 1 Mbytes)。由此可以进行客户端内存开销的估算(单位 MBytes): +``` +M = (T * S * 3 + (N / 4096) + 100) +``` + +举例如下:用户最大并发写入线程数 100,子表数总数 10,000,000,那么客户端的内存最低要求是: +``` +100 * 3 + (10000000 / 4096) + 100 = 2741 (MBytes) +``` + +即配置 3 GBytes 内存是最低要求。 + ### CPU 需求 CPU 的需求取决于如下两方面: -* __数据插入__ TDengine 单核每秒能至少处理一万个插入请求。每个插入请求可以带多条记录,一次插入一条记录与插入 10 条记录,消耗的计算资源差别很小。因此每次插入,条数越大,插入效率越高。如果一个插入请求带 200 条以上记录,单核就能达到每秒插入 100 万条记录的速度。但对前端数据采集的要求越高,因为需要缓存记录,然后一批插入。 -* __查询需求__ TDengine 提供高效的查询,但是每个场景的查询差异很大,查询频次变化也很大,难以给出客观数字。需要用户针对自己的场景,写一些查询语句,才能确定。 +* **数据插入** TDengine 单核每秒能至少处理一万个插入请求。每个插入请求可以带多条记录,一次插入一条记录与插入 10 条记录,消耗的计算资源差别很小。因此每次插入,条数越大,插入效率越高。如果一个插入请求带 200 条以上记录,单核就能达到每秒插入 100 万条记录的速度。但对前端数据采集的要求越高,因为需要缓存记录,然后一批插入。 +* **查询需求** TDengine 提供高效的查询,但是每个场景的查询差异很大,查询频次变化也很大,难以给出客观数字。需要用户针对自己的场景,写一些查询语句,才能确定。 因此仅对数据插入而言,CPU 是可以估算出来的,但查询所耗的计算资源无法估算。在实际运营过程中,不建议 CPU 使用率超过 50%,超过后,需要增加新的节点,以获得更多计算资源。 @@ -96,51 +112,170 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修 taosd -C ``` -下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节,而且这些参数的缺省配置都是工作的,一般无需设置。**注意:配置修改后,需要重启*taosd*服务才能生效。** +下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节,而且这些参数的缺省配置都是可以工作的,一般无需设置。**注意:配置文件参数修改后,需要重启*taosd*服务,或客户端应用才能生效。** -- firstEp: taosd启动时,主动连接的集群中首个dnode的end point, 默认值为localhost:6030。 -- fqdn:数据节点的FQDN,缺省为操作系统配置的第一个hostname。如果习惯IP地址访问,可设置为该节点的IP地址。这个参数值的长度需要控制在 96 个字符以内。 -- serverPort:taosd启动后,对外服务的端口号,默认值为6030。(RESTful服务使用的端口号是在此基础上+11,即默认值为6041。) -- dataDir: 数据文件目录,所有的数据文件都将写入该目录。默认值:/var/lib/taos。 -- logDir:日志文件目录,客户端和服务器的运行日志文件将写入该目录。默认值:/var/log/taos。 -- arbitrator:系统中裁决器的end point, 缺省值为空。 -- role:dnode的可选角色。0-any; 既可作为mnode,也可分配vnode;1-mgmt;只能作为mnode,不能分配vnode;2-dnode;不能作为mnode,只能分配vnode -- debugFlag:运行日志开关。131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志)。默认值:131或135(不同模块有不同的默认值)。 -- numOfLogLines:单个日志文件允许的最大行数。默认值:10,000,000行。 -- logKeepDays:日志文件的最长保存时间。大于0时,日志文件会被重命名为taosdlog.xxx,其中xxx为日志文件最后修改的时间戳,单位为秒。默认值:0天。 -- maxSQLLength:单条SQL语句允许最长限制。默认值:65380字节。 -- telemetryReporting: 是否允许 TDengine 采集和上报基本使用信息,0表示不允许,1表示允许。 默认值:1。 -- stream: 是否启用连续查询(流计算功能),0表示不允许,1表示允许。 默认值:1。 -- queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为 MB(2.0.15 以前的版本中,此参数的单位是字节)。 -- ratioOfQueryCores: 设置查询线程的最大数量。最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。 +| **#** | **配置参数名称** | **内部** | **S\|C** | **单位** | **含义** | **取值范围** | **缺省值** | **备注** | +| ----- | ----------------------- | -------- | -------- | -------- | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| 1 | firstEP | | **SC** | | taosd启动时,主动连接的集群中首个dnode的end point | | localhost:6030 | | +| 2 | secondEP | YES | 
**SC** | | taosd启动时,如果firstEp连接不上,尝试连接集群中第二个dnode的end point | | 无 | | +| 3 | fqdn | | **SC** | | 数据节点的FQDN。如果习惯IP地址访问,可设置为该节点的IP地址。 | | 缺省为操作系统配置的第一个hostname。 | 这个参数值的长度需要控制在 96 个字符以内。 | +| 4 | serverPort | | **SC** | | taosd启动后,对外服务的端口号 | | 6030 | RESTful服务使用的端口号是在此基础上+11,即默认值为6041。 | +| 5 | logDir | | **SC** | | 日志文件目录,客户端和服务器的运行日志将写入该目录 | | /var/log/taos | | +| 6 | scriptDir | YES | **S** | | | | | | +| 7 | dataDir | | **S** | | 数据文件目录,所有的数据文件都将写入该目录 | | /var/lib/taos | | +| 8 | arbitrator | | **S** | | 系统中裁决器的end point | | 空 | | +| 9 | numOfThreadsPerCore | | **SC** | | 每个CPU核生成的队列消费者线程数量 | | 1.0 | | +| 10 | ratioOfQueryThreads | | **S** | | 设置查询线程的最大数量 | 0:表示只有1个查询线程;1:表示最大和CPU核数相等的查询线程;2:表示最大建立2倍CPU核数的查询线程。 | 1 | 该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。 | +| 11 | numOfMnodes | | **S** | | 系统中管理节点个数 | | 3 | | +| 12 | vnodeBak | | **S** | | 删除vnode时是否备份vnode目录 | 0:否,1:是 | 1 | | +| 13 | telemetryRePorting | | **S** | | 是否允许 TDengine 采集和上报基本使用信息 | 0:不允许;1:允许 | 1 | | +| 14 | balance | | **S** | | 是否启动负载均衡 | 0,1 | 1 | | +| 15 | balanceInterval | YES | **S** | 秒 | 管理节点在正常运行状态下,检查负载均衡的时间间隔 | 1-30000 | 300 | | +| 16 | role | | **S** | | dnode的可选角色 | 0:any(既可作为mnode,也可分配vnode);1:mgmt(只能作为mnode,不能分配vnode);2:dnode(不能作为mnode,只能分配vnode) | 0 | | +| 17 | maxTmerCtrl | | **SC** | 个 | 定时器个数 | 8-2048 | 512 | | +| 18 | monitorInterval | | **S** | 秒 | 监控数据库记录系统参数(CPU/内存)的时间间隔 | 1-600 | 30 | | +| 19 | offlineThreshold | | **S** | 秒 | dnode离线阈值,超过该时间将导致dnode离线 | 5-7200000 | 86400*10(10天) | | +| 20 | rpcTimer | | **SC** | 毫秒 | rpc重试时长 | 100-3000 | 300 | | +| 21 | rpcMaxTime | | **SC** | 秒 | rpc等待应答最大时长 | 100-7200 | 600 | | +| 22 | statusInterval | | **S** | 秒 | dnode向mnode报告状态间隔 | 1-10 | 1 | | +| 23 | shellActivityTimer | | **SC** | 秒 | shell客户端向mnode发送心跳间隔 | 1-120 | 3 | | +| 24 | tableMetaKeepTimer | | **S** | 秒 | 表的元数据cache时长 | 1-8640000 | 7200 | | +| 25 | minSlidingTime | | **S** | 毫秒 | 最小滑动窗口时长 | 10-1000000 | 10 | 支持us补值后,这个值就是1us了。 | +| 26 | minIntervalTime | | **S** | 毫秒 | 时间窗口最小值 | 1-1000000 | 10 | | +| 27 | stream | | **S** | | 是否启用连续查询(流计算功能) | 0:不允许;1:允许 | 1 | | +| 28 | maxStreamCompDelay | | **S** | 毫秒 | 连续查询启动最大延迟 | 10-1000000000 | 20000 | 为避免多个stream同时执行占用太多系统资源,程序中对stream的执行时间人为增加了一些随机的延时。maxFirstStreamCompDelay 是stream第一次执行前最少要等待的时间。streamCompDelayRatio 是延迟时间的计算系数,它乘以查询的 interval 后为延迟时间基准。maxStreamCompDelay是延迟时间基准的上限。实际延迟时间为一个不超过延迟时间基准的随机值。stream某次计算失败后需要重试,retryStreamCompDelay是重试的等待时间基准。实际重试等待时间为不超过等待时间基准的随机值。 | +| 29 | maxFirstStreamCompDelay | | **S** | 毫秒 | 第一次连续查询启动最大延迟 | 10-1000000000 | 10000 | | +| 30 | retryStreamCompDelay | | **S** | 毫秒 | 连续查询重试等待间隔 | 10-1000000000 | 10 | | +| 31 | streamCompDelayRatio | | **S** | | 连续查询的延迟时间计算系数 | 0.1-0.9 | 0.1 | | +| 32 | maxVgroupsPerDb | | **S** | | 每个DB中 能够使用的最大vnode个数 | 0-8192 | | | +| 33 | maxTablesPerVnode | | **S** | | 每个vnode中能够创建的最大表个数 | | 1000000 | | +| 34 | minTablesPerVnode | YES | **S** | | 每个vnode中必须创建的最小表个数 | | 100 | | +| 35 | tableIncStepPerVnode | YES | **S** | | 每个vnode中超过最小表数后递增步长 | | 1000 | | +| 36 | cache | | **S** | MB | 内存块的大小 | | 16 | | +| 37 | blocks | | **S** | | 每个vnode(tsdb)中有多少cache大小的内存块。因此一个vnode的用的内存大小粗略为(cache * blocks) | | 6 | | +| 38 | days | | **S** | 天 | 数据文件存储数据的时间跨度 | | 10 | | +| 39 | keep | | **S** | 天 | 数据保留的天数 | | 3650 | | +| 40 | minRows | | **S** | | 文件块中记录的最小条数 | | 100 | | +| 41 | maxRows | | **S** | | 文件块中记录的最大条数 | | 4096 | | +| 42 | quorum | | **S** | | 异步写入成功所需应答之法定数 | 1-3 | 1 | | +| 43 | comp | | **S** | | 文件压缩标志位 | 0:关闭,1:一阶段压缩,2:两阶段压缩 | 2 | | +| 44 | walLevel | | **S** | | WAL级别 | 1:写wal, 但不执行fsync; 2:写wal, 
而且执行fsync | 1 | | +| 45 | fsync | | **S** | 毫秒 | 当wal设置为2时,执行fsync的周期 | 最小为0,表示每次写入,立即执行fsync;最大为180000(三分钟) | 3000 | | +| 46 | replica | | **S** | | 副本个数 | 1-3 | 1 | | +| 47 | mqttHostName | YES | **S** | | mqtt uri | | | [mqtt://username:password@hostname:1883/taos/](mqtt://username:password@hostname:1883/taos/) | +| 48 | mqttPort | YES | **S** | | mqtt client name | | | 1883 | +| 49 | mqttTopic | YES | **S** | | | | | /test | +| 50 | compressMsgSize | | **S** | bytes | 客户端与服务器之间进行消息通讯过程中,对通讯的消息进行压缩的阈值。如果要压缩消息,建议设置为64330字节,即大于64330字节的消息体才进行压缩。 | `0 `表示对所有的消息均进行压缩 >0: 超过该值的消息才进行压缩 -1: 不压缩 | -1 | | +| 51 | maxSQLLength | | **C** | bytes | 单条SQL语句允许的最长限制 | 65480-1048576 | 65380 | | +| 52 | maxNumOfOrderedRes | | **SC** | | 支持超级表时间排序允许的最多记录数限制 | | 10万 | | +| 53 | timezone | | **SC** | | 时区 | | 从系统中动态获取当前的时区设置 | | +| 54 | locale | | **SC** | | 系统区位信息及编码格式 | | 系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 | | +| 55 | charset | | **SC** | | 字符集编码 | | 系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 | | +| 56 | maxShellConns | | **S** | | 一个dnode容许的连接数 | 10-50000000 | 5000 | | +| 57 | maxConnections | | **S** | | 一个数据库连接所容许的dnode连接数 | 1-100000 | 5000 | 实际测试下来,如果默认没有配,选 50 个 worker thread 会产生 Network unavailable | +| 58 | minimalLogDirGB | | **SC** | GB | 当日志文件夹的磁盘大小小于该值时,停止写日志 | | 0.1 | | +| 59 | minimalTmpDirGB | | **SC** | GB | 当日志文件夹的磁盘大小小于该值时,停止写临时文件 | | 0.1 | | +| 60 | minimalDataDirGB | | **S** | GB | 当日志文件夹的磁盘大小小于该值时,停止写时序数据 | | 0.1 | | +| 61 | mnodeEqualVnodeNum | | **S** | | 一个mnode等同于vnode消耗的个数 | | 4 | | +| 62 | http | | **S** | | 服务器内部的http服务开关。 | 0:关闭http服务, 1:激活http服务。 | 1 | | +| 63 | mqtt | YES | **S** | | 服务器内部的mqtt服务开关。 | 0:关闭mqtt服务, 1:激活mqtt服务。 | 0 | | +| 64 | monitor | | **S** | | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括CPU、内存、硬盘、网络带宽、HTTP请求量的监控记录,记录信息存储在`LOG`库中。 | 0:关闭监控服务, 1:激活监控服务。 | 0 | | +| 65 | httpEnableRecordSql | | **S** | | 内部使用,记录通过RESTFul接口,产生的SQL调用 | | 0 | 生成的文件(httpnote.0/httpnote.1),与服务端日志所在目录相同。 | +| 66 | httpMaxThreads | | **S** | | RESTFul接口的线程数 | | 2 | | +| 67 | telegrafUseFieldNum | YES | | | | | | | +| 68 | restfulRowLimit | | **S** | | RESTFul接口单次返回的记录条数 | | 10240 | 最大10,000,000 | +| 69 | numOfLogLines | | **SC** | | 单个日志文件允许的最大行数。 | | 10,000,000 | | +| 70 | asyncLog | | **SC** | | 日志写入模式 | 0:同步、1:异步 | 1 | | +| 71 | logKeepDays | | **SC** | 天 | 日志文件的最长保存时间 | | 0 | 大于0时,日志文件会被重命名为taosdlog.xxx,其中xxx为日志文件最后修改的时间戳。 | +| 72 | debugFlag | | **SC** | | 运行日志开关 | 131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志) | 131或135(不同模块有不同的默认值) | | +| 73 | mDebugFlag | | **S** | | 管理模块的日志开关 | 同上 | 135 | | +| 74 | dDebugFlag | | **SC** | | dnode模块的日志开关 | 同上 | 135 | | +| 75 | sDebugFlag | | **SC** | | sync模块的日志开关 | 同上 | 135 | | +| 76 | wDebugFlag | | **SC** | | wal模块的日志开关 | 同上 | 135 | | +| 77 | sdbDebugFlag | | **SC** | | sdb模块的日志开关 | 同上 | 135 | | +| 78 | rpcDebugFlag | | **SC** | | rpc模块的日志开关 | 同上 | | | +| 79 | tmrDebugFlag | | **SC** | | 定时器模块的日志开关 | 同上 | | | +| 80 | cDebugFlag | | **C** | | client模块的日志开关 | 同上 | | | +| 81 | jniDebugFlag | | **C** | | jni模块的日志开关 | 同上 | | | +| 82 | odbcDebugFlag | | **C** | | odbc模块的日志开关 | 同上 | | | +| 83 | uDebugFlag | | **SC** | | 共用功能模块的日志开关 | 同上 | | | +| 84 | httpDebugFlag | | **S** | | http模块的日志开关 | 同上 | | | +| 85 | mqttDebugFlag | | **S** | | mqtt模块的日志开关 | 同上 | | | +| 86 | monitorDebugFlag | | **S** | | 监控模块的日志开关 | 同上 | | | +| 87 | qDebugFlag | | **SC** | | 查询模块的日志开关 | 同上 | | | +| 88 | vDebugFlag | | **SC** | | vnode模块的日志开关 | 同上 | | | +| 89 | tsdbDebugFlag | | **S** | | TSDB模块的日志开关 | 同上 | | | +| 90 | cqDebugFlag | | **SC** | | 连续查询模块的日志开关 | 同上 | | | +| 91 | 
tscEnableRecordSql | | **C** | | 是否记录客户端sql语句到文件 | 0:否,1:是 | 0 | 生成的文件(tscnote-xxxx.0/tscnote-xxx.1,xxxx是pid),与客户端日志所在目录相同。 | +| 92 | enableCoreFile | | **SC** | | 是否开启服务crash时生成core文件 | 0:否,1:是 | 1 | 不同的启动方式,生成core文件的目录如下:1、systemctl start taosd启动:生成的core在根目录下;2、手动启动,就在taosd执行目录下。 | +| 93 | gitinfo | YES | **SC** | | | 1 | | | +| 94 | gitinfoofInternal | YES | **SC** | | | 2 | | | +| 95 | Buildinfo | YES | **SC** | | | 3 | | | +| 96 | version | YES | **SC** | | | 4 | | | +| 97 | | | | | | | | | +| 98 | maxBinaryDisplayWidth | | **C** | | Taos shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏 | 5 - | 30 | 实际上限按以下规则计算:如果字段值的长度大于 maxBinaryDisplayWidth,则显示上限为 **字段名长度** 和 **maxBinaryDisplayWidth** 的较大者。否则,上限为 **字段名长度** 和 **字段值长度** 的较大者。可在 shell 中通过命令 set max_binary_display_width nn动态修改此选项 | +| 99 | queryBufferSize | | **S** | MB | 为所有并发查询占用保留的内存大小。 | | | 计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。(2.0.15 以前的版本中,此参数的单位是字节) | +| 100 | ratioOfQueryCores | | **S** | | 设置查询线程的最大数量。 | | | 最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。 | +| 101 | update | | **S** | | 允许更新已存在的数据行 | 0 \| 1 | 0 | 从 2.0.8.0 版本开始 | +| 102 | cacheLast | | **S** | | 是否在内存中缓存子表的最近数据 | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。 | 0 | 2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。 | +| 103 | numOfCommitThreads | YES | **S** | | 设置写入线程的最大数量 | | | | +| 104 | maxWildCardsLength | | **C** | bytes | 设定 LIKE 算子的通配符字符串允许的最大长度 | 0-16384 | 100 | 2.1.6.1 版本新增。 | **注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030到6042共13个端口,而且必须TCP和UDP都打开。(详细的端口情况请参见 [TDengine 2.0 端口说明](https://www.taosdata.com/cn/documentation/faq#port)) 不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数(既可以作为 create database 指令的参数,也可以写在 taos.cfg 配置文件中用来设定创建新数据库时所采用的默认值): -- days:一个数据文件存储数据的时间跨度。单位为天,默认值:10。 -- keep:数据库中数据保留的天数。单位为天,默认值:3650。(可通过 alter database 修改) -- minRows:文件块中记录的最小条数。单位为条,默认值:100。 -- maxRows:文件块中记录的最大条数。单位为条,默认值:4096。 -- comp:文件压缩标志位。0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。(可通过 alter database 修改) -- wal:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。(在 taos.cfg 中参数名需要写作 walLevel) -- fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。 -- cache:内存块的大小。单位为兆字节(MB),默认值:16。 -- blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。(可通过 alter database 修改) -- replica:副本个数。取值范围:1-3,单位为个,默认值:1。(可通过 alter database 修改) -- quorum:多副本环境下指令执行的确认数要求。取值范围:1、2,单位为个,默认值:1。(可通过 alter database 修改) -- precision:时间戳精度标识。ms表示毫秒,us表示微秒,默认值:ms。(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) -- cacheLast:是否在内存中缓存子表的最近数据。0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。默认值:0。(可通过 alter database 修改)(从 2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1];而 2.0.11.0 之前的版本在 SQL 指令中不支持此参数。)(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) -- update:是否允许更新。0:不允许;1:允许。默认值:0。 +| **#** | **配置参数名称** | **单位** | **含义** | **取值范围** | **缺省值** | +| ----- | ---------------- | -------- | ------------------------------------------------------------ | ------------------------------------------------ | ---------- | +| 1 | days | 天 | 一个数据文件存储数据的时间跨度 | | 10 | +| 2 | keep | 天 | (可通过 alter database 修改)数据库中数据保留的天数。 | 3650 | +| 3 | cache | MB | 内存块的大小 | | 16 | +| 4 | blocks | | (可通过 alter database 修改)每个 VNODE(TSDB)中有多少个 cache 大小的内存块。因此一个 VNODE 使用的内存大小粗略为(cache * blocks)。 | | 4 | +| 5 | quorum | | (可通过 alter database 修改)多副本环境下指令执行的确认数要求 | 1-2 | 1 | +| 6 | minRows | | 文件块中记录的最小条数 | | 100 | +| 7 | maxRows | | 文件块中记录的最大条数 | | 4096 | +| 8 | comp | | (可通过 alter 
database 修改)文件压缩标志位 | 0:关闭,1:一阶段压缩,2:两阶段压缩 | 2 | +| 9 | walLevel | | (作为 database 的参数时名为 wal;在 taos.cfg 中作为参数时需要写作 walLevel)WAL级别 | 1:写wal,但不执行fsync;2:写wal, 而且执行fsync | 1 | +| 10 | fsync | 毫秒 | 当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。 | | 3000 | +| 11 | replica | | (可通过 alter database 修改)副本个数 | 1-3 | 1 | +| 12 | precision | | 时间戳精度标识(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) | ms 表示毫秒,us 表示微秒 | ms | +| 13 | update | | 是否允许更新 | 0:不允许;1:允许 | 0 | +| 14 | cacheLast | | (可通过 alter database 修改)是否在内存中缓存子表的最近数据(从 2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1];而 2.0.11.0 之前的版本在 SQL 指令中不支持此参数。)(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) | 0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能 | 0 | 对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL: -``` - create database demo days 10 cache 32 blocks 8 replica 3 update 1; +```mysql + CREATE DATABASE demo DAYS 10 CACHE 32 BLOCKS 8 REPLICA 3 UPDATE 1; ``` 该SQL创建了一个库demo, 每个数据文件存储10天数据,内存块为32兆字节,每个VNODE占用8个内存块,副本数为3,允许更新,而其他参数与系统配置完全一致。 +一个数据库创建成功后,仅部分参数可以修改并实时生效,其余参数不能修改: + +| **参数名** | **能否修改** | **范围** | **修改语法示例** | +| ----------- | ------------ | ------------------------------------------------------------ | ------------------------------------- | +| name | | | | +| create time | | | | +| ntables | | | | +| vgroups | | | | +| replica | **YES** | 在线dnode数目为1:1-1;2:1-2;>=3:1-3 | ALTER DATABASE REPLICA *n* | +| quorum | **YES** | 1-2 | ALTER DATABASE QUORUM *n* | +| days | | | | +| keep | **YES** | days-365000 | ALTER DATABASE KEEP *n* | +| cache | | | | +| blocks | **YES** | 3-1000 | ALTER DATABASE BLOCKS *n* | +| minrows | | | | +| maxrows | | | | +| wal | | | | +| fsync | | | | +| comp | **YES** | 0-2 | ALTER DATABASE COMP *n* | +| precision | | | | +| status | | | | +| update | | | | +| cachelast | **YES** | 0 \| 1 \| 2 \| 3 | ALTER DATABASE CACHELAST *n* | + +**说明:**在 2.1.3.0 版本之前,通过 ALTER DATABASE 语句修改这些参数后,需要重启服务器才能生效。 + TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数必须与已有集群的配置相同,否则不能成功加入到集群中。会进行校验的参数如下: - numOfMnodes:系统中管理节点个数。默认值:3。(2.0 版本从 2.0.20.11 开始、2.1 及以上版本从 2.1.6.0 开始,numOfMnodes 默认值改为 1。) @@ -172,7 +307,7 @@ ALTER DNODE alter dnode 1 debugFlag 135; ``` -## 客户端配置 +## 客户端及应用驱动配置 TDengine系统的前台交互客户端应用程序为taos,以及应用驱动,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见帮助信息 `taos --help`。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。 @@ -182,15 +317,15 @@ TDengine系统的前台交互客户端应用程序为taos,以及应用驱动 taos -C 或 taos --dump-config ``` -客户端配置参数 +客户端及应用驱动配置参数列表及解释 - firstEp: taos启动时,主动连接的集群中第一个taosd实例的end point, 缺省值为 localhost:6030。 - secondEp: taos 启动时,如果 firstEp 连不上,将尝试连接 secondEp。 -- locale +- locale:系统区位信息及编码格式。 - 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 + 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 TDengine为存储中文、日文、韩文等非ASCII编码的宽字符,提供一种专门的字段类型nchar。写入nchar字段的数据将统一采用UCS4-LE格式进行编码并发送到服务器。需要注意的是,编码正确性是客户端来保证。因此,如果用户想要正常使用nchar字段来存储诸如中文、日文、韩文等非ASCII字符,需要正确设置客户端的编码格式。 @@ -198,9 +333,9 @@ taos -C 或 taos --dump-config 在 Linux 中 locale 的命名规则为: <语言>\_<地区>.<字符集编码> 如:zh_CN.UTF-8,zh代表中文,CN代表大陆地区,UTF-8表示字符集。字符集编码为客户端正确解析本地字符串提供编码转换的说明。Linux系统与 Mac OSX 系统可以通过设置locale来确定系统的字符编码,由于Windows使用的locale中不是POSIX标准的locale格式,因此在Windows下需要采用另一个配置参数charset来指定字符编码。在Linux 系统中也可以使用charset来指定字符编码。 -- charset +- charset:字符集编码。 - 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 + 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置。 
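+  例如,可在 taos.cfg 中按如下方式显式设置(示例值,实际取值需与操作系统环境保持一致):
+  ```
+  locale zh_CN.UTF-8
+  charset UTF-8
+  ```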
如果配置文件中不设置charset,在Linux系统中,taos在启动时候,自动读取系统当前的locale信息,并从locale信息中解析提取charset编码格式。如果自动读取locale信息失败,则尝试读取charset配置,如果读取charset配置也失败,则中断启动过程。 @@ -260,7 +395,7 @@ taos -C 或 taos --dump-config - maxBinaryDisplayWidth - Shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏。默认值:30。可在 shell 中通过命令 set max_binary_display_width nn 动态修改此选项。 + Shell中 binary 和 nchar 字段的显示宽度上限,超过此限制的部分将被隐藏。默认值:30。可在 taos shell 中通过命令 set max_binary_display_width nn 动态修改此选项。 ## 用户管理 @@ -315,7 +450,7 @@ TDengine也支持在shell对已存在的表从CSV文件中进行数据导入。C ```mysql insert into tb1 file 'path/data.csv'; ``` -注意:如果CSV文件首行存在描述信息,请手动删除后再导入 +**注意:如果CSV文件首行存在描述信息,请手动删除后再导入。如某列为空,填NULL,无引号。** 例如,现在存在一个子表d1001, 其表结构如下: @@ -343,7 +478,7 @@ taos> DESCRIBE d1001 '2018-10-11 06:38:05.000',17.30000,219,0.32000 '2018-10-12 06:38:05.000',18.30000,219,0.31000 ``` -那么可以用如下命令导入数据 +那么可以用如下命令导入数据: ```mysql taos> insert into d1001 file '~/data.csv'; @@ -360,7 +495,7 @@ TDengine提供了方便的数据库导入导出工具taosdump。用户可以将t **按表导出CSV文件** -如果用户需要导出一个表或一个STable中的数据,可在shell中运行 +如果用户需要导出一个表或一个STable中的数据,可在taos shell中运行: ```mysql select * from >> data.csv; @@ -370,7 +505,9 @@ select * from >> data.csv; **用taosdump导出数据** -TDengine提供了方便的数据库导出工具taosdump。用户可以根据需要选择导出所有数据库、一个数据库或者数据库中的一张表,所有数据或一时间段的数据,甚至仅仅表的定义。具体使用方法,请参见博客:[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html) +利用taosdump,用户可以根据需要选择导出所有数据库、一个数据库或者数据库中的一张表,所有数据或一时间段的数据,甚至仅仅表的定义。 + +具体使用方法,请参见博客:[TDengine DUMP工具使用指南](https://www.taosdata.com/blog/2020/03/09/1334.html)。 ## 系统连接、任务查询管理 @@ -435,46 +572,100 @@ COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会 安装TDengine后,默认会在操作系统中生成下列目录或文件: -| 目录/文件 | 说明 | -| ------------------------- | :----------------------------------------------------------- | +| **目录/文件** | **说明** | +| ------------------------- | ------------------------------------------------------------ | | /usr/local/taos/bin | TDengine可执行文件目录。其中的执行文件都会软链接到/usr/bin目录下。 | | /usr/local/taos/connector | TDengine各种连接器目录。 | | /usr/local/taos/driver | TDengine动态链接库目录。会软链接到/usr/lib目录下。 | | /usr/local/taos/examples | TDengine各种语言应用示例目录。 | | /usr/local/taos/include | TDengine对外提供的C语言接口的头文件。 | | /etc/taos/taos.cfg | TDengine默认[配置文件] | -| /var/lib/taos | TDengine默认数据文件目录,可通过[配置文件]修改位置. | -| /var/log/taos | TDengine默认日志文件目录,可通过[配置文件]修改位置 | +| /var/lib/taos | TDengine默认数据文件目录。可通过[配置文件]修改位置。 | +| /var/log/taos | TDengine默认日志文件目录。可通过[配置文件]修改位置。 | **可执行文件** TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下。其中包括: -- _taosd_:TDengine服务端可执行文件 -- _taos_: TDengine Shell可执行文件 -- _taosdump_:数据导入导出工具 -- remove.sh:卸载TDengine的脚本, 请谨慎执行,链接到/usr/bin目录下的rmtaos命令。会删除TDengine的安装目录/usr/local/taos,但会保留/etc/taos、/var/lib/taos、/var/log/taos。 +- *taosd*:TDengine服务端可执行文件 +- *taos*:TDengine Shell可执行文件 +- *taosdump*:数据导入导出工具 +- *taosdemo*:TDengine测试工具 +- remove.sh:卸载TDengine的脚本,请谨慎执行,链接到/usr/bin目录下的**rmtaos**命令。会删除TDengine的安装目录/usr/local/taos,但会保留/etc/taos、/var/lib/taos、/var/log/taos。 您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录。 +## TDengine 的启动、停止、卸载 + +TDengine 使用 Linux 系统的 systemd/systemctl/service 来管理系统的启动和、停止、重启操作。TDengine 的服务进程是 taosd,默认情况下 TDengine 在系统启动后将自动启动。DBA 可以通过 systemd/systemctl/service 手动操作停止、启动、重新启动服务。 + +以 systemctl 为例,命令如下: + +- 启动服务进程:`systemctl start taosd` + +- 停止服务进程:`systemctl stop taosd` + +- 重启服务进程:`systemctl restart taosd` + +- 查看服务状态:`systemctl status taosd` + +如果服务进程处于活动状态,则 status 指令会显示如下的相关信息: +``` +...... + +Active: active (running) + +...... +``` + +如果后台服务进程处于停止状态,则 status 指令会显示如下的相关信息: +``` +...... + +Active: inactive (dead) + +...... 
+``` + +卸载 TDengine,只需要执行如下命令 +``` +rmtaos +``` + +**警告:执行该命令后,TDengine 程序将被完全删除,务必谨慎使用。** + ## TDengine参数限制与保留关键字 +**名称命名规则** + +1. 合法字符:英文字符、数字和下划线 +2. 允许英文字符或下划线开头,不允许以数字开头 +3. 不区分大小写 + +**密码合法字符集** + +`[a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/]` + +去掉了 ```‘“`\``` (单双引号、撇号、反斜杠、空格) + - 数据库名:不能包含“.”以及特殊字符,不能超过 32 个字符 -- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符 +- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过 192 个字符,每行数据最大长度 16k 个字符 - 表的列名:不能包含特殊字符,不能超过 64 个字符 - 数据库名、表名、列名,都不能以数字开头,合法的可用字符集是“英文字符、数字和下划线” -- 表的列数:不能超过 1024 列 +- 表的列数:不能超过 1024 列,最少需要 2 列,第一列必须是时间戳 - 记录的最大长度:包括时间戳 8 byte,不能超过 16KB(每个 BINARY/NCHAR 类型的列还会额外占用 2 个 byte 的存储位置) -- 单条 SQL 语句默认最大字符串长度:65480 byte +- 单条 SQL 语句默认最大字符串长度:65480 byte,但可通过系统配置参数 maxSQLLength 修改,最长可配置为 1048576 byte - 数据库副本数:不能超过 3 - 用户名:不能超过 23 个 byte - 用户密码:不能超过 15 个 byte -- 标签(Tags)数量:不能超过 128 个 +- 标签(Tags)数量:不能超过 128 个,可以 0 个 - 标签的总长度:不能超过 16K byte - 记录条数:仅受存储空间限制 - 表的个数:仅受节点个数限制 - 库的个数:仅受节点个数限制 - 单个库上虚拟节点个数:不能超过 64 个 +- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 +- SELECT 语句的查询结果,最多允许返回 1024 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错。 目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下: @@ -519,3 +710,102 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 | CONNS | ID | NOTNULL | STABLE | WAL | | COPY | IF | NOW | STABLES | WHERE | +## 诊断及其他 + +#### 网络连接诊断 + +当出现客户端应用无法访问服务端时,需要确认客户端与服务端之间网络的各端口连通情况,以便有针对性地排除故障。 + +目前网络连接诊断支持在:Linux 与 Linux,Linux 与 Windows 之间进行诊断测试。 + +诊断步骤: + +1. 如拟诊断的端口范围与服务器 taosd 实例的端口范围相同,须先停掉 taosd 实例 +2. 服务端命令行输入:`taos -n server -P ` 以服务端身份启动对端口 port 为基准端口的监听 +3. 客户端命令行输入:`taos -n client -h -P ` 以客户端身份启动对指定的服务器、指定的端口发送测试包 + +服务端运行正常的话会输出以下信息 + +```bash +# taos -n server -P 6000 +12/21 14:50:13.522509 0x7f536f455200 UTL work as server, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000 + +12/21 14:50:13.522659 0x7f5352242700 UTL TCP server at port:6000 is listening +12/21 14:50:13.522727 0x7f5351240700 UTL TCP server at port:6001 is listening +... +... +... +12/21 14:50:13.523954 0x7f5342fed700 UTL TCP server at port:6011 is listening +12/21 14:50:13.523989 0x7f53437ee700 UTL UDP server at port:6010 is listening +12/21 14:50:13.524019 0x7f53427ec700 UTL UDP server at port:6011 is listening +12/21 14:50:22.192849 0x7f5352242700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6000 +12/21 14:50:22.192993 0x7f5352242700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6000 +12/21 14:50:22.237082 0x7f5351a41700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6000 +12/21 14:50:22.237203 0x7f5351a41700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6000 +12/21 14:50:22.237450 0x7f5351240700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6001 +12/21 14:50:22.237576 0x7f5351240700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6001 +12/21 14:50:22.281038 0x7f5350a3f700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6001 +12/21 14:50:22.281141 0x7f5350a3f700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6001 +... +... +... 
+12/21 14:50:22.677443 0x7f5342fed700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6011 +12/21 14:50:22.677576 0x7f5342fed700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6011 +12/21 14:50:22.721144 0x7f53427ec700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6011 +12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011 +``` + +客户端运行正常会输出以下信息: + +```bash +# taos -n client -h 172.27.0.7 -P 6000 +12/21 14:50:22.192434 0x7fc95d859200 UTL work as client, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000 + +12/21 14:50:22.192472 0x7fc95d859200 UTL server ip:172.27.0.7 is resolved from host:172.27.0.7 +12/21 14:50:22.236869 0x7fc95d859200 UTL successed to test TCP port:6000 +12/21 14:50:22.237215 0x7fc95d859200 UTL successed to test UDP port:6000 +... +... +... +12/21 14:50:22.676891 0x7fc95d859200 UTL successed to test TCP port:6010 +12/21 14:50:22.677240 0x7fc95d859200 UTL successed to test UDP port:6010 +12/21 14:50:22.720893 0x7fc95d859200 UTL successed to test TCP port:6011 +12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011 +``` + +仔细阅读打印出来的错误信息,可以帮助管理员找到原因,以解决问题。 + +#### 启动状态及RPC诊断 + +`taos -n startup -h ` + +判断 taosd 服务端是否成功启动,是数据库管理员经常遇到的一种情形。特别当若干台服务器组成集群时,判断每个服务端实例是否成功启动就会是一个重要问题。除检索 taosd 服务端日志文件进行问题定位、分析外,还可以通过 `taos -n startup -h ` 来诊断一个 taosd 进程的启动状态。 + +针对多台服务器组成的集群,当服务启动过程耗时较长时,可通过该命令行来诊断每台服务器的 taosd 实例的启动状态,以准确定位问题。 + +`taos -n rpc -h ` + +该命令用来诊断已经启动的 taosd 实例的端口是否可正常访问。如果 taosd 程序异常或者失去响应,可以通过 `taos -n rpc -h ` 来发起一个与指定 fqdn 的 rpc 通信,看看 taosd 是否能收到,以此来判定是网络问题还是 taosd 程序异常问题。 + +#### sync 及 arbitrator 诊断 + +``` +taos -n sync -P 6040 -h +taos -n sync -P 6042 -h +``` + +用来诊断 sync 端口是否工作正常,判断服务端 sync 模块是否成功工作。另外,-P 6042 用来诊断 arbitrator 是否配置正常,判断指定服务器的 arbitrator 是否能正常工作。 + +#### 服务端日志 + +taosd 服务端日志文件标志位 debugflag 默认为 131,在 debug 时往往需要将其提升到 135 或 143 。 + +一旦设定为 135 或 143,日志文件增长很快,特别是写入、查询请求量较大时,增长速度惊人。如合并保存日志,很容易把日志内的关键信息(如配置信息、错误信息等)冲掉。为此,服务端将重要信息日志与其他日志分开存放: + +- taosinfo 存放重要信息日志 +- taosdlog 存放其他日志 + +其中,taosinfo 日志文件最大长度由 numOfLogLines 来进行配置,一个 taosd 实例最多保留两个文件。 + +taosd 服务端日志采用异步落盘写入机制,优点是可以避免硬盘写入压力太大,对性能造成很大影响。缺点是,在极端情况下,存在少量日志行数丢失的可能。 + diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index ed5c282da1..6a53423e9b 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -206,7 +206,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 显示当前数据库下的所有数据表信息。 - 说明:可在like中使用通配符进行名称的匹配,这一通配符字符串最长不能超过24字节。 + 说明:可在 like 中使用通配符进行名称的匹配,这一通配符字符串最长不能超过 20 字节。( 从 2.1.6.1 版本开始,通配符字符串的长度放宽到了 100 字节,并可以通过 taos.cfg 中的 maxWildCardsLength 参数来配置这一长度限制。但不建议使用太长的通配符字符串,将有可能严重影响 LIKE 操作的执行性能。) 通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。 @@ -435,6 +435,17 @@ INSERT INTO INSERT INTO d1001 FILE '/tmp/csvfile.csv'; ``` +- **插入来自文件的数据记录,并自动建表** + 从 2.1.5.0 版本开始,支持在插入来自 CSV 文件的数据时,以超级表为模板来自动创建不存在的数据表。例如: + ```mysql + INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile.csv'; + ``` + 也可以在一条语句中向多个表以自动建表的方式插入记录。例如: + ```mysql + INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) FILE '/tmp/csvfile_21001.csv' + d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv'; + ``` + **历史记录写入**:可使用IMPORT或者INSERT命令,IMPORT的语法,功能与INSERT完全一样。 **说明:**针对 insert 类型的 SQL 语句,我们采用的流式解析策略,在发现后面的错误之前,前面正确的部分 SQL 仍会执行。下面的 SQL 中,INSERT 语句是无效的,但是 d1001 仍会被创建。 @@ -942,6 +953,8 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ### 选择函数 +在使用所有的选择函数的时候,可以同时指定输出 ts 列或标签列(包括 tbname),这样就可以方便地知道被选出的值是源于哪个数据行的。 + - **MIN** ```mysql SELECT MIN(field_name) FROM {tb_name | 
stb_name} [WHERE clause]; @@ -1215,6 +1228,37 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 Query OK, 1 row(s) in set (0.001042s) ``` +- **INTERP** + ```mysql + SELECT INTERP(field_name) FROM { tb_name | stb_name } WHERE ts='timestamp' [FILL ({ VALUE | PREV | NULL | LINEAR})]; + ``` + 功能说明:返回表/超级表的指定时间截面、指定字段的记录。 + + 返回结果数据类型:同应用的字段。 + + 应用字段:所有字段。 + + 适用于:**表、超级表**。 + + 说明:(从 2.0.15.0 版本开始新增此函数)INTERP 必须指定时间断面,如果该时间断面不存在直接对应的数据,那么会根据 FILL 参数的设定进行插值。其中,条件语句里面可以附带更多的筛选条件,例如标签、tbname。 + + 限制:INTERP 目前不支持 FILL(NEXT)。 + + 示例: + ```mysql + taos> select interp(*) from meters where ts='2017-7-14 10:42:00.005' fill(prev); + interp(ts) | interp(f1) | interp(f2) | interp(f3) | + ==================================================================== + 2017-07-14 10:42:00.005 | 5 | 9 | 6 | + Query OK, 1 row(s) in set (0.002912s) + + taos> select interp(*) from meters where tbname in ('t1') and ts='2017-7-14 10:42:00.005' fill(prev); + interp(ts) | interp(f1) | interp(f2) | interp(f3) | + ==================================================================== + 2017-07-14 10:42:00.005 | 5 | 6 | 7 | + Query OK, 1 row(s) in set (0.002005s) + ``` + ### 计算函数 - **DIFF** diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 42550b3339..f2d25c1e84 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -342,7 +342,7 @@ STableMeta* createSuperTableMeta(STableMetaMsg* pChild); uint32_t tscGetTableMetaSize(STableMeta* pTableMeta); CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta); uint32_t tscGetTableMetaMaxSize(); -int32_t tscCreateTableMetaFromSTableMeta(STableMeta** pChild, const char* name, size_t *tableMetaCapacity); +int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity, STableMeta **ppStable); STableMeta* tscTableMetaDup(STableMeta* pTableMeta); SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index ac5adcbbb4..8d579b375a 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -84,9 +84,14 @@ typedef struct SParamInfo { } SParamInfo; typedef struct SBoundColumn { - bool hasVal; // denote if current column has bound or not - int32_t offset; // all column offset value + int32_t offset; // all column offset value + int32_t toffset; // first part offset for SDataRow TODO: get offset from STSchema on future + uint8_t valStat; // denote if current column bound or not(0 means has val, 1 means no val) } SBoundColumn; +typedef enum { + VAL_STAT_HAS = 0x0, // 0 means has val + VAL_STAT_NONE = 0x01, // 1 means no val +} EValStat; typedef struct { uint16_t schemaColIdx; @@ -99,32 +104,106 @@ typedef enum _COL_ORDER_STATUS { ORDER_STATUS_ORDERED = 1, ORDER_STATUS_DISORDERED = 2, } EOrderStatus; - typedef struct SParsedDataColInfo { int16_t numOfCols; int16_t numOfBound; - int32_t * boundedColumns; // bounded column idx according to schema + uint16_t flen; // TODO: get from STSchema + uint16_t allNullLen; // TODO: get from STSchema + uint16_t extendedVarLen; + int32_t * boundedColumns; // bound column idx according to schema SBoundColumn * cols; SBoundIdxInfo *colIdxInfo; - int8_t orderStatus; // bounded columns: + int8_t orderStatus; // bound columns } SParsedDataColInfo; -#define IS_DATA_COL_ORDERED(s) ((s) == (int8_t)ORDER_STATUS_ORDERED) +#define IS_DATA_COL_ORDERED(spd) ((spd->orderStatus) == (int8_t)ORDER_STATUS_ORDERED) typedef struct { - SSchema * pSchema; - int16_t sversion; - int32_t flen; - uint16_t nCols; - void * buf; - void * 
pDataBlock; - SSubmitBlk *pSubmitBlk; + int32_t dataLen; // len of SDataRow + int32_t kvLen; // len of SKVRow +} SMemRowInfo; +typedef struct { + uint8_t memRowType; + uint8_t compareStat; // 0 unknown, 1 need compare, 2 no need + TDRowTLenT dataRowInitLen; + TDRowTLenT kvRowInitLen; + SMemRowInfo *rowInfo; } SMemRowBuilder; -typedef struct { - TDRowLenT allNullLen; -} SMemRowHelper; +typedef enum { + ROW_COMPARE_UNKNOWN = 0, + ROW_COMPARE_NEED = 1, + ROW_COMPARE_NO_NEED = 2, +} ERowCompareStat; +int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec); + +int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols, + int32_t allNullLen); +void destroyMemRowBuilder(SMemRowBuilder *pBuilder); + +/** + * @brief + * + * @param memRowType + * @param spd + * @param idx the absolute bound index of columns + * @return FORCE_INLINE + */ +static FORCE_INLINE void tscGetMemRowAppendInfo(SSchema *pSchema, uint8_t memRowType, SParsedDataColInfo *spd, + int32_t idx, int32_t *toffset, int16_t *colId) { + int32_t schemaIdx = 0; + if (IS_DATA_COL_ORDERED(spd)) { + schemaIdx = spd->boundedColumns[idx]; + if (isDataRowT(memRowType)) { + *toffset = (spd->cols + schemaIdx)->toffset; // the offset of firstPart + } else { + *toffset = idx * sizeof(SColIdx); // the offset of SColIdx + } + } else { + ASSERT(idx == (spd->colIdxInfo + idx)->boundIdx); + schemaIdx = (spd->colIdxInfo + idx)->schemaColIdx; + if (isDataRowT(memRowType)) { + *toffset = (spd->cols + schemaIdx)->toffset; + } else { + *toffset = ((spd->colIdxInfo + idx)->finalIdx) * sizeof(SColIdx); + } + } + *colId = pSchema[schemaIdx].colId; +} + +/** + * @brief Applicable to consume by multi-columns + * + * @param row + * @param value + * @param isCopyVarData In some scenario, the varVal is copied to row directly before calling tdAppend***ColVal() + * @param colId + * @param colType + * @param idx index in SSchema + * @param pBuilder + * @param spd + * @return FORCE_INLINE + */ +static FORCE_INLINE void tscAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId, + int8_t colType, int32_t toffset, SMemRowBuilder *pBuilder, + int32_t rowNum) { + tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset); + if (pBuilder->compareStat == ROW_COMPARE_NEED) { + SMemRowInfo *pRowInfo = pBuilder->rowInfo + rowNum; + tdGetColAppendDeltaLen(value, colType, &pRowInfo->dataLen, &pRowInfo->kvLen); + } +} + +// Applicable to consume by one row +static FORCE_INLINE void tscAppendMemRowColValEx(SMemRow row, const void *value, bool isCopyVarData, int16_t colId, + int8_t colType, int32_t toffset, int32_t *dataLen, int32_t *kvLen, + uint8_t compareStat) { + tdAppendMemRowColVal(row, value, isCopyVarData, colId, colType, toffset); + if (compareStat == ROW_COMPARE_NEED) { + tdGetColAppendDeltaLen(value, colType, dataLen, kvLen); + } +} typedef struct STableDataBlocks { SName tableName; int8_t tsSource; // where does the UNIX timestamp come from, server or client @@ -146,7 +225,7 @@ typedef struct STableDataBlocks { uint32_t numOfAllocedParams; uint32_t numOfParams; SParamInfo * params; - SMemRowHelper rowHelper; + SMemRowBuilder rowBuilder; } STableDataBlocks; typedef struct { @@ -435,8 +514,398 @@ int16_t getNewResColId(SSqlCmd* pCmd); int32_t schemaIdxCompar(const void *lhs, const void *rhs); int32_t boundIdxCompar(const void *lhs, const void *rhs); -int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen); 
-int32_t getExtendedRowSize(STableComInfo *tinfo); +static FORCE_INLINE int32_t getExtendedRowSize(STableDataBlocks *pBlock) { + ASSERT(pBlock->rowSize == pBlock->pTableMeta->tableInfo.rowSize); + return pBlock->rowSize + TD_MEM_ROW_DATA_HEAD_SIZE + pBlock->boundColumnInfo.extendedVarLen; +} + +static FORCE_INLINE void checkAndConvertMemRow(SMemRow row, int32_t dataLen, int32_t kvLen) { + if (isDataRow(row)) { + if (kvLen < (dataLen * KVRatioConvert)) { + memRowSetConvert(row); + } + } else if (kvLen > dataLen) { + memRowSetConvert(row); + } +} + +static FORCE_INLINE void initSMemRow(SMemRow row, uint8_t memRowType, STableDataBlocks *pBlock, int16_t nBoundCols) { + memRowSetType(row, memRowType); + if (isDataRowT(memRowType)) { + dataRowSetVersion(memRowDataBody(row), pBlock->pTableMeta->sversion); + dataRowSetLen(memRowDataBody(row), (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBlock->boundColumnInfo.flen)); + } else { + ASSERT(nBoundCols > 0); + memRowSetKvVersion(row, pBlock->pTableMeta->sversion); + kvRowSetNCols(memRowKvBody(row), nBoundCols); + kvRowSetLen(memRowKvBody(row), (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols)); + } +} +/** + * TODO: Move to tdataformat.h and refactor when STSchema available. + * - fetch flen and toffset from STSChema and remove param spd + */ +static FORCE_INLINE void convertToSDataRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols, + SParsedDataColInfo *spd) { + ASSERT(isKvRow(src)); + SKVRow kvRow = memRowKvBody(src); + SDataRow dataRow = memRowDataBody(dest); + + memRowSetType(dest, SMEM_ROW_DATA); + dataRowSetVersion(dataRow, memRowKvVersion(src)); + dataRowSetLen(dataRow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + spd->flen)); + + int32_t kvIdx = 0; + for (int i = 0; i < nCols; ++i) { + SSchema *schema = pSchema + i; + void * val = tdGetKVRowValOfColEx(kvRow, schema->colId, &kvIdx); + tdAppendDataColVal(dataRow, val != NULL ? val : getNullValue(schema->type), true, schema->type, + (spd->cols + i)->toffset); + } +} + +// TODO: Move to tdataformat.h and refactor when STSchema available. +static FORCE_INLINE void convertToSKVRow(SMemRow dest, SMemRow src, SSchema *pSchema, int nCols, int nBoundCols, + SParsedDataColInfo *spd) { + ASSERT(isDataRow(src)); + + SDataRow dataRow = memRowDataBody(src); + SKVRow kvRow = memRowKvBody(dest); + + memRowSetType(dest, SMEM_ROW_KV); + memRowSetKvVersion(kvRow, dataRowVersion(dataRow)); + kvRowSetNCols(kvRow, nBoundCols); + kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nBoundCols)); + + int32_t toffset = 0, kvOffset = 0; + for (int i = 0; i < nCols; ++i) { + if ((spd->cols + i)->valStat == VAL_STAT_HAS) { + SSchema *schema = pSchema + i; + toffset = (spd->cols + i)->toffset; + void *val = tdGetRowDataOfCol(dataRow, schema->type, toffset + TD_DATA_ROW_HEAD_SIZE); + tdAppendKvColVal(kvRow, val, true, schema->colId, schema->type, kvOffset); + kvOffset += sizeof(SColIdx); + } + } +} + +// TODO: Move to tdataformat.h and refactor when STSchema available. +static FORCE_INLINE void convertSMemRow(SMemRow dest, SMemRow src, STableDataBlocks *pBlock) { + STableMeta * pTableMeta = pBlock->pTableMeta; + STableComInfo tinfo = tscGetTableInfo(pTableMeta); + SSchema * pSchema = tscGetTableSchema(pTableMeta); + SParsedDataColInfo *spd = &pBlock->boundColumnInfo; + + ASSERT(dest != src); + + if (isDataRow(src)) { + // TODO: Can we use pBlock -> numOfParam directly? 
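+    // Shrinking a data row into KV form is only meaningful when at least one column is bound,
+    // hence the assertion below before building the SKVRow.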
+ ASSERT(spd->numOfBound > 0); + convertToSKVRow(dest, src, pSchema, tinfo.numOfColumns, spd->numOfBound, spd); + } else { + convertToSDataRow(dest, src, pSchema, tinfo.numOfColumns, spd); + } +} + +static bool isNullStr(SStrToken *pToken) { + return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) && + (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)); +} + +static FORCE_INLINE int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) { + errno = 0; + *value = strtold(pToken->z, endPtr); + + // not a valid integer number, return error + if ((*endPtr - pToken->z) != pToken->n) { + return TK_ILLEGAL; + } + + return pToken->type; +} + +static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE; +static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE; + +static FORCE_INLINE int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, SMemRow row, char *msg, char **str, + bool primaryKey, int16_t timePrec, int32_t toffset, int16_t colId, + int32_t *dataLen, int32_t *kvLen, uint8_t compareStat) { + int64_t iv; + int32_t ret; + char * endptr = NULL; + + if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { + return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z); + } + + switch (pSchema->type) { + case TSDB_DATA_TYPE_BOOL: { // bool + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { + if (strncmp(pToken->z, "true", pToken->n) == 0) { + tscAppendMemRowColValEx(row, &TRUE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } else if (strncmp(pToken->z, "false", pToken->n) == 0) { + tscAppendMemRowColValEx(row, &FALSE_VALUE, true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); + } + } else if (pToken->type == TK_INTEGER) { + iv = strtoll(pToken->z, NULL, 10); + tscAppendMemRowColValEx(row, ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset, + dataLen, kvLen, compareStat); + } else if (pToken->type == TK_FLOAT) { + double dv = strtod(pToken->z, NULL); + tscAppendMemRowColValEx(row, ((dv == 0) ? 
&FALSE_VALUE : &TRUE_VALUE), true, colId, pSchema->type, toffset, + dataLen, kvLen, compareStat); + } else { + return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); + } + } + break; + } + + case TSDB_DATA_TYPE_TINYINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z); + } else if (!IS_VALID_TINYINT(iv)) { + return tscInvalidOperationMsg(msg, "data overflow", pToken->z); + } + + uint8_t tmpVal = (uint8_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_UTINYINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z); + } else if (!IS_VALID_UTINYINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z); + } + + uint8_t tmpVal = (uint8_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_SMALLINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z); + } else if (!IS_VALID_SMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z); + } + + int16_t tmpVal = (int16_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_USMALLINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z); + } else if (!IS_VALID_USMALLINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z); + } + + uint16_t tmpVal = (uint16_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_INT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid int data", pToken->z); + } else if (!IS_VALID_INT(iv)) { + return tscInvalidOperationMsg(msg, "int data overflow", pToken->z); + } + + int32_t tmpVal = (int32_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_UINT: + if (isNullStr(pToken)) { + 
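+        // NULL literal: append the type-specific null marker instead of parsing a numeric value.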
tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z); + } else if (!IS_VALID_UINT(iv)) { + return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z); + } + + uint32_t tmpVal = (uint32_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + + case TSDB_DATA_TYPE_BIGINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z); + } else if (!IS_VALID_BIGINT(iv)) { + return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); + } + + tscAppendMemRowColValEx(row, &iv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_UBIGINT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); + if (ret != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z); + } else if (!IS_VALID_UBIGINT((uint64_t)iv)) { + return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z); + } + + uint64_t tmpVal = (uint64_t)iv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_FLOAT: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || + isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); + } + + float tmpVal = (float)dv; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_DOUBLE: + if (isNullStr(pToken)) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + double dv; + if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { + return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); + } + + tscAppendMemRowColValEx(row, &dv, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_BINARY: + // binary data cannot be null-terminated char string, otherwise the last char of the string is lost + if (pToken->type == TK_NULL) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { // too long values will return 
an 'invalid SQL' error rather than being truncated automatically + if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor + return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); + } + // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); + char *rowEnd = memRowEnd(row); + STR_WITH_SIZE_TO_VARSTR(rowEnd, pToken->z, pToken->n); + tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_NCHAR: + if (pToken->type == TK_NULL) { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } else { + // if the converted output length exceeds pColumnModel->bytes, return error: 'Argument list too long' + int32_t output = 0; + char * rowEnd = memRowEnd(row); + if (!taosMbsToUcs4(pToken->z, pToken->n, (char *)varDataVal(rowEnd), pSchema->bytes - VARSTR_HEADER_SIZE, + &output)) { + char buf[512] = {0}; + snprintf(buf, tListLen(buf), "%s", strerror(errno)); + return tscInvalidOperationMsg(msg, buf, pToken->z); + } + varDataSetLen(rowEnd, output); + tscAppendMemRowColValEx(row, rowEnd, false, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + break; + + case TSDB_DATA_TYPE_TIMESTAMP: { + if (pToken->type == TK_NULL) { + if (primaryKey) { + // When building the SKVRow primary key, do not skip it even when the value is NULL. + int64_t tmpVal = 0; + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } else { + tscAppendMemRowColValEx(row, getNullValue(pSchema->type), true, colId, pSchema->type, toffset, dataLen, kvLen, + compareStat); + } + } else { + int64_t tmpVal; + if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) { + return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); + } + tscAppendMemRowColValEx(row, &tmpVal, true, colId, pSchema->type, toffset, dataLen, kvLen, compareStat); + } + + break; + } + } + + return TSDB_CODE_SUCCESS; +} #ifdef __cplusplus } diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 73e4f898c8..89e3832007 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -38,43 +38,60 @@ enum { TSDB_USE_CLI_TS = 1, }; -static uint8_t TRUE_VALUE = (uint8_t)TSDB_TRUE; -static uint8_t FALSE_VALUE = (uint8_t)TSDB_FALSE; - static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t *numOfRows); static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDataColInfo *pColInfo, SSchema *pSchema, char *str, char **end); - -int32_t getExtendedRowSize(STableComInfo *tinfo) { - return tinfo->rowSize + PAYLOAD_HEADER_LEN + PAYLOAD_COL_HEAD_LEN * tinfo->numOfColumns; -} -int initSMemRowHelper(SMemRowHelper *pHelper, SSchema *pSSchema, uint16_t nCols, uint16_t allNullColsLen) { - pHelper->allNullLen = allNullColsLen; // TODO: get allNullColsLen when creating or altering table meta - if (pHelper->allNullLen == 0) { - for (uint16_t i = 0; i < nCols; ++i) { - uint8_t type = pSSchema[i].type; - int32_t typeLen = TYPE_BYTES[type]; - pHelper->allNullLen += typeLen; - if (TSDB_DATA_TYPE_BINARY == type) { - pHelper->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES); - } else if (TSDB_DATA_TYPE_NCHAR == type) { - int len = VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE; - pHelper->allNullLen += len; - } +int initMemRowBuilder(SMemRowBuilder *pBuilder, uint32_t nRows, uint32_t nCols, uint32_t nBoundCols, + int32_t allNullLen) { + ASSERT(nRows
>= 0 && nCols > 0 && (nBoundCols <= nCols)); + if (nRows > 0) { + // already init(bind multiple rows by single column) + if (pBuilder->compareStat == ROW_COMPARE_NEED && (pBuilder->rowInfo != NULL)) { + return TSDB_CODE_SUCCESS; } } - return 0; -} -static int32_t tscToDouble(SStrToken *pToken, double *value, char **endPtr) { - errno = 0; - *value = strtold(pToken->z, endPtr); - - // not a valid integer number, return error - if ((*endPtr - pToken->z) != pToken->n) { - return TK_ILLEGAL; + + if (nBoundCols == 0) { // file input + pBuilder->memRowType = SMEM_ROW_DATA; + pBuilder->compareStat = ROW_COMPARE_NO_NEED; + return TSDB_CODE_SUCCESS; + } else { + float boundRatio = ((float)nBoundCols / (float)nCols); + + if (boundRatio < KVRatioKV) { + pBuilder->memRowType = SMEM_ROW_KV; + pBuilder->compareStat = ROW_COMPARE_NO_NEED; + return TSDB_CODE_SUCCESS; + } else if (boundRatio > KVRatioData) { + pBuilder->memRowType = SMEM_ROW_DATA; + pBuilder->compareStat = ROW_COMPARE_NO_NEED; + return TSDB_CODE_SUCCESS; + } + pBuilder->compareStat = ROW_COMPARE_NEED; + + if (boundRatio < KVRatioPredict) { + pBuilder->memRowType = SMEM_ROW_KV; + } else { + pBuilder->memRowType = SMEM_ROW_DATA; + } } - return pToken->type; + pBuilder->dataRowInitLen = TD_MEM_ROW_DATA_HEAD_SIZE + allNullLen; + pBuilder->kvRowInitLen = TD_MEM_ROW_KV_HEAD_SIZE + nBoundCols * sizeof(SColIdx); + + if (nRows > 0) { + pBuilder->rowInfo = tcalloc(nRows, sizeof(SMemRowInfo)); + if (pBuilder->rowInfo == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + for (int i = 0; i < nRows; ++i) { + (pBuilder->rowInfo + i)->dataLen = pBuilder->dataRowInitLen; + (pBuilder->rowInfo + i)->kvLen = pBuilder->kvRowInitLen; + } + } + + return TSDB_CODE_SUCCESS; } int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) { @@ -146,10 +163,6 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1 return TSDB_CODE_SUCCESS; } -static bool isNullStr(SStrToken* pToken) { - return (pToken->type == TK_NULL) || ((pToken->type == TK_STRING) && (pToken->n != 0) && - (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)); -} int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec) { int64_t iv; @@ -400,342 +413,6 @@ int32_t tsParseOneColumn(SSchema *pSchema, SStrToken *pToken, char *payload, cha return TSDB_CODE_SUCCESS; } -static FORCE_INLINE TDRowLenT tsSetPayloadColValue(char *payloadStart, char *payload, int16_t columnId, - uint8_t columnType, const void *value, uint16_t valueLen, TDRowTLenT tOffset) { - payloadColSetId(payload, columnId); - payloadColSetType(payload, columnType); - memcpy(POINTER_SHIFT(payloadStart,tOffset), value, valueLen); - return valueLen; -} - -static int32_t tsParseOneColumnKV(SSchema *pSchema, SStrToken *pToken, char *payloadStart, char *primaryKeyStart, - char *payload, char *msg, char **str, bool primaryKey, int16_t timePrec, - TDRowTLenT tOffset, TDRowLenT *sizeAppend, TDRowLenT *dataRowColDeltaLen, - TDRowLenT *kvRowColLen) { - int64_t iv; - int32_t ret; - char * endptr = NULL; - - if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) { - return tscInvalidOperationMsg(msg, "invalid numeric data", pToken->z); - } - - switch (pSchema->type) { - case TSDB_DATA_TYPE_BOOL: { // bool - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_BOOL), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - } 
else { - if ((pToken->type == TK_BOOL || pToken->type == TK_STRING) && (pToken->n != 0)) { - if (strncmp(pToken->z, "true", pToken->n) == 0) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &TRUE_VALUE, - TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else if (strncmp(pToken->z, "false", pToken->n) == 0) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &FALSE_VALUE, - TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else { - return tscSQLSyntaxErrMsg(msg, "invalid bool data", pToken->z); - } - } else if (pToken->type == TK_INTEGER) { - iv = strtoll(pToken->z, NULL, 10); - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - ((iv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else if (pToken->type == TK_FLOAT) { - double dv = strtod(pToken->z, NULL); - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - ((dv == 0) ? &FALSE_VALUE : &TRUE_VALUE), TYPE_BYTES[TSDB_DATA_TYPE_BOOL], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BOOL]); - } else { - return tscInvalidOperationMsg(msg, "invalid bool data", pToken->z); - } - } - break; - } - - case TSDB_DATA_TYPE_TINYINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_TINYINT), TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid tinyint data", pToken->z); - } else if (!IS_VALID_TINYINT(iv)) { - return tscInvalidOperationMsg(msg, "data overflow", pToken->z); - } - - uint8_t tmpVal = (uint8_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_TINYINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TINYINT]); - } - - break; - - case TSDB_DATA_TYPE_UTINYINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_UTINYINT), TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned tinyint data", pToken->z); - } else if (!IS_VALID_UTINYINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned tinyint data overflow", pToken->z); - } - - uint8_t tmpVal = (uint8_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UTINYINT]); - } - - break; - - case TSDB_DATA_TYPE_SMALLINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_SMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - 
return tscInvalidOperationMsg(msg, "invalid smallint data", pToken->z); - } else if (!IS_VALID_SMALLINT(iv)) { - return tscInvalidOperationMsg(msg, "smallint data overflow", pToken->z); - } - - int16_t tmpVal = (int16_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_SMALLINT]); - } - - break; - - case TSDB_DATA_TYPE_USMALLINT: - if (isNullStr(pToken)) { - *sizeAppend = - tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_USMALLINT), TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned smallint data", pToken->z); - } else if (!IS_VALID_USMALLINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned smallint data overflow", pToken->z); - } - - uint16_t tmpVal = (uint16_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_USMALLINT]); - } - - break; - - case TSDB_DATA_TYPE_INT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_INT), TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid int data", pToken->z); - } else if (!IS_VALID_INT(iv)) { - return tscInvalidOperationMsg(msg, "int data overflow", pToken->z); - } - - int32_t tmpVal = (int32_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_INT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_INT]); - } - - break; - - case TSDB_DATA_TYPE_UINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_UINT), TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned int data", pToken->z); - } else if (!IS_VALID_UINT(iv)) { - return tscInvalidOperationMsg(msg, "unsigned int data overflow", pToken->z); - } - - uint32_t tmpVal = (uint32_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_UINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UINT]); - } - - break; - - case TSDB_DATA_TYPE_BIGINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_BIGINT), TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, true); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid bigint data", pToken->z); - } else if (!IS_VALID_BIGINT(iv)) { - return tscInvalidOperationMsg(msg, "bigint data overflow", pToken->z); - } - - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, 
pSchema->colId, pSchema->type, &iv, - TYPE_BYTES[TSDB_DATA_TYPE_BIGINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_BIGINT]); - } - break; - - case TSDB_DATA_TYPE_UBIGINT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_UBIGINT), TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset); - } else { - ret = tStrToInteger(pToken->z, pToken->type, pToken->n, &iv, false); - if (ret != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid unsigned bigint data", pToken->z); - } else if (!IS_VALID_UBIGINT((uint64_t)iv)) { - return tscInvalidOperationMsg(msg, "unsigned bigint data overflow", pToken->z); - } - - uint64_t tmpVal = (uint64_t)iv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_UBIGINT]); - } - break; - - case TSDB_DATA_TYPE_FLOAT: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_FLOAT), TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset); - } else { - double dv; - if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); - } - - if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || dv > FLT_MAX || dv < -FLT_MAX || isinf(dv) || - isnan(dv)) { - return tscInvalidOperationMsg(msg, "illegal float data", pToken->z); - } - - float tmpVal = (float)dv; - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_FLOAT], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_FLOAT]); - } - break; - - case TSDB_DATA_TYPE_DOUBLE: - if (isNullStr(pToken)) { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_DOUBLE), TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset); - } else { - double dv; - if (TK_ILLEGAL == tscToDouble(pToken, &dv, &endptr)) { - return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); - } - - if (((dv == HUGE_VAL || dv == -HUGE_VAL) && errno == ERANGE) || isinf(dv) || isnan(dv)) { - return tscInvalidOperationMsg(msg, "illegal double data", pToken->z); - } - - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, &dv, - TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_DOUBLE]); - } - break; - - case TSDB_DATA_TYPE_BINARY: - // binary data cannot be null-terminated char string, otherwise the last char of the string is lost - if (pToken->type == TK_NULL) { - payloadColSetId(payload, pSchema->colId); - payloadColSetType(payload, pSchema->type); - memcpy(POINTER_SHIFT(payloadStart, tOffset), getNullValue(TSDB_DATA_TYPE_BINARY), VARSTR_HEADER_SIZE + CHAR_BYTES); - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + CHAR_BYTES); - } else { // too long values will return invalid sql, not be truncated automatically - if (pToken->n + VARSTR_HEADER_SIZE > pSchema->bytes) { // todo refactor - return tscInvalidOperationMsg(msg, "string data overflow", pToken->z); - } - // STR_WITH_SIZE_TO_VARSTR(payload, pToken->z, pToken->n); - - payloadColSetId(payload, pSchema->colId); - payloadColSetType(payload, pSchema->type); - 
varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), pToken->n); - memcpy(varDataVal(POINTER_SHIFT(payloadStart,tOffset)), pToken->z, pToken->n); - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + pToken->n); - *dataRowColDeltaLen += (TDRowLenT)(pToken->n - CHAR_BYTES); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + pToken->n); - } - - break; - - case TSDB_DATA_TYPE_NCHAR: - if (pToken->type == TK_NULL) { - payloadColSetId(payload, pSchema->colId); - payloadColSetType(payload, pSchema->type); - memcpy(POINTER_SHIFT(payloadStart,tOffset), getNullValue(TSDB_DATA_TYPE_NCHAR), VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); - } else { - // if the converted output len is over than pColumnModel->bytes, return error: 'Argument list too long' - int32_t output = 0; - payloadColSetId(payload, pSchema->colId); - payloadColSetType(payload, pSchema->type); - if (!taosMbsToUcs4(pToken->z, pToken->n, varDataVal(POINTER_SHIFT(payloadStart,tOffset)), - pSchema->bytes - VARSTR_HEADER_SIZE, &output)) { - char buf[512] = {0}; - snprintf(buf, tListLen(buf), "%s", strerror(errno)); - return tscInvalidOperationMsg(msg, buf, pToken->z); - } - - varDataSetLen(POINTER_SHIFT(payloadStart,tOffset), output); - - *sizeAppend = (TDRowLenT)(VARSTR_HEADER_SIZE + output); - *dataRowColDeltaLen += (TDRowLenT)(output - sizeof(uint32_t)); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + VARSTR_HEADER_SIZE + output); - } - break; - - case TSDB_DATA_TYPE_TIMESTAMP: { - if (pToken->type == TK_NULL) { - if (primaryKey) { - // When building SKVRow primaryKey, we should not skip even with NULL value. - int64_t tmpVal = 0; - *sizeAppend = tsSetPayloadColValue(payloadStart, primaryKeyStart, pSchema->colId, pSchema->type, &tmpVal, - TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]); - } else { - *sizeAppend = tsSetPayloadColValue(payloadStart, payload, pSchema->colId, pSchema->type, - getNullValue(TSDB_DATA_TYPE_TIMESTAMP), - TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); - } - } else { - int64_t tmpVal; - if (tsParseTime(pToken, &tmpVal, str, msg, timePrec) != TSDB_CODE_SUCCESS) { - return tscInvalidOperationMsg(msg, "invalid timestamp", pToken->z); - } - - *sizeAppend = tsSetPayloadColValue(payloadStart, primaryKey ? primaryKeyStart : payload, pSchema->colId, - pSchema->type, &tmpVal, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], tOffset); - *kvRowColLen += (TDRowLenT)(sizeof(SColIdx) + TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP]); - } - - break; - } - } - - return TSDB_CODE_SUCCESS; -} - /* * The server time/client time should not be mixed up in one sql string * No sort operation is needed if the server time is used.
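[Editor's note, not part of the patch] The new initMemRowBuilder() above chooses the in-memory row encoding from the ratio of bound to total columns before any value is parsed. Below is a minimal C sketch of that heuristic, assuming the KVRatioKV (0.2), KVRatioPredict (0.4) and KVRatioData (0.75) thresholds added to tdataformat.h in this change; pickRowFormat and RowPlan are hypothetical names used only for illustration.

#include <stdbool.h>
#include <stdint.h>

typedef enum { ROW_FMT_DATA, ROW_FMT_KV } RowFmt;

typedef struct {
  RowFmt fmt;       /* predicted encoding for the rows of this statement */
  bool needCompare; /* grey zone: track both sizes, convert later if cheaper */
} RowPlan;

static RowPlan pickRowFormat(uint32_t nBoundCols, uint32_t nCols) {
  RowPlan plan = {ROW_FMT_DATA, false};
  if (nBoundCols == 0) return plan;   /* file input: dense SDataRow, no compare */
  float ratio = (float)nBoundCols / (float)nCols;
  if (ratio < 0.2f) {                 /* very sparse: SKVRow wins outright */
    plan.fmt = ROW_FMT_KV;
  } else if (ratio <= 0.75f) {        /* grey zone: predict now, re-check per row */
    plan.fmt = (ratio < 0.4f) ? ROW_FMT_KV : ROW_FMT_DATA;
    plan.needCompare = true;
  }                                   /* ratio > 0.75: nearly dense, keep SDataRow */
  return plan;
}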
@@ -777,31 +454,24 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i int32_t index = 0; SStrToken sToken = {0}; - SMemRowHelper *pHelper = &pDataBlocks->rowHelper; - char * payload = pDataBlocks->pData + pDataBlocks->size; + char *row = pDataBlocks->pData + pDataBlocks->size; // skip the SSubmitBlk header SParsedDataColInfo *spd = &pDataBlocks->boundColumnInfo; - SSchema * schema = tscGetTableSchema(pDataBlocks->pTableMeta); + STableMeta * pTableMeta = pDataBlocks->pTableMeta; + SSchema * schema = tscGetTableSchema(pTableMeta); + SMemRowBuilder * pBuilder = &pDataBlocks->rowBuilder; + int32_t dataLen = pBuilder->dataRowInitLen; + int32_t kvLen = pBuilder->kvRowInitLen; + bool isParseBindParam = false; - TDRowTLenT dataRowLen = pHelper->allNullLen; - TDRowTLenT kvRowLen = TD_MEM_ROW_KV_VER_SIZE; - TDRowTLenT payloadValOffset = 0; - TDRowLenT colValOffset = 0; - ASSERT(dataRowLen > 0); - - payloadSetNCols(payload, spd->numOfBound); - payloadValOffset = payloadValuesOffset(payload); // rely on payloadNCols - // payloadSetTLen(payload, payloadValOffset); - - char *kvPrimaryKeyStart = payload + PAYLOAD_HEADER_LEN; // primaryKey in 1st column tuple - char *kvStart = kvPrimaryKeyStart + PAYLOAD_COL_HEAD_LEN; // the column tuple behind the primaryKey + initSMemRow(row, pBuilder->memRowType, pDataBlocks, spd->numOfBound); // 1. set the parsed value from sql string for (int i = 0; i < spd->numOfBound; ++i) { // the start position in data block buffer of current value in sql int32_t colIndex = spd->boundedColumns[i]; - char *start = payload + spd->cols[colIndex].offset; + char *start = row + spd->cols[colIndex].offset; SSchema *pSchema = &schema[colIndex]; // get colId here @@ -810,6 +480,9 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i *str += index; if (sToken.type == TK_QUESTION) { + if (!isParseBindParam) { + isParseBindParam = true; + } if (pInsertParam->insertType != TSDB_QUERY_TYPE_STMT_INSERT) { return tscSQLSyntaxErrMsg(pInsertParam->msg, "? only allowed in binding insertion", *str); } @@ -860,54 +533,45 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i sToken.n -= 2 + cnt; } - bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); - TDRowLenT dataRowDeltaColLen = 0; // When combine the data as SDataRow, the delta len between all NULL columns. 
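/* [Editor's note, not part of the patch] tsParseOneRow() in this hunk threads
 * two running sizes through every appended column: dataLen for the dense
 * SDataRow encoding and kvLen for the sparse SKVRow encoding. A minimal C
 * sketch of the final decision follows, assuming the SMEM_ROW_* values from
 * tdataformat.h; chooseMemRowType is a hypothetical name, the real check
 * lives in checkAndConvertMemRow(). */

#include <stdint.h>

#define SMEM_ROW_DATA 0x0U /* dense tuple: one slot per column         */
#define SMEM_ROW_KV 0x01U  /* sparse: (colId, offset) index plus values */

static uint8_t chooseMemRowType(int32_t dataLen, int32_t kvLen) {
  /* keep whichever encoding turned out smaller for this particular row */
  return (kvLen < dataLen) ? SMEM_ROW_KV : SMEM_ROW_DATA;
}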
- TDRowLenT kvRowColLen = 0; - TDRowLenT colValAppended = 0; + bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX); + int32_t toffset = -1; + int16_t colId = -1; + tscGetMemRowAppendInfo(schema, pBuilder->memRowType, spd, i, &toffset, &colId); - if (!IS_DATA_COL_ORDERED(spd->orderStatus)) { - ASSERT(spd->colIdxInfo != NULL); - if(!isPrimaryKey) { - kvStart = POINTER_SHIFT(kvPrimaryKeyStart, spd->colIdxInfo[i].finalIdx * PAYLOAD_COL_HEAD_LEN); - } else { - ASSERT(spd->colIdxInfo[i].finalIdx == 0); - } - } - // the primary key locates in 1st column - int32_t ret = tsParseOneColumnKV(pSchema, &sToken, payload, kvPrimaryKeyStart, kvStart, pInsertParam->msg, str, - isPrimaryKey, timePrec, payloadValOffset + colValOffset, &colValAppended, - &dataRowDeltaColLen, &kvRowColLen); + int32_t ret = tsParseOneColumnKV(pSchema, &sToken, row, pInsertParam->msg, str, isPrimaryKey, timePrec, toffset, + colId, &dataLen, &kvLen, pBuilder->compareStat); if (ret != TSDB_CODE_SUCCESS) { return ret; } if (isPrimaryKey) { - if (tsCheckTimestamp(pDataBlocks, payloadValues(payload)) != TSDB_CODE_SUCCESS) { + TSKEY tsKey = memRowKey(row); + if (tsCheckTimestamp(pDataBlocks, (const char *)&tsKey) != TSDB_CODE_SUCCESS) { tscInvalidOperationMsg(pInsertParam->msg, "client time/server time can not be mixed up", sToken.z); return TSDB_CODE_TSC_INVALID_TIME_STAMP; } - payloadColSetOffset(kvPrimaryKeyStart, colValOffset); - } else { - payloadColSetOffset(kvStart, colValOffset); - if (IS_DATA_COL_ORDERED(spd->orderStatus)) { - kvStart += PAYLOAD_COL_HEAD_LEN; // move to next column + } + } + + if (!isParseBindParam) { + // 2. check and set convert flag + if (pBuilder->compareStat == ROW_COMPARE_NEED) { + checkAndConvertMemRow(row, dataLen, kvLen); + } + + // 3. set the null value for the columns that do not assign values + if ((spd->numOfBound < spd->numOfCols) && isDataRow(row) && !isNeedConvertRow(row)) { + SDataRow dataRow = memRowDataBody(row); + for (int32_t i = 0; i < spd->numOfCols; ++i) { + if (spd->cols[i].valStat == VAL_STAT_NONE) { + tdAppendDataColVal(dataRow, getNullValue(schema[i].type), true, schema[i].type, spd->cols[i].toffset); + } } } - - colValOffset += colValAppended; - kvRowLen += kvRowColLen; - dataRowLen += dataRowDeltaColLen; } - if (kvRowLen < dataRowLen) { - payloadSetType(payload, SMEM_ROW_KV); - } else { - payloadSetType(payload, SMEM_ROW_DATA); - } + *len = getExtendedRowSize(pDataBlocks); - *len = (int32_t)(payloadValOffset + colValOffset); - payloadSetTLen(payload, *len); - return TSDB_CODE_SUCCESS; } @@ -957,11 +621,13 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn int32_t precision = tinfo.precision; - int32_t extendedRowSize = getExtendedRowSize(&tinfo); - - initSMemRowHelper(&pDataBlock->rowHelper, tscGetTableSchema(pDataBlock->pTableMeta), - tscGetNumOfColumns(pDataBlock->pTableMeta), 0); + int32_t extendedRowSize = getExtendedRowSize(pDataBlock); + if (TSDB_CODE_SUCCESS != + (code = initMemRowBuilder(&pDataBlock->rowBuilder, 0, tinfo.numOfColumns, pDataBlock->boundColumnInfo.numOfBound, + pDataBlock->boundColumnInfo.allNullLen))) { + return code; + } while (1) { index = 0; sToken = tStrGetToken(*str, &index, false); @@ -991,9 +657,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn index = 0; sToken = tStrGetToken(*str, &index, false); if (sToken.n == 0 || sToken.type != TK_RP) { - tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str); - code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; - return code; + return 
tscSQLSyntaxErrMsg(pInsertParam->msg, ") expected", *str); } *str += index; @@ -1012,19 +676,37 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SIn void tscSetBoundColumnInfo(SParsedDataColInfo *pColInfo, SSchema *pSchema, int32_t numOfCols) { pColInfo->numOfCols = numOfCols; pColInfo->numOfBound = numOfCols; - pColInfo->orderStatus = ORDER_STATUS_ORDERED; + pColInfo->orderStatus = ORDER_STATUS_ORDERED; // default is ORDERED for non-bound mode pColInfo->boundedColumns = calloc(pColInfo->numOfCols, sizeof(int32_t)); pColInfo->cols = calloc(pColInfo->numOfCols, sizeof(SBoundColumn)); pColInfo->colIdxInfo = NULL; + pColInfo->flen = 0; + pColInfo->allNullLen = 0; + int32_t nVar = 0; for (int32_t i = 0; i < pColInfo->numOfCols; ++i) { + uint8_t type = pSchema[i].type; if (i > 0) { pColInfo->cols[i].offset = pSchema[i - 1].bytes + pColInfo->cols[i - 1].offset; + pColInfo->cols[i].toffset = pColInfo->flen; + } + pColInfo->flen += TYPE_BYTES[type]; + switch (type) { + case TSDB_DATA_TYPE_BINARY: + pColInfo->allNullLen += (VARSTR_HEADER_SIZE + CHAR_BYTES); + ++nVar; + break; + case TSDB_DATA_TYPE_NCHAR: + pColInfo->allNullLen += (VARSTR_HEADER_SIZE + TSDB_NCHAR_SIZE); + ++nVar; + break; + default: + break; } - - pColInfo->cols[i].hasVal = true; pColInfo->boundedColumns[i] = i; } + pColInfo->allNullLen += pColInfo->flen; + pColInfo->extendedVarLen = (uint16_t)(nVar * sizeof(VarDataOffsetT)); } int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows) { @@ -1124,35 +806,29 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk if (dataBuf->tsSource == TSDB_USE_SERVER_TS) { assert(dataBuf->ordered); } - // allocate memory + // allocate memory size_t nAlloc = nRows * sizeof(SBlockKeyTuple); if (pBlkKeyInfo->pKeyTuple == NULL || pBlkKeyInfo->maxBytesAlloc < nAlloc) { size_t nRealAlloc = nAlloc + 10 * sizeof(SBlockKeyTuple); char * tmp = trealloc(pBlkKeyInfo->pKeyTuple, nRealAlloc); if (tmp == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; + return TSDB_CODE_TSC_OUT_OF_MEMORY; } pBlkKeyInfo->pKeyTuple = (SBlockKeyTuple *)tmp; pBlkKeyInfo->maxBytesAlloc = (int32_t)nRealAlloc; } memset(pBlkKeyInfo->pKeyTuple, 0, nAlloc); + int32_t extendedRowSize = getExtendedRowSize(dataBuf); SBlockKeyTuple *pBlkKeyTuple = pBlkKeyInfo->pKeyTuple; char * pBlockData = pBlocks->data; - TDRowTLenT totolPayloadTLen = 0; - TDRowTLenT payloadTLen = 0; int n = 0; while (n < nRows) { - pBlkKeyTuple->skey = payloadTSKey(pBlockData); + pBlkKeyTuple->skey = memRowKey(pBlockData); pBlkKeyTuple->payloadAddr = pBlockData; - payloadTLen = payloadTLen(pBlockData); -#if 0 - ASSERT(payloadNCols(pBlockData) <= 4096); - ASSERT(payloadTLen(pBlockData) < 65536); -#endif - totolPayloadTLen += payloadTLen; + // next loop - pBlockData += payloadTLen; + pBlockData += extendedRowSize; ++pBlkKeyTuple; ++n; } @@ -1169,7 +845,6 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk TSKEY tj = (pBlkKeyTuple + j)->skey; if (ti == tj) { - totolPayloadTLen -= payloadTLen(pBlkKeyTuple + j); ++j; continue; } @@ -1185,17 +860,15 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk pBlocks->numOfRows = i + 1; } - dataBuf->size = sizeof(SSubmitBlk) + totolPayloadTLen; + dataBuf->size = sizeof(SSubmitBlk) + pBlocks->numOfRows * extendedRowSize; dataBuf->prevTS = INT64_MIN; return 0; } -static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t 
*totalNum) { - STableComInfo tinfo = tscGetTableInfo(dataBuf->pTableMeta); - +static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char **str, STableDataBlocks* dataBuf, int32_t *totalNum) { int32_t maxNumOfRows; - int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(&tinfo), &maxNumOfRows); + int32_t code = tscAllocateMemIfNeed(dataBuf, getExtendedRowSize(dataBuf), &maxNumOfRows); if (TSDB_CODE_SUCCESS != code) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -1533,7 +1206,7 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat pColInfo->numOfBound = 0; memset(pColInfo->boundedColumns, 0, sizeof(int32_t) * nCols); for (int32_t i = 0; i < nCols; ++i) { - pColInfo->cols[i].hasVal = false; + pColInfo->cols[i].valStat = VAL_STAT_NONE; } int32_t code = TSDB_CODE_SUCCESS; @@ -1572,12 +1245,12 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat int32_t nScanned = 0, t = lastColIdx + 1; while (t < nCols) { if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) { - if (pColInfo->cols[t].hasVal == true) { + if (pColInfo->cols[t].valStat == VAL_STAT_HAS) { code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z); goto _clean; } - pColInfo->cols[t].hasVal = true; + pColInfo->cols[t].valStat = VAL_STAT_HAS; pColInfo->boundedColumns[pColInfo->numOfBound] = t; ++pColInfo->numOfBound; findColumnIndex = true; @@ -1595,12 +1268,12 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat int32_t nRemain = nCols - nScanned; while (t < nRemain) { if (strncmp(sToken.z, pSchema[t].name, sToken.n) == 0 && strlen(pSchema[t].name) == sToken.n) { - if (pColInfo->cols[t].hasVal == true) { + if (pColInfo->cols[t].valStat == VAL_STAT_HAS) { code = tscInvalidOperationMsg(pInsertParam->msg, "duplicated column name", sToken.z); goto _clean; } - pColInfo->cols[t].hasVal = true; + pColInfo->cols[t].valStat = VAL_STAT_HAS; pColInfo->boundedColumns[pColInfo->numOfBound] = t; ++pColInfo->numOfBound; findColumnIndex = true; @@ -1835,7 +1508,7 @@ int tsParseInsertSql(SSqlObj *pSql) { goto _clean; } - if (dataBuf->boundColumnInfo.cols[0].hasVal == false) { + if (dataBuf->boundColumnInfo.cols[0].valStat == VAL_STAT_NONE) { code = tscInvalidOperationMsg(pInsertParam->msg, "primary timestamp column can not be null", NULL); goto _clean; } @@ -2046,15 +1719,18 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow goto _error; } - tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(&tinfo), &maxRows); + tscAllocateMemIfNeed(pTableDataBlock, getExtendedRowSize(pTableDataBlock), &maxRows); tokenBuf = calloc(1, TSDB_MAX_BYTES_PER_ROW); if (tokenBuf == NULL) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } - initSMemRowHelper(&pTableDataBlock->rowHelper, tscGetTableSchema(pTableDataBlock->pTableMeta), - tscGetNumOfColumns(pTableDataBlock->pTableMeta), 0); + if (TSDB_CODE_SUCCESS != + (ret = initMemRowBuilder(&pTableDataBlock->rowBuilder, 0, tinfo.numOfColumns, pTableDataBlock->numOfParams, + pTableDataBlock->boundColumnInfo.allNullLen))) { + goto _error; + } while ((readLen = tgetline(&line, &n, fp)) != -1) { if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 2c2a299549..40664241c1 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -299,7 +299,7 @@ static int fillColumnsNull(STableDataBlocks* pBlock, 
int32_t rowNum) { SSchema *schema = (SSchema*)pBlock->pTableMeta->schema; for (int32_t i = 0; i < spd->numOfCols; ++i) { - if (!spd->cols[i].hasVal) { // current column do not have any value to insert, set it to null + if (spd->cols[i].valStat == VAL_STAT_NONE) { // current column do not have any value to insert, set it to null for (int32_t n = 0; n < rowNum; ++n) { char *ptr = pBlock->pData + sizeof(SSubmitBlk) + pBlock->rowSize * n + offset; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index b1bd2efa07..17b693faf2 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4293,7 +4293,7 @@ static bool isValidExpr(tSqlExpr* pLeft, tSqlExpr* pRight, int32_t optr) { if (pRight == NULL) { return true; } - + if (pLeft->tokenId >= TK_BOOL && pLeft->tokenId <= TK_BINARY && pRight->tokenId >= TK_BOOL && pRight->tokenId <= TK_BINARY) { return false; } @@ -5757,10 +5757,15 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { } int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) { - const char* msg0 = "only support order by primary timestamp"; - const char* msg1 = "invalid column name"; - const char* msg2 = "order by primary timestamp, first tag or groupby column in groupby clause allowed"; - const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed"; + const char* msg0 = "only one column allowed in orderby"; + const char* msg1 = "invalid column name in orderby clause"; + const char* msg2 = "too many order by columns"; + const char* msg3 = "only primary timestamp/tbname/first tag in groupby clause allowed"; + const char* msg4 = "only tag in groupby clause allowed in order by"; + const char* msg5 = "only primary timestamp/column in top/bottom function allowed as orderby column"; + const char* msg6 = "only primary timestamp allowed as the second orderby column"; + const char* msg7 = "only primary timestamp/column in groupby clause allowed as orderby column"; + const char* msg8 = "only column in groupby clause allowed as orderby column"; setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -5790,7 +5795,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } } else { if (size > 2) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); } } @@ -5819,7 +5824,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq // it is a tag column if (pQueryInfo->groupbyExpr.columnInfo == NULL) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4); } SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); if (relTagIndex == pColIndex->colIndex) { @@ -5865,7 +5870,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pExpr = tscExprGet(pQueryInfo, 1); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); @@ -5906,7 +5911,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq } if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return 
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } else { tVariantListItem* p1 = taosArrayGet(pSortorder, 1); pQueryInfo->order.order = p1->sortOrder; @@ -5926,7 +5931,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq validOrder = (pColIndex->colIndex == index.columnIndex); } if (!validOrder) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); } tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId; @@ -5940,6 +5945,9 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { SColIndex* pColIndex = taosArrayGet(columnInfo, 0); validOrder = (pColIndex->colIndex == index.columnIndex); + if (!validOrder) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); + } } else { /* order of top/bottom query in interval is not valid */ SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); @@ -5947,15 +5955,11 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq pExpr = tscExprGet(pQueryInfo, 1); if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } validOrder = true; } - if (!validOrder) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); - } - tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = pItem->sortOrder; @@ -8399,19 +8403,13 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { char name[TSDB_TABLE_FNAME_LEN] = {0}; - //if (!pSql->pBuf) { - // if (NULL == (pSql->pBuf = tcalloc(1, 80 * TSDB_MAX_COLUMNS))) { - // code = TSDB_CODE_TSC_OUT_OF_MEMORY; - // goto _end; - // } - //} - plist = taosArrayInit(4, POINTER_BYTES); pVgroupList = taosArrayInit(4, POINTER_BYTES); taosArraySort(tableNameList, tnameComparFn); taosArrayRemoveDuplicate(tableNameList, tnameComparFn, NULL); + STableMeta* pSTMeta = (STableMeta *)(pSql->pBuf); size_t numOfTables = taosArrayGetSize(tableNameList); for (int32_t i = 0; i < numOfTables; ++i) { SName* pname = taosArrayGet(tableNameList, i); @@ -8427,7 +8425,8 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) { // avoid mem leak, may should update pTableMeta void* pVgroupIdList = NULL; if (pTableMeta->tableType == TSDB_CHILD_TABLE) { - code = tscCreateTableMetaFromSTableMeta((STableMeta **)(&pTableMeta), name, &tableMetaCapacity); + code = tscCreateTableMetaFromSTableMeta((STableMeta **)(&pTableMeta), name, &tableMetaCapacity, (STableMeta **)(&pSTMeta)); + pSql->pBuf = (void *)pSTMeta; // create the child table meta from super table failed, try load it from mnode if (code != TSDB_CODE_SUCCESS) { diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index e05c8b562b..f0ee180bbe 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2872,18 +2872,19 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool tNameExtractFullName(&pTableMetaInfo->name, name); size_t len = strlen(name); - if (pTableMetaInfo->tableMetaCapacity != 0) { - if (pTableMetaInfo->pTableMeta != NULL) { - memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity); - } + // just 
make runtime happy + if (pTableMetaInfo->tableMetaCapacity != 0 && pTableMetaInfo->pTableMeta != NULL) { + memset(pTableMetaInfo->pTableMeta, 0, pTableMetaInfo->tableMetaCapacity); } taosHashGetCloneExt(tscTableMetaMap, name, len, NULL, (void **)&(pTableMetaInfo->pTableMeta), &pTableMetaInfo->tableMetaCapacity); - - STableMeta* pMeta = pTableMetaInfo->pTableMeta; + + STableMeta* pMeta = pTableMetaInfo->pTableMeta; + STableMeta* pSTMeta = (STableMeta *)(pSql->pBuf); if (pMeta && pMeta->id.uid > 0) { // in case of child table, here only get the if (pMeta->tableType == TSDB_CHILD_TABLE) { - int32_t code = tscCreateTableMetaFromSTableMeta(&pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity); + int32_t code = tscCreateTableMetaFromSTableMeta(&pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity, (STableMeta **)(&pSTMeta)); + pSql->pBuf = (void *)(pSTMeta); if (code != TSDB_CODE_SUCCESS) { return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate); } diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 3554b43ff6..0d26ec58f6 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2404,8 +2404,8 @@ int32_t tscHandleFirstRoundStableQuery(SSqlObj *pSql) { SColumn* x = taosArrayGetP(pNewQueryInfo->colList, index1); tscColumnCopy(x, pCol); } else { - SColumn *p = tscColumnClone(pCol); - taosArrayPush(pNewQueryInfo->colList, &p); + SSchema ss = {.type = (uint8_t)pCol->info.type, .bytes = pCol->info.bytes, .colId = (int16_t)pCol->columnIndex}; + tscColumnListInsert(pNewQueryInfo->colList, pCol->columnIndex, pCol->tableUid, &ss); } } } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 82f6827909..19a816faeb 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1808,101 +1808,6 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i return TSDB_CODE_SUCCESS; } -static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) { - SSchema* pSchema = pBuilder->pSchema; - char* p = (char*)pBuilder->buf; - int toffset = 0; - uint16_t nCols = pBuilder->nCols; - - uint8_t memRowType = payloadType(p); - uint16_t nColsBound = payloadNCols(p); - if (pBuilder->nCols <= 0 || nColsBound <= 0) { - return NULL; - } - char* pVals = POINTER_SHIFT(p, payloadValuesOffset(p)); - SMemRow* memRow = (SMemRow)pBuilder->pDataBlock; - memRowSetType(memRow, memRowType); - - // ----------------- Raw payload structure for row: - /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->| - * | |<----------------- flen ------------->|<--- value part --->| - * |SMemRowType| dataTLen | nCols | colId | colType | offset | ... | value |...|...|... | - * +-----------+----------+----------+--------------------------------------|--------------------| - * | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... | - * +-----------+----------+----------+--------------------------------------+--------------------| - * 1. offset in column data tuple starts from the value part in case of uint16_t overflow. - * 2. dataTLen: total length including the header and body. 
- */ - - if (memRowType == SMEM_ROW_DATA) { - SDataRow trow = (SDataRow)memRowDataBody(memRow); - dataRowSetLen(trow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBuilder->flen)); - dataRowSetVersion(trow, pBuilder->sversion); - - p = (char*)payloadBody(pBuilder->buf); - uint16_t i = 0, j = 0; - while (j < nCols) { - if (i >= nColsBound) { - break; - } - int16_t colId = payloadColId(p); - if (colId == pSchema[j].colId) { - // ASSERT(payloadColType(p) == pSchema[j].type); - tdAppendColVal(trow, POINTER_SHIFT(pVals, payloadColOffset(p)), pSchema[j].type, toffset); - toffset += TYPE_BYTES[pSchema[j].type]; - p = payloadNextCol(p); - ++i; - ++j; - } else if (colId < pSchema[j].colId) { - p = payloadNextCol(p); - ++i; - } else { - tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset); - toffset += TYPE_BYTES[pSchema[j].type]; - ++j; - } - } - - while (j < nCols) { - tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset); - toffset += TYPE_BYTES[pSchema[j].type]; - ++j; - } - - #if 0 // no need anymore - while (i < nColsBound) { - p = payloadNextCol(p); - ++i; - } - #endif - - } else if (memRowType == SMEM_ROW_KV) { - SKVRow kvRow = (SKVRow)memRowKvBody(memRow); - kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nColsBound)); - kvRowSetNCols(kvRow, nColsBound); - memRowSetKvVersion(memRow, pBuilder->sversion); - - p = (char*)payloadBody(pBuilder->buf); - int i = 0; - while (i < nColsBound) { - int16_t colId = payloadColId(p); - uint8_t colType = payloadColType(p); - tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, &toffset); - //toffset += sizeof(SColIdx); - p = payloadNextCol(p); - ++i; - } - - } else { - ASSERT(0); - } - int32_t rowTLen = memRowTLen(memRow); - pBuilder->pDataBlock = (char*)pBuilder->pDataBlock + rowTLen; // next row - pBuilder->pSubmitBlk->dataLen += rowTLen; - - return memRow; -} - // Erase the empty space reserved for binary data static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam, SBlockKeyTuple* blkKeyTuple) { @@ -1934,10 +1839,11 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI int32_t schemaSize = sizeof(STColumn) * numOfCols; pBlock->schemaLen = schemaSize; } else { - for (int32_t j = 0; j < tinfo.numOfColumns; ++j) { - flen += TYPE_BYTES[pSchema[j].type]; + if (IS_RAW_PAYLOAD(insertParam->payloadType)) { + for (int32_t j = 0; j < tinfo.numOfColumns; ++j) { + flen += TYPE_BYTES[pSchema[j].type]; + } } - pBlock->schemaLen = 0; } @@ -1964,18 +1870,19 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI pBlock->dataLen += memRowTLen(memRow); } } else { - SMemRowBuilder rowBuilder; - rowBuilder.pSchema = pSchema; - rowBuilder.sversion = pTableMeta->sversion; - rowBuilder.flen = flen; - rowBuilder.nCols = tinfo.numOfColumns; - rowBuilder.pDataBlock = pDataBlock; - rowBuilder.pSubmitBlk = pBlock; - rowBuilder.buf = p; - for (int32_t i = 0; i < numOfRows; ++i) { - rowBuilder.buf = (blkKeyTuple + i)->payloadAddr; - tdGenMemRowFromBuilder(&rowBuilder); + char* payload = (blkKeyTuple + i)->payloadAddr; + if (isNeedConvertRow(payload)) { + convertSMemRow(pDataBlock, payload, pTableDataBlock); + TDRowTLenT rowTLen = memRowTLen(pDataBlock); + pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen); + pBlock->dataLen += rowTLen; + } else { + TDRowTLenT rowTLen = memRowTLen(payload); + memcpy(pDataBlock, payload, rowTLen); + pDataBlock = POINTER_SHIFT(pDataBlock, rowTLen); + 
pBlock->dataLen += rowTLen; + } } } @@ -1988,9 +1895,9 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SI static int32_t getRowExpandSize(STableMeta* pTableMeta) { int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE; - int32_t columns = tscGetNumOfColumns(pTableMeta); + int32_t columns = tscGetNumOfColumns(pTableMeta); SSchema* pSchema = tscGetTableSchema(pTableMeta); - for(int32_t i = 0; i < columns; i++) { + for (int32_t i = 0; i < columns; i++) { if (IS_VAR_DATA_TYPE((pSchema + i)->type)) { result += TYPE_BYTES[TSDB_DATA_TYPE_BINARY]; } @@ -2036,7 +1943,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData; if (pBlocks->numOfRows > 0) { // the maximum expanded size in byte when a row-wise data is converted to SDataRow format - int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); + int32_t expandSize = isRawPayload ? getRowExpandSize(pOneTableBlock->pTableMeta) : 0; STableDataBlocks* dataBuf = NULL; int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, @@ -2049,7 +1956,8 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl return ret; } - int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); + int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); if (dataBuf->nAllocSize < destSize) { dataBuf->nAllocSize = (uint32_t)(destSize * 1.5); @@ -2093,7 +2001,9 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey); } - int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); + int32_t len = pBlocks->numOfRows * + (isRawPayload ? (pOneTableBlock->rowSize + expandSize) : getExtendedRowSize(pOneTableBlock)) + + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); pBlocks->tid = htonl(pBlocks->tid); pBlocks->uid = htobe64(pBlocks->uid); @@ -4554,14 +4464,16 @@ CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) { return cMeta; } -int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity) { +int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, size_t *tableMetaCapacity, STableMeta**ppSTable) { assert(*ppChild != NULL); - - STableMeta* p = NULL; - size_t sz = 0; + STableMeta* p = *ppSTable; STableMeta* pChild = *ppChild; - + size_t sz = (p != NULL) ? tscGetTableMetaSize(p) : 0; //ppSTableBuf actually capacity may larger than sz, dont care + if (p != NULL && sz != 0) { + memset((char *)p, 0, sz); + } taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz); + *ppSTable = p; // tableMeta exists, build child table meta according to the super table meta // the uid need to be checked in addition to the general name of the super table. @@ -4580,10 +4492,8 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name, memcpy(pChild->schema, p->schema, totalBytes); *ppChild = pChild; - tfree(p); return TSDB_CODE_SUCCESS; } else { // super table has been removed, current tableMeta is also expired. 
remove it here - tfree(p); taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); return -1; } diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 1637c4832b..46259c8488 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -186,6 +186,7 @@ typedef void *SDataRow; #define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t)) #define dataRowLen(r) (*(TDRowLenT *)(r)) // 0~65535 +#define dataRowEnd(r) POINTER_SHIFT(r, dataRowLen(r)) #define dataRowVersion(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))) #define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE) #define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r))) @@ -201,14 +202,18 @@ void tdFreeDataRow(SDataRow row); void tdInitDataRow(SDataRow row, STSchema *pSchema); SDataRow tdDataRowDup(SDataRow row); + // offset here not include dataRow header length -static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) { +static FORCE_INLINE int tdAppendDataColVal(SDataRow row, const void *value, bool isCopyVarData, int8_t type, + int32_t offset) { ASSERT(value != NULL); int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE; if (IS_VAR_DATA_TYPE(type)) { *(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row); - memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value)); + if (isCopyVarData) { + memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value)); + } dataRowLen(row) += varDataTLen(value); } else { if (offset == 0) { @@ -223,6 +228,12 @@ static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t t return 0; } + +// offset here not include dataRow header length +static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) { + return tdAppendDataColVal(row, value, true, type, offset); +} + // NOTE: offset here including the header size static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) { if (IS_VAR_DATA_TYPE(type)) { @@ -472,9 +483,10 @@ static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) { } // offset here not include kvRow header length -static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t *offset) { +static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, bool isCopyValData, int16_t colId, int8_t type, + int32_t offset) { ASSERT(value != NULL); - int32_t toffset = *offset + TD_KV_ROW_HEAD_SIZE; + int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE; SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset); char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row)); @@ -482,10 +494,12 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t pColIdx->offset = kvRowLen(row); // offset of pColIdx including the TD_KV_ROW_HEAD_SIZE if (IS_VAR_DATA_TYPE(type)) { - memcpy(ptr, value, varDataTLen(value)); + if (isCopyValData) { + memcpy(ptr, value, varDataTLen(value)); + } kvRowLen(row) += varDataTLen(value); } else { - if (*offset == 0) { + if (offset == 0) { ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP); TKEY tvalue = tdGetTKEY(*(TSKEY *)value); memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]); @@ -494,7 +508,6 @@ static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t } kvRowLen(row) += TYPE_BYTES[type]; } - *offset += sizeof(SColIdx); return 0; } @@ -589,12 +602,24 @@ typedef void *SMemRow; #define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE) 
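/* [Editor's note, not part of the patch] A minimal sketch of the flag byte
 * manipulated by the SMEM_ROW_* macros added just below: bit 0 stores the row
 * type (SMEM_ROW_DATA/SMEM_ROW_KV) and bit 7 the pending-convert mark, so
 * memRowSetConvert() can tag a row for conversion without losing its type.
 * Standalone C, with the bit masks written out literally. */

#include <assert.h>
#include <stdint.h>

int main(void) {
  uint8_t flag = 0x01U;                     /* SMEM_ROW_KV                */
  flag = (uint8_t)((flag & 0x7FU) | 0x80U); /* memRowSetConvert: set bit 7 */
  assert((flag & 0x01U) == 0x01U);          /* memRowType: still SMEM_ROW_KV */
  assert((flag & 0x80U) == 0x80U);          /* isNeedConvertRow: true     */
  return 0;
}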
#define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE) -#define SMEM_ROW_DATA 0U // SDataRow -#define SMEM_ROW_KV 1U // SKVRow +#define SMEM_ROW_DATA 0x0U // SDataRow +#define SMEM_ROW_KV 0x01U // SKVRow +#define SMEM_ROW_CONVERT 0x80U // SMemRow convert flag -#define memRowType(r) (*(uint8_t *)(r)) +#define KVRatioKV (0.2f) // all bool +#define KVRatioPredict (0.4f) +#define KVRatioData (0.75f) // all bigint +#define KVRatioConvert (0.9f) + +#define memRowType(r) ((*(uint8_t *)(r)) & 0x01) + +#define memRowSetType(r, t) ((*(uint8_t *)(r)) = (t)) // set the total byte in case of dirty memory +#define memRowSetConvert(r) ((*(uint8_t *)(r)) = (((*(uint8_t *)(r)) & 0x7F) | SMEM_ROW_CONVERT)) // highest bit +#define isDataRowT(t) (SMEM_ROW_DATA == (((uint8_t)(t)) & 0x01)) #define isDataRow(r) (SMEM_ROW_DATA == memRowType(r)) +#define isKvRowT(t) (SMEM_ROW_KV == (((uint8_t)(t)) & 0x01)) #define isKvRow(r) (SMEM_ROW_KV == memRowType(r)) +#define isNeedConvertRow(r) (((*(uint8_t *)(r)) & 0x80) == SMEM_ROW_CONVERT) #define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag #define memRowKvBody(r) \ @@ -611,6 +636,14 @@ typedef void *SMemRow; #define memRowLen(r) (isDataRow(r) ? memRowDataLen(r) : memRowKvLen(r)) #define memRowTLen(r) (isDataRow(r) ? memRowDataTLen(r) : memRowKvTLen(r)) // using uint32_t/int32_t to store the TLen +static FORCE_INLINE char *memRowEnd(SMemRow row) { + if (isDataRow(row)) { + return (char *)dataRowEnd(memRowDataBody(row)); + } else { + return (char *)kvRowEnd(memRowKvBody(row)); + } +} + #define memRowDataVersion(r) dataRowVersion(memRowDataBody(r)) #define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)) #define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version @@ -628,7 +661,6 @@ typedef void *SMemRow; } \ } while (0) -#define memRowSetType(r, t) (memRowType(r) = (t)) #define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l)) #define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowSetKvVersion(r, v)) #define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r)) @@ -661,12 +693,12 @@ static FORCE_INLINE void *tdGetMemRowDataOfColEx(void *row, int16_t colId, int8_ } } -static FORCE_INLINE int tdAppendMemColVal(SMemRow row, const void *value, int16_t colId, int8_t type, int32_t offset, - int32_t *kvOffset) { +static FORCE_INLINE int tdAppendMemRowColVal(SMemRow row, const void *value, bool isCopyVarData, int16_t colId, + int8_t type, int32_t offset) { if (isDataRow(row)) { - tdAppendColVal(memRowDataBody(row), value, type, offset); + tdAppendDataColVal(memRowDataBody(row), value, isCopyVarData, type, offset); } else { - tdAppendKvColVal(memRowKvBody(row), value, colId, type, kvOffset); + tdAppendKvColVal(memRowKvBody(row), value, isCopyVarData, colId, type, offset); } return 0; } @@ -688,6 +720,30 @@ static FORCE_INLINE int32_t tdGetColAppendLen(uint8_t rowType, const void *value return len; } +/** + * 1. calculate the delta of AllNullLen for SDataRow. + * 2. calculate the real len for SKVRow. 
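+ * A worked example with a hypothetical size, for a BINARY value where
+ * varDataLen(value) == 10:
+ *   *dataLen += 10 - CHAR_BYTES;       // delta versus the all-NULL placeholder
+ *   *kvLen   += 10 + sizeof(SColIdx);  // payload plus one column-index entry
+ * For fixed-size types only *kvLen grows, by TYPE_BYTES[colType] + sizeof(SColIdx).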
+ */ +static FORCE_INLINE void tdGetColAppendDeltaLen(const void *value, int8_t colType, int32_t *dataLen, int32_t *kvLen) { + switch (colType) { + case TSDB_DATA_TYPE_BINARY: { + int32_t varLen = varDataLen(value); + *dataLen += (varLen - CHAR_BYTES); + *kvLen += (varLen + sizeof(SColIdx)); + break; + } + case TSDB_DATA_TYPE_NCHAR: { + int32_t varLen = varDataLen(value); + *dataLen += (varLen - TSDB_NCHAR_SIZE); + *kvLen += (varLen + sizeof(SColIdx)); + break; + } + default: { + *kvLen += (TYPE_BYTES[colType] + sizeof(SColIdx)); + break; + } + } +} typedef struct { int16_t colId; @@ -703,7 +759,7 @@ static FORCE_INLINE void setSColInfo(SColInfo* colInfo, int16_t colId, uint8_t c SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSchema1, STSchema *pSchema2); - +#if 0 // ----------------- Raw payload structure for row: /* |<------------ Head ------------->|<----------- body of column data tuple ------------------->| * | |<----------------- flen ------------->|<--- value part --->| @@ -749,6 +805,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SHIFT(pCol, PAYLOAD_COL_HEAD_LEN); } +#endif + #ifdef __cplusplus } #endif diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 25d1c90ec5..62f369d987 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -41,6 +41,7 @@ extern char tsArbitrator[]; extern int8_t tsArbOnline; extern int64_t tsArbOnlineTimestamp; extern int32_t tsDnodeId; +extern int64_t tsDnodeStartTime; // common extern int tsRpcTimer; diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index c793d241f6..a3a6c0fed4 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -851,7 +851,8 @@ SMemRow mergeTwoMemRows(void *buffer, SMemRow row1, SMemRow row2, STSchema *pSch int16_t k; for (k = 0; k < nKvNCols; ++k) { SColInfo *pColInfo = taosArrayGet(stashRow, k); - tdAppendKvColVal(kvRow, pColInfo->colVal, pColInfo->colId, pColInfo->colType, &toffset); + tdAppendKvColVal(kvRow, pColInfo->colVal, true, pColInfo->colId, pColInfo->colType, toffset); + toffset += sizeof(SColIdx); } ASSERT(kvLen == memRowTLen(tRow)); } diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 3c904dc034..d1b816f122 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -46,6 +46,7 @@ int8_t tsArbOnline = 0; int64_t tsArbOnlineTimestamp = TSDB_ARB_DUMMY_TIME; char tsEmail[TSDB_FQDN_LEN] = {0}; int32_t tsDnodeId = 0; +int64_t tsDnodeStartTime = 0; // common int32_t tsRpcTimer = 300; @@ -991,7 +992,7 @@ static void doInitGlobalConfig(void) { cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW; cfg.minValue = 0; - cfg.maxValue = TSDB_MAX_ALLOWED_SQL_LEN; + cfg.maxValue = TSDB_MAX_FIELD_LEN; cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_BYTE; taosInitConfigOption(cfg); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java new file mode 100644 index 0000000000..2ae03b4e5c --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisonInNanoRestTest.java @@ -0,0 +1,570 @@ +package com.taosdata.jdbc.cases; + + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import 
org.junit.BeforeClass; +import org.junit.After; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; +import java.text.Format; +import java.text.SimpleDateFormat; + +public class TimestampPrecisonInNanoRestTest { + + private static final String host = "127.0.0.1"; + private static final String ns_timestamp_db = "ns_precision_test"; + private static final long timestamp1 = System.currentTimeMillis(); + private static final long timestamp2 = timestamp1 * 1000_000 + 123455; + private static final long timestamp3 = (timestamp1 + 10) * 1000_000 + 123456; + private static final Format format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); + private static final String date1 = format.format(new Date(timestamp1)); + private static final String date4 = format.format(new Date(timestamp1 + 10L)); + private static final String date2 = date1 + "123455"; + private static final String date3 = date4 + "123456"; + + + private static Connection conn; + + @BeforeClass + public static void beforeClass() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url, properties); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + ns_timestamp_db); + stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'"); + stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)"); + stmt.close(); + } + + @After + public void afterEach() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + ns_timestamp_db); + stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'"); + stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)"); + stmt.close(); + } + + @AfterClass + public static void afterClass() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void checkCount(long count, ResultSet rs) throws SQLException { + if (count == 0) { + Assert.fail(); + } + rs.next(); + long test_count = rs.getLong(1); + Assert.assertEquals(count, test_count); + } + + private void checkTime(long ts, ResultSet rs) throws SQLException { + rs.next(); + int nanos = rs.getTimestamp(1).getNanos(); + Assert.assertEquals(ts % 1000_000_000l, nanos); + long test_ts = rs.getLong(1); + Assert.assertEquals(ts / 1000_000l, test_ts); + } + + @Test + public void canInsertTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'"); 
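+            // date3 (= date4 + "123456") matches exactly one of the two rows
+            // seeded in beforeClass()/afterEach(), so a count of 1 and the
+            // ns-precision value timestamp3 are expected below.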
+ checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'"); + checkTime(timestamp3, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canImportTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'"); + checkTime(timestamp1 * 1000_000l + 123123l, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'"); + checkTime(timestamp1 * 1000_000l + 123123l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canInsertTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'"); + checkTime(timestamp2, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canImportTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + long timestamp4 = timestamp1 * 1000_000 + 123123; + stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'"); + checkTime(timestamp4, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'"); + checkTime(timestamp4, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectLastRowFromWeatherForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last(ts) from " + ns_timestamp_db + ".weather"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectLastRowFromWeatherForSecondCol() { + try (Statement stmt = 
conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last(ts2) from " + ns_timestamp_db + ".weather"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectFirstRowFromWeatherForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select first(ts) from " + ns_timestamp_db + ".weather"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectFirstRowFromWeatherForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select first(ts2) from " + ns_timestamp_db + ".weather"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + timestamp2 + 
"'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + timestamp2 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + 
e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryNotEqualToInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryNotEqualToInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryNotEqualInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryNotEqualInNumberTypeForSecondCol() { + try 
(Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol(){ + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(now + 1000b, now - 1000b, 128)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather"); + checkCount(3l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canIntervalAndSlidingAcceptNsUnitForFirstCol(){ + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)"); + rs.next(); + long sum = rs.getLong(2); + Assert.assertEquals(127l, sum); + rs.next(); + sum = rs.getLong(2); + Assert.assertEquals(128l, sum); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canIntervalAndSlidingAcceptNsUnitForSecondCol(){ + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)"); + rs.next(); + long sum = rs.getLong(2); + Assert.assertEquals(127l, sum); + rs.next(); + sum = rs.getLong(2); + Assert.assertEquals(128l, sum); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testDataOutOfRangeExceptionForFirstCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(123456789012345678, 1234567890123456789, 127)"); + } catch (SQLException e) { + Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage()); + } + } + + @Test + public void testDataOutOfRangeExceptionForSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(1234567890123456789, 123456789012345678, 127)"); + } catch (SQLException e) { + Assert.assertEquals("TDengine ERROR (60b): Timestamp data out of range", e.getMessage()); + } + } + + @Test + public void willAutomaticallyFillToNsUnitWithZerosForFirstCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "000000'"); + checkCount(1l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void willAutomaticallyFillToNsUnitWithZerosForSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "000000'"); + checkCount(1l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + 
public void willAutomaticallyDropDigitExceedNsDigitNumberForFirstCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "999999'"); + checkCount(1l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void willAutomaticallyDropDigitExceedNsDigitNumberForSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "999999'"); + checkCount(1l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/nodejs/nodetaos/taosobjects.js index 0fc8dc8ef1..3bc0fe0aca 100644 --- a/src/connector/nodejs/nodetaos/taosobjects.js +++ b/src/connector/nodejs/nodetaos/taosobjects.js @@ -47,7 +47,8 @@ class TaosTimestamp extends Date { super(Math.floor(date / 1000)); this.precisionExtras = date % 1000; } else if (precision === 2) { - super(parseInt(date / 1000000)); + // use BigInt to fix: 1623254400999999999 / 1000000 = 1623254401000, which is not expected + super(parseInt(BigInt(date) / 1000000n)); // use BigInt to fix: 1625801548423914405 % 1000000 = 914496, which is not expected (should be 914405) this.precisionExtras = parseInt(BigInt(date) % 1000000n); } else { diff --git a/src/connector/nodejs/package.json index db37318a16..6a2c66100b 100644 --- a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -1,6 +1,6 @@ { "name": "td2.0-connector", - "version": "2.0.9", + "version": "2.0.10", "description": "A Node.js connector for TDengine.", "main": "tdengine.js", "directories": { diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index eac04fe7bb..abbc99ac02 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -195,6 +195,7 @@ int32_t dnodeInitSystem() { dnodeSetRunStatus(TSDB_RUN_STATUS_RUNING); moduleStart(); + tsDnodeStartTime = taosGetTimestampMs(); dnodeReportStep("TDengine", "initialized successfully", 1); dInfo("TDengine is initialized successfully"); diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index ee37ffdcbb..2f77788025 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -42,6 +42,8 @@ int32_t main(int32_t argc, char *argv[]) { } } else if (strcmp(argv[i], "-C") == 0) { dump_config = 1; + } else if (strcmp(argv[i], "--force-keep-file") == 0) { + tsdbForceKeepFile = true; } else if (strcmp(argv[i], "--compact-mnode-wal") == 0) { tsCompactMnodeWal = 1; } else if (strcmp(argv[i], "-V") == 0) { diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index a74a531361..1767b25402 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -471,6 +471,7 @@ typedef struct { bool stableQuery; // super table query or not bool topBotQuery; // TODO used bitwise flag + bool interpQuery; // interp query or not bool groupbyColumn; // denote if this is a groupby normal column query bool hasTagResults; // if there are tag values in final result or not bool timeWindowInterpo;// if the time window start/end required interpolation diff --git a/src/inc/tfs.h 
b/src/inc/tfs.h index e72620eca6..11e33a3af7 100644 --- a/src/inc/tfs.h +++ b/src/inc/tfs.h @@ -41,9 +41,16 @@ typedef struct { int64_t avail; } SFSMeta; +typedef struct { + int64_t size; + int64_t used; + int64_t free; + int16_t nAvailDisks; // # of Available disks +} STierMeta; + int tfsInit(SDiskCfg *pDiskCfg, int ndisk); void tfsDestroy(); -void tfsUpdateInfo(SFSMeta *pFSMeta); +void tfsUpdateInfo(SFSMeta *pFSMeta, STierMeta *tierMetas, int8_t numLevels); void tfsGetMeta(SFSMeta *pMeta); void tfsAllocDisk(int expLevel, int *level, int *id); diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 3838344a8b..e0ca76a922 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -75,6 +75,7 @@ extern char configDir[]; #define BUFFER_SIZE TSDB_MAX_ALLOWED_SQL_LEN #define COND_BUF_LEN (BUFFER_SIZE - 30) #define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) + #define MAX_USERNAME_SIZE 64 #define MAX_PASSWORD_SIZE 16 #define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html @@ -245,7 +246,6 @@ typedef struct SArguments_S { uint32_t disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms, us or ns. accordig to database precision uint32_t method_of_delete; - char ** arg_list; uint64_t totalInsertRows; uint64_t totalAffectedRows; bool demo_mode; // use default column name and semi-random data @@ -637,7 +637,6 @@ SArguments g_args = { 0, // disorderRatio 1000, // disorderRange 1, // method_of_delete - NULL, // arg_list 0, // totalInsertRows; 0, // totalAffectedRows; true, // demo_mode; @@ -1009,6 +1008,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { exit(EXIT_FAILURE); } arguments->datatype[0] = argv[i]; + arguments->datatype[1] = NULL; } else { // more than one col int index = 0; @@ -1413,6 +1413,7 @@ static char *rand_float_str() return g_randfloat_buff + (cursor * FLOAT_BUFF_LEN); } + static float rand_float() { static int cursor; @@ -6407,6 +6408,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { bool flagSleep = true; uint64_t sleepTimeTotal = 0; + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { if ((flagSleep) && (insert_interval)) { st = taosGetTimestampMs(); @@ -6583,6 +6587,11 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->totalAffectedRows += affectedRows; + int currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", @@ -6604,6 +6613,8 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { } } } + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); free_of_interlace: tmfree(pThreadInfo->buffer); @@ -6641,6 +6652,9 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->samplePos = 0; + int percentComplete = 0; + int64_t totalRows = insertRows * pThreadInfo->ntables; + for (uint64_t tableSeq = pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; tableSeq ++) { @@ -6746,6 +6760,11 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { pThreadInfo->totalAffectedRows += affectedRows; + int 
currentPercent = pThreadInfo->totalAffectedRows * 100 / totalRows; + if (currentPercent > percentComplete ) { + printf("[%d]:%d%%\n", pThreadInfo->threadID, currentPercent); + percentComplete = currentPercent; + } int64_t currentPrintTime = taosGetTimestampMs(); if (currentPrintTime - lastPrintTime > 30*1000) { printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", @@ -6768,6 +6787,8 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { __func__, __LINE__, pThreadInfo->samplePos); } } // tableSeq + if (percentComplete < 100) + printf("[%d]:%d%%\n", pThreadInfo->threadID, percentComplete); free_of_progressive: tmfree(pThreadInfo->buffer); diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 5fe22826b7..570f5c344b 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -253,11 +253,15 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) { int32_t connId = htonl(pHBMsg->connId); SConnObj *pConn = mnodeAccquireConn(connId, connInfo.user, connInfo.clientIp, connInfo.clientPort); + if (pConn == NULL) { + pHBMsg->pid = htonl(pHBMsg->pid); + pConn = mnodeCreateConn(connInfo.user, connInfo.clientIp, connInfo.clientPort, pHBMsg->pid, pHBMsg->appName); + } if (pConn == NULL) { // do not close existing links, otherwise // mError("failed to create connId, close connect"); - // pRsp->killConnection = 1; + // pRsp->killConnection = 1; } else { pRsp->connId = htonl(pConn->connId); mnodeSaveQueryStreamList(pConn, pHBMsg); diff --git a/src/mnode/src/mnodeWrite.c b/src/mnode/src/mnodeWrite.c index c0699b05b3..9a993dfaaf 100644 --- a/src/mnode/src/mnodeWrite.c +++ b/src/mnode/src/mnodeWrite.c @@ -65,7 +65,14 @@ int32_t mnodeProcessWrite(SMnodeMsg *pMsg) { return TSDB_CODE_MND_MSG_NOT_PROCESSED; } - int32_t code = mnodeInitMsg(pMsg); + int32_t code = grantCheck(TSDB_GRANT_TIME); + if (code != TSDB_CODE_SUCCESS) { + mError("msg:%p, app:%p type:%s not processed, reason:%s", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType], + tstrerror(code)); + return code; + } + + code = mnodeInitMsg(pMsg); if (code != TSDB_CODE_SUCCESS) { mError("msg:%p, app:%p type:%s not processed, reason:%s", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType], tstrerror(code)); diff --git a/src/os/inc/osSysinfo.h b/src/os/inc/osSysinfo.h index 5f0bc2950c..0320ab0f7f 100644 --- a/src/os/inc/osSysinfo.h +++ b/src/os/inc/osSysinfo.h @@ -28,8 +28,11 @@ typedef struct { int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize); +int32_t taosGetCpuCores(); void taosGetSystemInfo(); +bool taosReadProcIO(int64_t* rchars, int64_t* wchars); bool taosGetProcIO(float *readKB, float *writeKB); +bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes); bool taosGetBandSpeed(float *bandSpeedKb); void taosGetDisk(); bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) ; diff --git a/src/os/src/darwin/dwSysInfo.c b/src/os/src/darwin/dwSysInfo.c index 10e0acc130..54c6fb1d32 100644 --- a/src/os/src/darwin/dwSysInfo.c +++ b/src/os/src/darwin/dwSysInfo.c @@ -164,6 +164,10 @@ void taosKillSystem() { exit(0); } +int32_t taosGetCpuCores() { + return sysconf(_SC_NPROCESSORS_ONLN); +} + void taosGetSystemInfo() { // taosGetProcInfos(); @@ -185,12 +189,25 @@ void taosGetSystemInfo() { taosGetSystemLocale(); } +bool taosReadProcIO(int64_t *rchars, int64_t *wchars) { + if (rchars) *rchars = 0; + if (wchars) *wchars = 0; + return true; +} + bool taosGetProcIO(float *readKB, float *writeKB) { *readKB = 0; *writeKB = 0; return true; } +bool 
taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes) { + if (bytes) *bytes = 0; + if (rbytes) *rbytes = 0; + if (tbytes) *tbytes = 0; + return true; +} + bool taosGetBandSpeed(float *bandSpeedKb) { *bandSpeedKb = 0; return true; diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c index 04b1efe7bf..8094358853 100644 --- a/src/os/src/detail/osSysinfo.c +++ b/src/os/src/detail/osSysinfo.c @@ -277,7 +277,7 @@ static void taosGetSystemLocale() { // get and set default locale } } -static int32_t taosGetCpuCores() { return (int32_t)sysconf(_SC_NPROCESSORS_ONLN); } +int32_t taosGetCpuCores() { return (int32_t)sysconf(_SC_NPROCESSORS_ONLN); } bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) { static uint64_t lastSysUsed = 0; @@ -332,7 +332,7 @@ int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) { } } -static bool taosGetCardInfo(int64_t *bytes) { +bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes) { *bytes = 0; FILE *fp = fopen(tsSysNetFile, "r"); if (fp == NULL) { @@ -347,9 +347,9 @@ static bool taosGetCardInfo(int64_t *bytes) { while (!feof(fp)) { memset(line, 0, len); - int64_t rbytes = 0; + int64_t o_rbytes = 0; int64_t rpackts = 0; - int64_t tbytes = 0; + int64_t o_tbytes = 0; int64_t tpackets = 0; int64_t nouse1 = 0; int64_t nouse2 = 0; @@ -374,8 +374,10 @@ static bool taosGetCardInfo(int64_t *bytes) { sscanf(line, "%s %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64, - nouse0, &rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &tbytes, &tpackets); - *bytes += (rbytes + tbytes); + nouse0, &o_rbytes, &rpackts, &nouse1, &nouse2, &nouse3, &nouse4, &nouse5, &nouse6, &o_tbytes, &tpackets); + if (rbytes) *rbytes = o_rbytes; + if (tbytes) *tbytes = o_tbytes; + *bytes += (o_rbytes + o_tbytes); } tfree(line); @@ -390,7 +392,7 @@ bool taosGetBandSpeed(float *bandSpeedKb) { int64_t curBytes = 0; time_t curTime = time(NULL); - if (!taosGetCardInfo(&curBytes)) { + if (!taosGetCardInfo(&curBytes, NULL, NULL)) { return false; } @@ -420,7 +422,7 @@ bool taosGetBandSpeed(float *bandSpeedKb) { return true; } -static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) { +bool taosReadProcIO(int64_t *rchars, int64_t *wchars) { FILE *fp = fopen(tsProcIOFile, "r"); if (fp == NULL) { uError("open file:%s failed", tsProcIOFile); @@ -441,10 +443,10 @@ static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) { break; } if (strstr(line, "rchar:") != NULL) { - sscanf(line, "%s %" PRId64, tmp, readbyte); + sscanf(line, "%s %" PRId64, tmp, rchars); readIndex++; } else if (strstr(line, "wchar:") != NULL) { - sscanf(line, "%s %" PRId64, tmp, writebyte); + sscanf(line, "%s %" PRId64, tmp, wchars); readIndex++; } else { } diff --git a/src/os/src/windows/wSysinfo.c b/src/os/src/windows/wSysinfo.c index 72793a1049..89101ee148 100644 --- a/src/os/src/windows/wSysinfo.c +++ b/src/os/src/windows/wSysinfo.c @@ -115,7 +115,7 @@ static void taosGetSystemLocale() { } } -static int32_t taosGetCpuCores() { +int32_t taosGetCpuCores() { SYSTEM_INFO info; GetSystemInfo(&info); return (int32_t)info.dwNumberOfProcessors; @@ -146,6 +146,13 @@ int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize) { } } +bool taosGetCardInfo(int64_t *bytes, int64_t *rbytes, int64_t *tbytes) { + if (bytes) *bytes = 0; + if (rbytes) *rbytes = 0; + if (tbytes) *tbytes = 0; + return true; +} + bool taosGetBandSpeed(float *bandSpeedKb) { *bandSpeedKb = 0; return true; diff 
--git a/src/plugins/http/inc/httpMetricsHandle.h b/src/plugins/http/inc/httpMetricsHandle.h new file mode 100644 index 0000000000..e05a8ce687 --- /dev/null +++ b/src/plugins/http/inc/httpMetricsHandle.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef TDENGINE_HTTPMETRICSHANDLE_H +#define TDENGINE_HTTPMETRICSHANDLE_H + +#include "http.h" +#include "httpInt.h" +#include "httpUtil.h" +#include "httpResp.h" + +void metricsInitHandle(HttpServer* httpServer); + +bool metricsProcessRequest(struct HttpContext* httpContext); + +#endif // TDENGINE_HTTPMETRICSHANDLE_H diff --git a/src/plugins/http/src/httpMetricsHandle.c b/src/plugins/http/src/httpMetricsHandle.c new file mode 100644 index 0000000000..dbabd48774 --- /dev/null +++ b/src/plugins/http/src/httpMetricsHandle.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#define _DEFAULT_SOURCE +#include "os.h" +#include "taoserror.h" +#include "tfs.h" + +#include "httpMetricsHandle.h" +#include "dnode.h" +#include "httpLog.h" + +static HttpDecodeMethod metricsDecodeMethod = {"metrics", metricsProcessRequest}; + +void metricsInitHandle(HttpServer* pServer) { + httpAddMethod(pServer, &metricsDecodeMethod); +} + +bool metricsProcessRequest(HttpContext* pContext) { + httpDebug("context:%p, fd:%d, user:%s, process metrics msg", pContext, pContext->fd, pContext->user); + + JsonBuf* jsonBuf = httpMallocJsonBuf(pContext); + if (jsonBuf == NULL) { + httpError("failed to allocate memory for metrics"); + httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_ENOUGH_MEMORY); + return false; + } + + httpInitJsonBuf(jsonBuf, pContext); + httpWriteJsonBufHead(jsonBuf); + + httpJsonToken(jsonBuf, JsonObjStt); + { + char* keyDisks = "tags"; + httpJsonPairHead(jsonBuf, keyDisks, (int32_t)strlen(keyDisks)); + httpJsonToken(jsonBuf, JsonArrStt); + { + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonObjStt); + char* keyTagName = "name"; + char* keyTagValue = "value"; + httpJsonPairOriginString(jsonBuf, keyTagName, (int32_t)strlen(keyTagName), "\"dnode_id\"", + (int32_t)strlen("\"dnode_id\"")); + int32_t dnodeId = dnodeGetDnodeId(); + httpJsonPairIntVal(jsonBuf, keyTagValue, (int32_t)strlen(keyTagValue), dnodeId); + httpJsonToken(jsonBuf, JsonObjEnd); + } + httpJsonToken(jsonBuf, JsonArrEnd); + } + + { + if (tsDnodeStartTime != 0) { + int64_t now = taosGetTimestampMs(); + int64_t upTime = now - tsDnodeStartTime; + char* keyUpTime = "up_time"; + httpJsonPairInt64Val(jsonBuf, keyUpTime, (int32_t)strlen(keyUpTime), upTime); + } + } + + { + int32_t cpuCores = taosGetCpuCores(); + char* keyCpuCores = "cpu_cores"; + httpJsonPairIntVal(jsonBuf, keyCpuCores, (int32_t)strlen(keyCpuCores), cpuCores); + + float sysCpuUsage = 0; + float procCpuUsage = 0; + bool succeeded = taosGetCpuUsage(&sysCpuUsage, &procCpuUsage); + if (!succeeded) { + httpError("failed to get cpu usage"); + } else { + if (sysCpuUsage <= procCpuUsage) { + sysCpuUsage = procCpuUsage + 0.1f; + } + char* keyCpuSystem = "cpu_system"; + char* keyCpuEngine = "cpu_engine"; + httpJsonPairFloatVal(jsonBuf, keyCpuSystem, (int32_t)strlen(keyCpuSystem), sysCpuUsage); + httpJsonPairFloatVal(jsonBuf, keyCpuEngine, (int32_t)strlen(keyCpuEngine), procCpuUsage); + } + } + + { + float sysMemoryUsedMB = 0; + bool succeeded = taosGetSysMemory(&sysMemoryUsedMB); + if (!succeeded) { + httpError("failed to get sys memory info"); + } else { + char* keyMemSystem = "mem_system"; + httpJsonPairFloatVal(jsonBuf, keyMemSystem, (int32_t)strlen(keyMemSystem), sysMemoryUsedMB); + } + + float procMemoryUsedMB = 0; + succeeded = taosGetProcMemory(&procMemoryUsedMB); + if (!succeeded) { + httpError("failed to get proc memory info"); + } else { + char* keyMemEngine = "mem_engine"; + httpJsonPairFloatVal(jsonBuf, keyMemEngine, (int32_t)strlen(keyMemEngine), procMemoryUsedMB); + } + } + + { + int64_t bytes = 0, rbytes = 0, tbytes = 0; + bool succeeded = taosGetCardInfo(&bytes, &rbytes, &tbytes); + if (!succeeded) { + httpError("failed to get network info"); + } else { + char* keyNetIn = "net_in"; + char* keyNetOut = "net_out"; + httpJsonPairInt64Val(jsonBuf, keyNetIn, (int32_t)strlen(keyNetIn), rbytes); + httpJsonPairInt64Val(jsonBuf, keyNetOut, (int32_t)strlen(keyNetOut), tbytes); + } + } + + { + int64_t rchars = 0; + int64_t wchars = 0; + bool succeeded = taosReadProcIO(&rchars, &wchars); + if (!succeeded) { + httpError("failed to get io info"); 
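+      // when a probe fails, the corresponding keys are simply omitted from the
+      // JSON document instead of being reported as zero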
+ } else { + char* keyIORead = "io_read"; + char* keyIOWrite = "io_write"; + httpJsonPairInt64Val(jsonBuf, keyIORead, (int32_t)strlen(keyIORead), rchars); + httpJsonPairInt64Val(jsonBuf, keyIOWrite, (int32_t)strlen(keyIOWrite), wchars); + } + } + + { + const int8_t numTiers = 3; + SFSMeta fsMeta; + STierMeta* tierMetas = calloc(numTiers, sizeof(STierMeta)); + tfsUpdateInfo(&fsMeta, tierMetas, numTiers); + { + char* keyDiskUsed = "disk_used"; + char* keyDiskTotal = "disk_total"; + httpJsonPairInt64Val(jsonBuf, keyDiskTotal, (int32_t)strlen(keyDiskTotal), fsMeta.tsize); + httpJsonPairInt64Val(jsonBuf, keyDiskUsed, (int32_t)strlen(keyDiskUsed), fsMeta.used); + char* keyDisks = "disks"; + httpJsonPairHead(jsonBuf, keyDisks, (int32_t)strlen(keyDisks)); + httpJsonToken(jsonBuf, JsonArrStt); + for (int i = 0; i < numTiers; ++i) { + httpJsonItemToken(jsonBuf); + httpJsonToken(jsonBuf, JsonObjStt); + char* keyDataDirLevelUsed = "datadir_used"; + char* keyDataDirLevelTotal = "datadir_total"; + httpJsonPairInt64Val(jsonBuf, keyDataDirLevelUsed, (int32_t)strlen(keyDataDirLevelUsed), tierMetas[i].used); + httpJsonPairInt64Val(jsonBuf, keyDataDirLevelTotal, (int32_t)strlen(keyDataDirLevelTotal), tierMetas[i].size); + httpJsonToken(jsonBuf, JsonObjEnd); + } + httpJsonToken(jsonBuf, JsonArrEnd); + } + free(tierMetas); + } + + { + SStatisInfo info = dnodeGetStatisInfo(); + { + char* keyReqHttp = "req_http"; + char* keyReqSelect = "req_select"; + char* keyReqInsert = "req_insert"; + httpJsonPairInt64Val(jsonBuf, keyReqHttp, (int32_t)strlen(keyReqHttp), info.httpReqNum); + httpJsonPairInt64Val(jsonBuf, keyReqSelect, (int32_t)strlen(keyReqSelect), info.queryReqNum); + httpJsonPairInt64Val(jsonBuf, keyReqInsert, (int32_t)strlen(keyReqInsert), info.submitReqNum); + } + } + + httpJsonToken(jsonBuf, JsonObjEnd); + + httpWriteJsonBufEnd(jsonBuf); + pContext->reqType = HTTP_REQTYPE_OTHERS; + httpFreeJsonBuf(pContext); + return false; +} \ No newline at end of file diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c index ba88a2b9cd..02f21037b8 100644 --- a/src/plugins/http/src/httpParser.c +++ b/src/plugins/http/src/httpParser.c @@ -101,13 +101,17 @@ char *httpGetStatusDesc(int32_t statusCode) { } static void httpCleanupString(HttpString *str) { - free(str->str); - str->str = NULL; - str->pos = 0; - str->size = 0; + if (str->str) { + free(str->str); + str->str = NULL; + str->pos = 0; + str->size = 0; + } } static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) { + char *new_str = NULL; + if (str->size == 0) { str->pos = 0; str->size = len + 1; @@ -115,7 +119,16 @@ static int32_t httpAppendString(HttpString *str, const char *s, int32_t len) { } else if (str->pos + len + 1 >= str->size) { str->size += len; str->size *= 4; - str->str = realloc(str->str, str->size); + + new_str = realloc(str->str, str->size); + if (new_str == NULL && str->str) { + // if str->str was not NULL originally, + // the old allocated memory was left unchanged, + // see man 3 realloc + free(str->str); + } + + str->str = new_str; } else { } @@ -317,7 +330,7 @@ static int32_t httpOnParseHeaderField(HttpParser *parser, const char *key, const static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) { HttpContext *pContext = parser->pContext; - HttpString * buf = &parser->body; + HttpString *buf = &parser->body; if (parser->parseCode != TSDB_CODE_SUCCESS) return -1; if (buf->size <= 0) { @@ -326,6 +339,7 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, 
int32_t len) { } int32_t newSize = buf->pos + len + 1; + char *newStr = NULL; if (newSize >= buf->size) { if (buf->size >= HTTP_BUFFER_SIZE) { httpError("context:%p, fd:%d, failed parse body, exceeding buffer size %d", pContext, pContext->fd, buf->size); @@ -336,7 +350,12 @@ static int32_t httpOnBody(HttpParser *parser, const char *chunk, int32_t len) { newSize = MAX(newSize, HTTP_BUFFER_INIT); newSize *= 4; newSize = MIN(newSize, HTTP_BUFFER_SIZE); - buf->str = realloc(buf->str, newSize); + newStr = realloc(buf->str, newSize); + if (newStr == NULL && buf->str) { + free(buf->str); + } + + buf->str = newStr; buf->size = newSize; if (buf->str == NULL) { @@ -374,13 +393,20 @@ static HTTP_PARSER_STATE httpTopStack(HttpParser *parser) { static int32_t httpPushStack(HttpParser *parser, HTTP_PARSER_STATE state) { HttpStack *stack = &parser->stacks; + int8_t *newStacks = NULL; if (stack->size == 0) { stack->pos = 0; stack->size = 32; stack->stacks = malloc(stack->size * sizeof(int8_t)); } else if (stack->pos + 1 > stack->size) { stack->size *= 2; - stack->stacks = realloc(stack->stacks, stack->size * sizeof(int8_t)); + + newStacks = realloc(stack->stacks, stack->size * sizeof(int8_t)); + if (newStacks == NULL && stack->stacks) { + free(stack->stacks); + } + + stack->stacks = newStacks; } else { } diff --git a/src/plugins/http/src/httpSystem.c b/src/plugins/http/src/httpSystem.c index 203db21895..085863f4e4 100644 --- a/src/plugins/http/src/httpSystem.c +++ b/src/plugins/http/src/httpSystem.c @@ -30,6 +30,7 @@ #include "httpGcHandle.h" #include "httpRestHandle.h" #include "httpTgHandle.h" +#include "httpMetricsHandle.h" #ifndef _ADMIN void adminInitHandle(HttpServer* pServer) {} @@ -52,7 +53,7 @@ int32_t httpInitSystem() { gcInitHandle(&tsHttpServer); tgInitHandle(&tsHttpServer); opInitHandle(&tsHttpServer); - + metricsInitHandle(&tsHttpServer); return 0; } diff --git a/src/plugins/http/src/httpUtil.c b/src/plugins/http/src/httpUtil.c index a8031d3fd8..ade50bdad6 100644 --- a/src/plugins/http/src/httpUtil.c +++ b/src/plugins/http/src/httpUtil.c @@ -188,13 +188,17 @@ bool httpMallocMultiCmds(HttpContext *pContext, int32_t cmdSize, int32_t bufferS bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (cmdSize > HTTP_MAX_CMD_SIZE) { + if (cmdSize <= 0 || cmdSize > HTTP_MAX_CMD_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd size:%d large then %d", pContext, pContext->fd, pContext->user, cmdSize, HTTP_MAX_CMD_SIZE); return false; } - multiCmds->cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd)); + HttpSqlCmd *new_cmds = (HttpSqlCmd *)realloc(multiCmds->cmds, (size_t)cmdSize * sizeof(HttpSqlCmd)); + if (new_cmds == NULL && multiCmds->cmds) { + free(multiCmds->cmds); + } + multiCmds->cmds = new_cmds; if (multiCmds->cmds == NULL) { httpError("context:%p, fd:%d, user:%s, malloc cmds:%d error", pContext, pContext->fd, pContext->user, cmdSize); return false; @@ -208,13 +212,17 @@ bool httpReMallocMultiCmdsSize(HttpContext *pContext, int32_t cmdSize) { bool httpReMallocMultiCmdsBuffer(HttpContext *pContext, int32_t bufferSize) { HttpSqlCmds *multiCmds = pContext->multiCmds; - if (bufferSize > HTTP_MAX_BUFFER_SIZE) { + if (bufferSize <= 0 || bufferSize > HTTP_MAX_BUFFER_SIZE) { httpError("context:%p, fd:%d, user:%s, mulitcmd buffer size:%d large then %d", pContext, pContext->fd, pContext->user, bufferSize, HTTP_MAX_BUFFER_SIZE); return false; } - multiCmds->buffer = (char *)realloc(multiCmds->buffer, 
+  char *new_buffer = (char *)realloc(multiCmds->buffer, (size_t)bufferSize);
+  if (new_buffer == NULL && multiCmds->buffer) {
+    free(multiCmds->buffer);
+  }
+  multiCmds->buffer = new_buffer;
   if (multiCmds->buffer == NULL) {
     httpError("context:%p, fd:%d, user:%s, malloc buffer:%d error", pContext, pContext->fd, pContext->user, bufferSize);
     return false;
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 996d925756..56fab57e26 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -333,6 +333,8 @@ enum OPERATOR_TYPE_E {
   OP_Distinct = 20,
   OP_Join = 21,
   OP_StateWindow = 22,
+  OP_AllTimeWindow = 23,
+  OP_AllMultiTableTimeInterval = 24,
 };
 
 typedef struct SOperatorInfo {
@@ -554,11 +556,13 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera
 SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
 SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream);
 SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
+SOperatorInfo* createAllTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
 SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
 SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
 SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
 SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
 SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
+SOperatorInfo* createAllMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
 SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput);
 SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
 SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h
index d2802d9fe0..ce607f0fe2 100644
--- a/src/query/inc/qUtil.h
+++ b/src/query/inc/qUtil.h
@@ -39,7 +39,6 @@
 #define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId)
 
 #define curTimeWindowIndex(_winres)  ((_winres)->curIndex)
-#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.param[0].i64:1)
 
 int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr);
 
@@ -60,6 +59,7 @@ SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t
 void* destroyQueryFuncExpr(SExprInfo* pExprInfo, int32_t numOfExpr);
 void* freeColumnInfo(SColumnInfo* pColumnInfo, int32_t numOfCols);
+int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable);
 
 static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int32_t slot) {
   assert(pResultRowInfo != NULL && slot >= 0 && slot < pResultRowInfo->size);
@@ -70,7 +70,7 @@ static FORCE_INLINE char* getPosInResultPage(SQueryAttr* pQueryAttr, tFilePage*
                                              int32_t offset) {
   assert(rowOffset >= 0 && pQueryAttr != NULL);
 
-  int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery);
+  int32_t numOfRows = (int32_t)getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery);
 
   return ((char *)page->data) + rowOffset + offset * numOfRows;
 }
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index dad05df22a..c19628eb37 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -3708,27 +3708,59 @@ static void interp_function_impl(SQLFunctionCtx *pCtx) {
     }
   } else {
     // no data generated yet
-    if (pCtx->size == 1) {
+    if (pCtx->size < 1) {
       return;
     }
 
     // check the timestamp in input buffer
     TSKEY skey = GET_TS_DATA(pCtx, 0);
-    TSKEY ekey = GET_TS_DATA(pCtx, 1);
-
-    // no data generated yet
-    if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
-      return;
-    }
-
-    assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
 
     if (type == TSDB_FILL_PREV) {
+      if (skey > pCtx->startTs) {
+        return;
+      }
+
+      if (pCtx->size > 1) {
+        TSKEY ekey = GET_TS_DATA(pCtx, 1);
+        if (ekey > skey && ekey <= pCtx->startTs) {
+          skey = ekey;
+        }
+      }
       assignVal(pCtx->pOutput, pCtx->pInput, pCtx->outputBytes, pCtx->inputType);
     } else if (type == TSDB_FILL_NEXT) {
-      char* val = ((char*)pCtx->pInput) + pCtx->inputBytes;
+      TSKEY ekey = skey;
+      char* val = NULL;
+
+      if (ekey < pCtx->startTs) {
+        if (pCtx->size > 1) {
+          ekey = GET_TS_DATA(pCtx, 1);
+          if (ekey < pCtx->startTs) {
+            return;
+          }
+
+          val = ((char*)pCtx->pInput) + pCtx->inputBytes;
+        } else {
+          return;
+        }
+      } else {
+        val = (char*)pCtx->pInput;
+      }
+
       assignVal(pCtx->pOutput, val, pCtx->outputBytes, pCtx->inputType);
     } else if (type == TSDB_FILL_LINEAR) {
+      if (pCtx->size <= 1) {
+        return;
+      }
+
+      TSKEY ekey = GET_TS_DATA(pCtx, 1);
+
+      // no data generated yet
+      if (!(skey < pCtx->startTs && ekey > pCtx->startTs)) {
+        return;
+      }
+
+      assert(pCtx->start.key == INT64_MIN && skey < pCtx->startTs && ekey > pCtx->startTs);
+
       char *start = GET_INPUT_DATA(pCtx, 0);
       char *end = GET_INPUT_DATA(pCtx, 1);
@@ -4030,12 +4062,15 @@ static void mergeTableBlockDist(SResultRowCellInfo* pResInfo, const STableBlockD
     pDist->maxRows = pSrc->maxRows;
     pDist->minRows = pSrc->minRows;
 
-    int32_t numSteps = tsMaxRowsInFileBlock/TSDB_BLOCK_DIST_STEP_ROWS;
-    pDist->dataBlockInfos = taosArrayInit(numSteps, sizeof(SFileBlockInfo));
-    taosArraySetSize(pDist->dataBlockInfos, numSteps);
+    int32_t maxSteps = TSDB_MAX_MAX_ROW_FBLOCK/TSDB_BLOCK_DIST_STEP_ROWS;
+    if (TSDB_MAX_MAX_ROW_FBLOCK % TSDB_BLOCK_DIST_STEP_ROWS != 0) {
+      ++maxSteps;
+    }
+    pDist->dataBlockInfos = taosArrayInit(maxSteps, sizeof(SFileBlockInfo));
+    taosArraySetSize(pDist->dataBlockInfos, maxSteps);
   }
 
-  size_t steps = taosArrayGetSize(pDist->dataBlockInfos);
+  size_t steps = taosArrayGetSize(pSrc->dataBlockInfos);
   for (int32_t i = 0; i < steps; ++i) {
     int32_t srcNumBlocks = ((SFileBlockInfo*)taosArrayGet(pSrc->dataBlockInfos, i))->numBlocksOfStep;
     SFileBlockInfo* blockInfo = (SFileBlockInfo*)taosArrayGet(pDist->dataBlockInfos, i);
@@ -4047,9 +4082,9 @@ void block_func_merge(SQLFunctionCtx* pCtx) {
   STableBlockDist info = {0};
   int32_t len = *(int32_t*) pCtx->pInput;
   blockDistInfoFromBinary(((char*)pCtx->pInput) + sizeof(int32_t), len, &info);
-
   SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
   mergeTableBlockDist(pResInfo, &info);
+  taosArrayDestroy(info.dataBlockInfos);
 
   pResInfo->numOfRes = 1;
   pResInfo->hasResult = DATA_SET_FLAG;
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 22e4f87ef9..9000bcdf77 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -448,6 +448,44 @@ static void prepareResultListBuffer(SResultRowInfo* pResultRowInfo, SQueryRuntim
   pResultRowInfo->capacity = (int32_t)newCapacity;
 }
 
+static bool chkResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, char *pData,
+                                int16_t bytes, bool masterscan, uint64_t uid) {
+  bool existed = false;
+  SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, uid);
+
+  SResultRow **p1 =
+      (SResultRow **)taosHashGet(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+
+  // in case of repeat scan/reverse scan, no new time window added.
+  if (QUERY_IS_INTERVAL_QUERY(pRuntimeEnv->pQueryAttr)) {
+    if (!masterscan) {  // the *p1 may be NULL in case of sliding+offset exists.
+      return p1 != NULL;
+    }
+
+    if (p1 != NULL) {
+      if (pResultRowInfo->size == 0) {
+        existed = false;
+        assert(pResultRowInfo->curPos == -1);
+      } else if (pResultRowInfo->size == 1) {
+        existed = (pResultRowInfo->pResult[0] == (*p1));
+      } else {  // check if current pResultRowInfo contains the existed pResultRow
+        SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, uid, pResultRowInfo);
+        int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes));
+        if (index != NULL) {
+          existed = true;
+        } else {
+          existed = false;
+        }
+      }
+    }
+
+    return existed;
+  }
+
+  return p1 != NULL;
+}
+
+
 static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, int64_t tid,
                                           char* pData, int16_t bytes, bool masterscan, uint64_t tableGroupId) {
   bool existed = false;
@@ -592,6 +630,35 @@ static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t
   return w;
 }
 
+// get the correct time window according to the handled timestamp
+static STimeWindow getCurrentActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t ts, SQueryAttr *pQueryAttr) {
+  STimeWindow w = {0};
+
+  if (pResultRowInfo->curPos == -1) {  // the first window, from the previous stored value
+    getInitialStartTimeWindow(pQueryAttr, ts, &w);
+
+    if (pQueryAttr->interval.intervalUnit == 'n' || pQueryAttr->interval.intervalUnit == 'y') {
+      w.ekey = taosTimeAdd(w.skey, pQueryAttr->interval.interval, pQueryAttr->interval.intervalUnit, pQueryAttr->precision) - 1;
+    } else {
+      w.ekey = w.skey + pQueryAttr->interval.interval - 1;
+    }
+  } else {
+    w = getResultRow(pResultRowInfo, pResultRowInfo->curPos)->win;
+  }
+
+  /*
+   * query border check, skey should not be bounded by the query time range, since the value skey will
+   * be used as the time window index value. So we only change ekey of time window accordingly.
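+   * (e.g. with interval(10s) and a query window ending at t = 25, the last
+   * bucket keeps skey = 20 while its ekey is clamped from 29 down to 25)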
+   */
+  if (w.ekey > pQueryAttr->window.ekey && QUERY_IS_ASC_QUERY(pQueryAttr)) {
+    w.ekey = pQueryAttr->window.ekey;
+  }
+
+  return w;
+}
+
+
 // a new buffer page for each table. Needs to opt this design
 static int32_t addNewWindowResultBuf(SResultRow *pWindowRes, SDiskbasedResultBuf *pResultBuf, int32_t tid, uint32_t size) {
   if (pWindowRes->pageId != -1) {
@@ -637,6 +704,14 @@ static int32_t addNewWindowResultBuf(SResultRow *pWindowRes, SDiskbasedResultBuf
   return 0;
 }
 
+static bool chkWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, STimeWindow *win,
+                                    bool masterscan, SResultRow **pResult, int64_t groupId, SQLFunctionCtx* pCtx,
+                                    int32_t numOfOutput, int32_t* rowCellInfoOffset) {
+  assert(win->skey <= win->ekey);
+
+  return chkResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char *)&win->skey, TSDB_KEYSIZE, masterscan, groupId);
+}
+
 static int32_t setResultOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, int64_t tid, STimeWindow *win,
                                        bool masterscan, SResultRow **pResult, int64_t tableGroupId, SQLFunctionCtx* pCtx,
                                        int32_t numOfOutput, int32_t* rowCellInfoOffset) {
@@ -707,7 +782,7 @@ static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_se
     }
   }
 
-  assert(forwardStep > 0);
+  assert(forwardStep >= 0);
   return forwardStep;
 }
@@ -764,6 +839,8 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey,
       pResultRowInfo->curPos = i + 1;  // current not closed result object
     }
   }
+
+  //pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->curIndex]->win.skey;
 }
 
 static void updateResultRowInfoActiveIndex(SResultRowInfo* pResultRowInfo, SQueryAttr* pQueryAttr, TSKEY lastKey) {
@@ -813,7 +890,7 @@ static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBloc
     }
   }
 
-  assert(num > 0);
+  assert(num >= 0);
   return num;
 }
@@ -973,6 +1050,11 @@ static int32_t getNextQualifiedWindow(SQueryAttr* pQueryAttr, STimeWindow *pNext
     }
   }
 
+  /* interp query with fill should not skip time window */
+  if (pQueryAttr->pointInterpQuery && pQueryAttr->fillType != TSDB_FILL_NONE) {
+    return startPos;
+  }
+
   /*
   * This time window does not cover any data, try next time window,
   * this case may happen when the time window is too small
@@ -1485,6 +1567,82 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
   updateResultRowInfoActiveIndex(pResultRowInfo, pQueryAttr, pRuntimeEnv->current->lastKey);
 }
 
+
+static void hashAllIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pSDataBlock, int32_t tableGroupId) {
+  STableIntervalOperatorInfo* pInfo = (STableIntervalOperatorInfo*) pOperatorInfo->info;
+
+  SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv;
+  int32_t numOfOutput = pOperatorInfo->numOfOutput;
+  SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
+
+  int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order);
+  bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr);
+
+  TSKEY* tsCols = NULL;
+  if (pSDataBlock->pDataBlock != NULL) {
+    SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, 0);
+    tsCols = (int64_t*) pColDataInfo->pData;
+    assert(tsCols[0] == pSDataBlock->info.window.skey &&
+           tsCols[pSDataBlock->info.rows - 1] == pSDataBlock->info.window.ekey);
+  }
+
+  int32_t startPos = ascQuery? 0 : (pSDataBlock->info.rows - 1);
+  TSKEY ts = getStartTsKey(pQueryAttr, &pSDataBlock->info.window, tsCols, pSDataBlock->info.rows);
+
+  STimeWindow win = getCurrentActiveTimeWindow(pResultRowInfo, ts, pQueryAttr);
+  bool masterScan = IS_MASTER_SCAN(pRuntimeEnv);
+
+  SResultRow* pResult = NULL;
+  int32_t forwardStep = 0;
+  int32_t ret = 0;
+
+  while (1) {
+    // null data, failed to allocate more memory buffer
+    ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult,
+                                  tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
+    if (ret != TSDB_CODE_SUCCESS) {
+      longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+    }
+
+    TSKEY ekey = reviseWindowEkey(pQueryAttr, &win);
+    forwardStep = getNumOfRowsInTimeWindow(pRuntimeEnv, &pSDataBlock->info, tsCols, startPos, ekey, binarySearchForKey, true);
+
+    // window start(end) key interpolation
+    doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
+    doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
+
+    int32_t prevEndPos = (forwardStep - 1) * step + startPos;
+    startPos = getNextQualifiedWindow(pQueryAttr, &win, &pSDataBlock->info, tsCols, binarySearchForKey, prevEndPos);
+    if (startPos < 0) {
+      if (win.skey <= pQueryAttr->window.ekey) {
+        int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId,
+                                               pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset);
+        if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
+          longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+        }
+
+        startPos = pSDataBlock->info.rows - 1;
+
+        // window start(end) key interpolation
+        doWindowBorderInterpolation(pOperatorInfo, pSDataBlock, pInfo->pCtx, pResult, &win, startPos, forwardStep);
+        doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &win, startPos, forwardStep, tsCols, pSDataBlock->info.rows, numOfOutput);
+      }
+
+      break;
+    }
+    setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
+  }
+
+  if (pQueryAttr->timeWindowInterpo) {
+    int32_t rowIndex = ascQuery? (pSDataBlock->info.rows-1):0;
+    saveDataBlockLastRow(pRuntimeEnv, &pSDataBlock->info, pSDataBlock->pDataBlock, rowIndex);
+  }
+
+  updateResultRowInfoActiveIndex(pResultRowInfo, pQueryAttr, pRuntimeEnv->current->lastKey);
+}
+
+
+
 static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
   SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
   STableQueryInfo* item = pRuntimeEnv->current;
@@ -1981,6 +2139,12 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
          setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
          break;
        }
+        case OP_AllMultiTableTimeInterval: {
+          pRuntimeEnv->proot =
+              createAllMultiTableTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
+          setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+          break;
+        }
        case OP_TimeWindow: {
          pRuntimeEnv->proot =
              createTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
@@ -1990,6 +2154,15 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
          }
          break;
        }
+        case OP_AllTimeWindow: {
+          pRuntimeEnv->proot =
+              createAllTimeIntervalOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
+          int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType;
+          if (opType != OP_DummyInput && opType != OP_Join) {
+            setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot);
+          }
+          break;
+        }
        case OP_Groupby: {
          pRuntimeEnv->proot =
              createGroupbyOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput);
@@ -2533,7 +2706,7 @@ static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, i
   SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
   int32_t MIN_ROWS_PER_PAGE = 4;
 
-  *rowsize = (int32_t)(pQueryAttr->resultRowSize * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery));
+  *rowsize = (int32_t)(pQueryAttr->resultRowSize * getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery));
   int32_t overhead = sizeof(tFilePage);
 
   // one page contains at least two rows
@@ -2907,6 +3080,8 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
 
   // check if this data block is required to load
   if ((*status) != BLK_DATA_ALL_NEEDED) {
+    bool needFilter = true;
+
     // the pCtx[i] result is belonged to previous time window since the outputBuf has not been set yet,
     // the filter result may be incorrect. So in case of interval query, we need to set the correct time output buffer
     if (QUERY_IS_INTERVAL_QUERY(pQueryAttr)) {
@@ -2916,10 +3091,16 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
 
      TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey;
      STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr);
-      if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.tid, &win, masterScan, &pResult, groupId,
-                                  pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
-                                  pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) {
-        longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+      if (pQueryAttr->pointInterpQuery) {
+        needFilter = chkWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId,
+                                             pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
+                                             pTableScanInfo->rowCellInfoOffset);
+      } else {
+        if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.tid, &win, masterScan, &pResult, groupId,
+                                    pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
+                                    pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) {
+          longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+        }
      }
    } else if (pQueryAttr->stableQuery && (!pQueryAttr->tsCompQuery) && (!pQueryAttr->diffQuery)) { // stable aggregate, not interval aggregate or normal column aggregate
      doSetTableGroupOutputBuf(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pTableScanInfo->pCtx,
@@ -2927,7 +3108,11 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa
                               pRuntimeEnv->current->groupIndex);
    }
 
-    (*status) = doFilterByBlockTimeWindow(pTableScanInfo, pBlock);
+    if (needFilter) {
+      (*status) = doFilterByBlockTimeWindow(pTableScanInfo, pBlock);
+    } else {
+      (*status) = BLK_DATA_ALL_NEEDED;
+    }
  }
 
  SDataBlockInfo* pBlockInfo = &pBlock->info;
@@ -3437,7 +3622,7 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf
 
    // re-establish output buffer pointer.
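    // (TSDB_FUNC_TOP/BOTTOM/DIFF/DERIVATIVE keep their timestamps in ptsOutputBuf,
    // which has to track the preceding column's re-bound output, not column 0)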
    int32_t functionId = pBInfo->pCtx[i].functionId;
    if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
-      pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[0].pOutput;
+      pBInfo->pCtx[i].ptsOutputBuf = pBInfo->pCtx[i-1].pOutput;
    }
  }
 }
@@ -4538,6 +4723,7 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr
  SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
 
  pQueryAttr->tsdb = tsdb;
+
  if (tsdb != NULL) {
    int32_t code = setupQueryHandle(tsdb, pRuntimeEnv, pQInfo->qId, pQueryAttr->stableQuery);
    if (code != TSDB_CODE_SUCCESS) {
@@ -4830,6 +5016,9 @@ static SSDataBlock* doBlockInfoScan(void* param, bool* newgroup) {
  tableBlockDist.numOfTables = (int32_t)pOperator->pRuntimeEnv->tableqinfoGroupInfo.numOfTables;
 
  int32_t numRowSteps = tsMaxRowsInFileBlock / TSDB_BLOCK_DIST_STEP_ROWS;
+  if (tsMaxRowsInFileBlock % TSDB_BLOCK_DIST_STEP_ROWS != 0) {
+    ++numRowSteps;
+  }
  tableBlockDist.dataBlockInfos = taosArrayInit(numRowSteps, sizeof(SFileBlockInfo));
  taosArraySetSize(tableBlockDist.dataBlockInfos, numRowSteps);
 
  tableBlockDist.maxRows = INT_MIN;
@@ -4946,7 +5135,7 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf
    pTableScanInfo->pCtx = pAggInfo->binfo.pCtx;
    pTableScanInfo->pResultRowInfo = &pAggInfo->binfo.resultRowInfo;
    pTableScanInfo->rowCellInfoOffset = pAggInfo->binfo.rowCellInfoOffset;
-  } else if (pDownstream->operatorType == OP_TimeWindow) {
+  } else if (pDownstream->operatorType == OP_TimeWindow || pDownstream->operatorType == OP_AllTimeWindow) {
    STableIntervalOperatorInfo *pIntervalInfo = pDownstream->info;
 
    pTableScanInfo->pCtx = pIntervalInfo->pCtx;
@@ -4960,7 +5149,7 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf
    pTableScanInfo->pResultRowInfo = &pGroupbyInfo->binfo.resultRowInfo;
    pTableScanInfo->rowCellInfoOffset = pGroupbyInfo->binfo.rowCellInfoOffset;
 
-  } else if (pDownstream->operatorType == OP_MultiTableTimeInterval) {
+  } else if (pDownstream->operatorType == OP_MultiTableTimeInterval || pDownstream->operatorType == OP_AllMultiTableTimeInterval) {
    STableIntervalOperatorInfo *pInfo = pDownstream->info;
 
    pTableScanInfo->pCtx = pInfo->pCtx;
@@ -5104,7 +5293,7 @@ SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv,
  SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo));
 
  pInfo->resultRowFactor =
-      (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false));
+      (int32_t)(getRowNumForMultioutput(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false));
 
  pRuntimeEnv->scanFlag = MERGE_STAGE;  // TODO init when creating pCtx
@@ -5579,6 +5768,66 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
  return pIntervalInfo->pRes->info.rows == 0? NULL:pIntervalInfo->pRes;
 }
 
+static SSDataBlock* doAllIntervalAgg(void* param, bool* newgroup) {
+  SOperatorInfo* pOperator = (SOperatorInfo*) param;
+  if (pOperator->status == OP_EXEC_DONE) {
+    return NULL;
+  }
+
+  STableIntervalOperatorInfo* pIntervalInfo = pOperator->info;
+
+  SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
+  if (pOperator->status == OP_RES_TO_RETURN) {
+    toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
+
+    if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
+      pOperator->status = OP_EXEC_DONE;
+    }
+
+    return pIntervalInfo->pRes;
+  }
+
+  SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
+  int32_t order = pQueryAttr->order.order;
+  STimeWindow win = pQueryAttr->window;
+
+  SOperatorInfo* upstream = pOperator->upstream[0];
+
+  while(1) {
+    publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
+    SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+    publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
+    if (pBlock == NULL) {
+      break;
+    }
+
+    setTagValue(pOperator, pRuntimeEnv->current->pTable, pIntervalInfo->pCtx, pOperator->numOfOutput);
+
+    // the pDataBlock are always the same one, no need to call this again
+    setInputDataBlock(pOperator, pIntervalInfo->pCtx, pBlock, pQueryAttr->order.order);
+    hashAllIntervalAgg(pOperator, &pIntervalInfo->resultRowInfo, pBlock, 0);
+  }
+
+  // restore the value
+  pQueryAttr->order.order = order;
+  pQueryAttr->window = win;
+
+  pOperator->status = OP_RES_TO_RETURN;
+  closeAllResultRows(&pIntervalInfo->resultRowInfo);
+  setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
+  finalizeQueryResult(pOperator, pIntervalInfo->pCtx, &pIntervalInfo->resultRowInfo, pIntervalInfo->rowCellInfoOffset);
+
+  initGroupResInfo(&pRuntimeEnv->groupResInfo, &pIntervalInfo->resultRowInfo);
+  toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
+
+  if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) {
+    pOperator->status = OP_EXEC_DONE;
+  }
+
+  return pIntervalInfo->pRes->info.rows == 0? NULL:pIntervalInfo->pRes;
+}
+
 static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
  SOperatorInfo* pOperator = (SOperatorInfo*) param;
  if (pOperator->status == OP_EXEC_DONE) {
@@ -5634,6 +5883,63 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) {
  return pIntervalInfo->pRes;
 }
 
+static SSDataBlock* doAllSTableIntervalAgg(void* param, bool* newgroup) {
+  SOperatorInfo* pOperator = (SOperatorInfo*) param;
+  if (pOperator->status == OP_EXEC_DONE) {
+    return NULL;
+  }
+
+  STableIntervalOperatorInfo* pIntervalInfo = pOperator->info;
+  SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
+
+  if (pOperator->status == OP_RES_TO_RETURN) {
+    copyToSDataBlock(pRuntimeEnv, 3000, pIntervalInfo->pRes, pIntervalInfo->rowCellInfoOffset);
+    if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainData(&pRuntimeEnv->groupResInfo)) {
+      pOperator->status = OP_EXEC_DONE;
+    }
+
+    return pIntervalInfo->pRes;
+  }
+
+  SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
+  int32_t order = pQueryAttr->order.order;
+
+  SOperatorInfo* upstream = pOperator->upstream[0];
+
+  while(1) {
+    publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC);
+    SSDataBlock* pBlock = upstream->exec(upstream, newgroup);
+    publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC);
+
+    if (pBlock == NULL) {
+      break;
+    }
+
+    // the pDataBlock are always the same one, no need to call this again
+    STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current;
+
+    setTagValue(pOperator, pTableQueryInfo->pTable, pIntervalInfo->pCtx, pOperator->numOfOutput);
+    setInputDataBlock(pOperator, pIntervalInfo->pCtx, pBlock, pQueryAttr->order.order);
+    setIntervalQueryRange(pRuntimeEnv, pBlock->info.window.skey);
+
+    hashAllIntervalAgg(pOperator, &pTableQueryInfo->resInfo, pBlock, pTableQueryInfo->groupIndex);
+  }
+
+  pOperator->status = OP_RES_TO_RETURN;
+  pQueryAttr->order.order = order;   // TODO : restore the order
+  doCloseAllTimeWindow(pRuntimeEnv);
+  setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
+
+  copyToSDataBlock(pRuntimeEnv, 3000, pIntervalInfo->pRes, pIntervalInfo->rowCellInfoOffset);
+  if (pIntervalInfo->pRes->info.rows == 0 || !hasRemainData(&pRuntimeEnv->groupResInfo)) {
+    pOperator->status = OP_EXEC_DONE;
+  }
+
+  return pIntervalInfo->pRes;
+}
+
+
+
 static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
  SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv;
 
@@ -6016,7 +6322,7 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera
  SAggOperatorInfo* pInfo = calloc(1, sizeof(SAggOperatorInfo));
 
  SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
-  int32_t numOfRows = (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery));
+  int32_t numOfRows = (int32_t)(getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery));
 
  pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, numOfRows);
  pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
@@ -6255,6 +6561,32 @@ SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOp
  appendUpstream(pOperator, upstream);
  return pOperator;
 }
+
+
+SOperatorInfo* createAllTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
+  STableIntervalOperatorInfo* pInfo = calloc(1, sizeof(STableIntervalOperatorInfo));
+
+  pInfo->pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->rowCellInfoOffset);
+  pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
+  initResultRowInfo(&pInfo->resultRowInfo, 8, TSDB_DATA_TYPE_INT);
+
+  SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
+
+  pOperator->name = "AllTimeIntervalAggOperator";
+  pOperator->operatorType = OP_AllTimeWindow;
+  pOperator->blockingOptr = true;
+  pOperator->status = OP_IN_EXECUTING;
+  pOperator->pExpr = pExpr;
+  pOperator->numOfOutput = numOfOutput;
+  pOperator->info = pInfo;
+  pOperator->pRuntimeEnv = pRuntimeEnv;
+  pOperator->exec = doAllIntervalAgg;
+  pOperator->cleanup = destroyBasicOperatorInfo;
+
+  appendUpstream(pOperator, upstream);
+  return pOperator;
+}
+
 SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
  SStateWindowOperatorInfo* pInfo = calloc(1, sizeof(SStateWindowOperatorInfo));
  pInfo->colIndex = -1;
@@ -6277,7 +6609,6 @@ SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpe
 
  appendUpstream(pOperator, upstream);
  return pOperator;
-
 }
 SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
  SSWindowOperatorInfo* pInfo = calloc(1, sizeof(SSWindowOperatorInfo));
@@ -6329,6 +6660,32 @@ SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRunti
  return pOperator;
 }
 
+SOperatorInfo* createAllMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
+  STableIntervalOperatorInfo* pInfo = calloc(1, sizeof(STableIntervalOperatorInfo));
+
+  pInfo->pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->rowCellInfoOffset);
+  pInfo->pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
+  initResultRowInfo(&pInfo->resultRowInfo, 8, TSDB_DATA_TYPE_INT);
+
+  SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
+  pOperator->name = "AllMultiTableTimeIntervalOperator";
+  pOperator->operatorType = OP_AllMultiTableTimeInterval;
+  pOperator->blockingOptr = true;
+  pOperator->status = OP_IN_EXECUTING;
+  pOperator->pExpr = pExpr;
+  pOperator->numOfOutput = numOfOutput;
+  pOperator->info = pInfo;
+  pOperator->pRuntimeEnv = pRuntimeEnv;
+
+  pOperator->exec = doAllSTableIntervalAgg;
+  pOperator->cleanup = destroyBasicOperatorInfo;
+
+  appendUpstream(pOperator, upstream);
+
+  return pOperator;
+}
+
+
 SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
  SGroupbyOperatorInfo* pInfo = calloc(1, sizeof(SGroupbyOperatorInfo));
  pInfo->colIndex = -1;  // group by column index
@@ -6339,7 +6696,7 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato
  SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
 
  pQueryAttr->resultRowSize = (pQueryAttr->resultRowSize *
-      (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)));
+      (int32_t)(getRowNumForMultioutput(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)));
 
  pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
  initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
diff --git a/src/query/src/qFill.c b/src/query/src/qFill.c
index 7dd73c9fe4..1a86bbae36 100644
--- a/src/query/src/qFill.c
+++ b/src/query/src/qFill.c
@@ -206,6 +206,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR
  } else {
    assert(pFillInfo->currentKey == ts);
    initBeforeAfterDataBuf(pFillInfo, prev);
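+    // for FILL(NEXT), peek one source row ahead: bump the read index, copy
+    // that row into the 'next' buffer, then restore the index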
+    if (pFillInfo->type == TSDB_FILL_NEXT && (pFillInfo->index + 1) < pFillInfo->numOfRows) {
+      initBeforeAfterDataBuf(pFillInfo, next);
+      ++pFillInfo->index;
+      copyCurrentRowIntoBuf(pFillInfo, srcData, *next);
+      --pFillInfo->index;
+    }
 
    // assign rows to dst buffer
    for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
@@ -227,6 +233,12 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputR
      } else if (pFillInfo->type == TSDB_FILL_LINEAR) {
        assignVal(output, src, pCol->col.bytes, pCol->col.type);
        memcpy(*prev + pCol->col.offset, src, pCol->col.bytes);
+      } else if (pFillInfo->type == TSDB_FILL_NEXT) {
+        if (*next) {
+          assignVal(output, *next + pCol->col.offset, pCol->col.bytes, pCol->col.type);
+        } else {
+          setNull(output, pCol->col.type, pCol->col.bytes);
+        }
      } else {
        assignVal(output, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
      }
diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c
index e01f41276f..b8a5ee7699 100644
--- a/src/query/src/qPlan.c
+++ b/src/query/src/qPlan.c
@@ -567,10 +567,18 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
      }
    } else if (pQueryAttr->interval.interval > 0) {
      if (pQueryAttr->stableQuery) {
-        op = OP_MultiTableTimeInterval;
+        if (pQueryAttr->pointInterpQuery) {
+          op = OP_AllMultiTableTimeInterval;
+        } else {
+          op = OP_MultiTableTimeInterval;
+        }
        taosArrayPush(plan, &op);
-      } else {
-        op = OP_TimeWindow;
+      } else {
+        if (pQueryAttr->pointInterpQuery) {
+          op = OP_AllTimeWindow;
+        } else {
+          op = OP_TimeWindow;
+        }
        taosArrayPush(plan, &op);
 
        if (pQueryAttr->pExpr2 != NULL) {
@@ -578,7 +586,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
          taosArrayPush(plan, &op);
        }
 
-        if (pQueryAttr->fillType != TSDB_FILL_NONE && (!pQueryAttr->pointInterpQuery)) {
+        if (pQueryAttr->fillType != TSDB_FILL_NONE) {
          op = OP_Fill;
          taosArrayPush(plan, &op);
        }
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index a3d2e424d2..4caf351799 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -30,6 +30,18 @@ typedef struct SCompSupporter {
  int32_t order;
 } SCompSupporter;
 
+int32_t getRowNumForMultioutput(SQueryAttr* pQueryAttr, bool topBottomQuery, bool stable) {
+  if (pQueryAttr && (!stable)) {
+    for (int16_t i = 0; i < pQueryAttr->numOfOutput; ++i) {
+      if (pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_TOP || pQueryAttr->pExpr1[i].base.functionId == TSDB_FUNC_BOTTOM) {
+        return (int32_t)pQueryAttr->pExpr1[i].base.param[0].i64;
+      }
+    }
+  }
+
+  return 1;
+}
+
 int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr) {
  int32_t size = 0;
diff --git a/src/query/tests/rangeMergeTest.cpp b/src/query/tests/rangeMergeTest.cpp
index e65508a300..f7fc558ccf 100644
--- a/src/query/tests/rangeMergeTest.cpp
+++ b/src/query/tests/rangeMergeTest.cpp
@@ -330,7 +330,7 @@ void intDataTest() {
    filterAddRange(h, ra + i, TSDB_RELATION_AND);
  }
  filterGetRangeNum(h, &num);
-  ASSERT_EQ(num, 0);
+  ASSERT_EQ(num, 1);
 
  filterFreeRangeCtx(h);
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index e958a8e5ec..c93a3f929d 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -1133,8 +1133,8 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
  } else {
    // for asynchronous API
    SRpcEpSet *pEpSet = NULL;
-    if (pContext->epSet.inUse != pContext->oldInUse || pContext->redirect)
-      pEpSet = &pContext->epSet;
+    //if (pContext->epSet.inUse != pContext->oldInUse || pContext->redirect)
+    pEpSet = &pContext->epSet;
 
    (*pRpc->cfp)(pMsg, pEpSet);
  }
diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c
index 0449ecac8b..2549518249 100644
--- a/src/rpc/src/rpcTcp.c
+++ b/src/rpc/src/rpcTcp.c
@@ -397,7 +397,11 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
  SThreadObj *pThreadObj = pClientObj->pThreadObj[index];
 
  SOCKET fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip);
+#if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32)
+  if (fd == (SOCKET)-1) return NULL;
+#else
  if (fd <= 0) return NULL;
+#endif
 
  struct sockaddr_in sin;
  uint16_t localPort = 0;
diff --git a/src/tfs/inc/tfsint.h b/src/tfs/inc/tfsint.h
index 619ef6df73..3c5dccc63b 100644
--- a/src/tfs/inc/tfsint.h
+++ b/src/tfs/inc/tfsint.h
@@ -65,12 +65,7 @@ SDisk *tfsFreeDisk(SDisk *pDisk);
 int    tfsUpdateDiskInfo(SDisk *pDisk);
 
 // ttier.c ======================================================
-typedef struct {
-  int64_t size;
-  int64_t used;
-  int64_t free;
-  int16_t nAvailDisks;  // # of Available disks
-} STierMeta;
+
 typedef struct STier {
  pthread_spinlock_t lock;
  int                level;
diff --git a/src/tfs/src/tfs.c b/src/tfs/src/tfs.c
index 61fbc61448..43ccb324b2 100644
--- a/src/tfs/src/tfs.c
+++ b/src/tfs/src/tfs.c
@@ -101,7 +101,7 @@ int tfsInit(SDiskCfg *pDiskCfg, int ndisk) {
    return -1;
  }
 
-  tfsUpdateInfo(NULL);
+  tfsUpdateInfo(NULL, NULL, 0);
  for (int level = 0; level < TFS_NLEVEL(); level++) {
    tfsPosNextId(TFS_TIER_AT(level));
  }
@@ -119,7 +119,7 @@ void tfsDestroy() {
  }
 }
 
-void tfsUpdateInfo(SFSMeta *pFSMeta) {
+void tfsUpdateInfo(SFSMeta *pFSMeta, STierMeta *tierMetas, int8_t numTiers) {
  SFSMeta   fsMeta;
  STierMeta tierMeta;
 
@@ -130,11 +130,16 @@ void tfsUpdateInfo(SFSMeta *pFSMeta) {
  memset(pFSMeta, 0, sizeof(*pFSMeta));
 
  for (int level = 0; level < TFS_NLEVEL(); level++) {
+    STierMeta *pTierMeta = &tierMeta;
+    if (tierMetas && level < numTiers) {
+      pTierMeta = tierMetas + level;
+    }
+
    STier *pTier = TFS_TIER_AT(level);
-    tfsUpdateTierInfo(pTier, &tierMeta);
-    pFSMeta->tsize += tierMeta.size;
-    pFSMeta->avail += tierMeta.free;
-    pFSMeta->used += tierMeta.used;
+    tfsUpdateTierInfo(pTier, pTierMeta);
+    pFSMeta->tsize += pTierMeta->size;
+    pFSMeta->avail += pTierMeta->free;
+    pFSMeta->used += pTierMeta->used;
  }
 
  tfsLock();
@@ -595,7 +600,7 @@ void taosGetDisk() {
  SFSMeta fsMeta;
 
  if (tscEmbedded) {
-    tfsUpdateInfo(&fsMeta);
+    tfsUpdateInfo(&fsMeta, NULL, 0);
    tsTotalDataDirGB = (float)(fsMeta.tsize / unit);
    tsUsedDataDirGB = (float)(fsMeta.used / unit);
    tsAvailDataDirGB = (float)(fsMeta.avail / unit);
diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h
index 9a8de01f71..51801c843c 100644
--- a/src/tsdb/inc/tsdbMeta.h
+++ b/src/tsdb/inc/tsdbMeta.h
@@ -24,8 +24,7 @@ typedef struct STable {
  tstr*          name;  // NOTE: there a flexible string here
  uint64_t       suid;
  struct STable* pSuper;  // super table pointer
-  uint8_t        numOfSchemas;
-  STSchema*      schema[TSDB_MAX_TABLE_SCHEMAS];
+  SArray*        schema;
  STSchema*      tagSchema;
  SKVRow         tagVal;
  SSkipList*     pIndex;  // For TSDB_SUPER_TABLE, it is the skiplist index
@@ -107,10 +106,9 @@ static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock,
 
  if (lock) TSDB_RLOCK_TABLE(pDTable);
  if (_version < 0) {  // get the latest version of schema
-    pTSchema = pDTable->schema[pDTable->numOfSchemas - 1];
+    pTSchema = *(STSchema **)taosArrayGetLast(pDTable->schema);
  } else {  // get the schema with version
-    void* ptr = taosbsearch(&_version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*),
-                            tsdbCompareSchemaVersion, TD_EQ);
+    void* ptr = taosArraySearch(pDTable->schema, &_version, tsdbCompareSchemaVersion, TD_EQ);
    if (ptr == NULL) {
      terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
      goto _exit;
diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c
index e53d2826c7..68450301d8 100644
--- a/src/tsdb/src/tsdbFS.c
+++ b/src/tsdb/src/tsdbFS.c
@@ -37,6 +37,8 @@ static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo, int32_t *nExpired);
 static int tsdbProcessExpiredFS(STsdbRepo *pRepo);
 static int tsdbCreateMeta(STsdbRepo *pRepo);
 
+// For backward compatibility
+bool tsdbForceKeepFile = false;
 // ================== CURRENT file header info
 static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) {
  int tlen = 0;
@@ -1048,6 +1050,26 @@ static int tsdbRestoreMeta(STsdbRepo *pRepo) {
        return -1;
      }
 
+      if (tsdbForceKeepFile) {
+        struct stat tfstat;
+
+        // Get real file size
+        if (fstat(pfs->cstatus->pmf->fd, &tfstat) < 0) {
+          terrno = TAOS_SYSTEM_ERROR(errno);
+          tsdbCloseMFile(pfs->cstatus->pmf);
+          tfsClosedir(tdir);
+          regfree(&regex);
+          return -1;
+        }
+
+        if (pfs->cstatus->pmf->info.size != tfstat.st_size) {
+          int64_t tfsize = pfs->cstatus->pmf->info.size;
+          pfs->cstatus->pmf->info.size = tfstat.st_size;
+          tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo),
+                   TSDB_FILE_FULL_NAME(pfs->cstatus->pmf), tfsize, pfs->cstatus->pmf->info.size);
+        }
+      }
+
      tsdbCloseMFile(pfs->cstatus->pmf);
    }
  } else if (code == REG_NOMATCH) {
@@ -1212,6 +1234,24 @@ static int tsdbRestoreDFileSet(STsdbRepo *pRepo) {
        return -1;
      }
 
+      if (tsdbForceKeepFile) {
+        struct stat tfstat;
+
+        // Get real file size
+        if (fstat(pDFile->fd, &tfstat) < 0) {
+          terrno = TAOS_SYSTEM_ERROR(errno);
+          taosArrayDestroy(fArray);
+          return -1;
+        }
+
+        if (pDFile->info.size != tfstat.st_size) {
+          int64_t tfsize = pDFile->info.size;
+          pDFile->info.size = tfstat.st_size;
+          tsdbInfo("vgId:%d file %s header size is changed from %" PRId64 " to %" PRId64, REPO_ID(pRepo),
+                   TSDB_FILE_FULL_NAME(pDFile), tfsize, pDFile->info.size);
+        }
+      }
+
      tsdbCloseDFile(pDFile);
      index++;
    }
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index 21150c66e2..96e86a6d99 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -43,6 +43,8 @@ static int     tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable);
 static int     tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable);
 static int     tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid);
 static int     tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema);
+static int     tsdbAddSchema(STable *pTable, STSchema *pSchema);
+static void    tsdbFreeTableSchema(STable *pTable);
 
 // ------------------ OUTER FUNCTIONS ------------------
 int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
@@ -722,17 +724,10 @@ void tsdbUpdateTableSchema(STsdbRepo *pRepo, STable *pTable, STSchema *pSchema,
  STsdbMeta *pMeta = pRepo->tsdbMeta;
 
  STable *pCTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
-  ASSERT(schemaVersion(pSchema) > schemaVersion(pCTable->schema[pCTable->numOfSchemas - 1]));
+  ASSERT(schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pCTable->schema)));
 
  TSDB_WLOCK_TABLE(pCTable);
-  if (pCTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) {
-    pCTable->schema[pCTable->numOfSchemas++] = pSchema;
-  } else {
-    ASSERT(pCTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS);
-    tdFreeSchema(pCTable->schema[0]);
-    memmove(pCTable->schema, pCTable->schema + 1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1));
-    pCTable->schema[pCTable->numOfSchemas - 1] = pSchema;
-  }
+  tsdbAddSchema(pCTable, pSchema);
 
  if (schemaNCols(pSchema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pSchema);
  if (schemaTLen(pSchema) > pMeta->maxRowBytes) pMeta->maxRowBytes = schemaTLen(pSchema);
@@ -828,9 +823,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
    TABLE_TID(pTable) = -1;
    TABLE_SUID(pTable) = -1;
    pTable->pSuper = NULL;
-    pTable->numOfSchemas = 1;
-    pTable->schema[0] = tdDupSchema(pCfg->schema);
-    if (pTable->schema[0] == NULL) {
+    if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) {
      terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
      goto _err;
    }
@@ -841,7 +834,8 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
    }
    pTable->tagVal = NULL;
    STColumn *pCol = schemaColAt(pTable->tagSchema, DEFAULT_TAG_INDEX_COLUMN);
-    pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL, SL_ALLOW_DUP_KEY, getTagIndexKey);
+    pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, colType(pCol), (uint8_t)(colBytes(pCol)), NULL,
+                                     SL_ALLOW_DUP_KEY, getTagIndexKey);
    if (pTable->pIndex == NULL) {
      terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
      goto _err;
@@ -870,9 +864,7 @@ static STable *tsdbCreateTableFromCfg(STableCfg *pCfg, bool isSuper, STable *pST
      }
    } else {
      TABLE_SUID(pTable) = -1;
-      pTable->numOfSchemas = 1;
-      pTable->schema[0] = tdDupSchema(pCfg->schema);
-      if (pTable->schema[0] == NULL) {
+      if (tsdbAddSchema(pTable, tdDupSchema(pCfg->schema)) < 0) {
        terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
        goto _err;
      }
@@ -906,9 +898,7 @@ static void tsdbFreeTable(STable *pTable) {
              TABLE_UID(pTable));
    tfree(TABLE_NAME(pTable));
    if (TABLE_TYPE(pTable) != TSDB_CHILD_TABLE) {
-      for (int i = 0; i < TSDB_MAX_TABLE_SCHEMAS; i++) {
-        tdFreeSchema(pTable->schema[i]);
-      }
+      tsdbFreeTableSchema(pTable);
 
      if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
        tdFreeSchema(pTable->tagSchema);
@@ -1260,9 +1250,10 @@ static int tsdbEncodeTable(void **buf, STable *pTable) {
    tlen += taosEncodeFixedU64(buf, TABLE_SUID(pTable));
    tlen += tdEncodeKVRow(buf, pTable->tagVal);
  } else {
-    tlen += taosEncodeFixedU8(buf, pTable->numOfSchemas);
-    for (int i = 0; i < pTable->numOfSchemas; i++) {
-      tlen += tdEncodeSchema(buf, pTable->schema[i]);
+    tlen += taosEncodeFixedU8(buf, (uint8_t)taosArrayGetSize(pTable->schema));
+    for (int i = 0; i < taosArrayGetSize(pTable->schema); i++) {
+      STSchema *pSchema = taosArrayGetP(pTable->schema, i);
+      tlen += tdEncodeSchema(buf, pSchema);
    }
 
    if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
@@ -1293,9 +1284,12 @@ static void *tsdbDecodeTable(void *buf, STable **pRTable) {
    buf = taosDecodeFixedU64(buf, &TABLE_SUID(pTable));
    buf = tdDecodeKVRow(buf, &(pTable->tagVal));
  } else {
-    buf = taosDecodeFixedU8(buf, &(pTable->numOfSchemas));
-    for (int i = 0; i < pTable->numOfSchemas; i++) {
-      buf = tdDecodeSchema(buf, &(pTable->schema[i]));
+    uint8_t nSchemas;
+    buf = taosDecodeFixedU8(buf, &nSchemas);
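+    // append each decoded schema version to the dynamically sized schema
+    // array instead of writing into the old fixed-length schema[] slots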
+    for (int i = 0; i < nSchemas; i++) {
+      STSchema *pSchema;
+      buf = tdDecodeSchema(buf, &pSchema);
+      tsdbAddSchema(pTable, pSchema);
    }
 
    if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
@@ -1457,3 +1451,38 @@ static int tsdbCheckTableTagVal(SKVRow *pKVRow, STSchema *pSchema) {
 
  return 0;
 }
+
+static int tsdbAddSchema(STable *pTable, STSchema *pSchema) {
+  ASSERT(TABLE_TYPE(pTable) != TSDB_CHILD_TABLE);
+
+  if (pTable->schema == NULL) {
+    pTable->schema = taosArrayInit(TSDB_MAX_TABLE_SCHEMAS, sizeof(SSchema *));
+    if (pTable->schema == NULL) {
+      terrno = TAOS_SYSTEM_ERROR(errno);
+      return -1;
+    }
+  }
+
+  ASSERT(taosArrayGetSize(pTable->schema) == 0 ||
+         schemaVersion(pSchema) > schemaVersion(*(STSchema **)taosArrayGetLast(pTable->schema)));
+
+  if (taosArrayPush(pTable->schema, &pSchema) == NULL) {
+    terrno = TAOS_SYSTEM_ERROR(errno);
+    return -1;
+  }
+
+  return 0;
+}
+
+static void tsdbFreeTableSchema(STable *pTable) {
+  ASSERT(pTable != NULL);
+
+  if (pTable->schema) {
+    for (size_t i = 0; i < taosArrayGetSize(pTable->schema); i++) {
+      STSchema *pSchema = taosArrayGetP(pTable->schema, i);
+      tdFreeSchema(pSchema);
+    }
+
+    taosArrayDestroy(pTable->schema);
+  }
+}
\ No newline at end of file
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 4735efceb8..9cc9b7224c 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -2705,7 +2705,7 @@ static void destroyHelper(void* param) {
  free(param);
 }
 
-static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) {
+static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) {
  if (pQueryHandle->checkFiles) {
    // check if the query range overlaps with the file data block
    bool exists = true;
diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h
index d03ce6e0f1..f146ec0b8b 100644
--- a/src/util/inc/tconfig.h
+++ b/src/util/inc/tconfig.h
@@ -81,6 +81,7 @@ typedef struct {
 extern SGlobalCfg tsGlobalConfig[];
 extern int32_t    tsGlobalConfigNum;
 extern char *     tsCfgStatusStr[];
+extern bool tsdbForceKeepFile;
 
 void taosReadGlobalLogCfg();
 bool taosReadGlobalCfg();
diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c
index 7577451f88..a3c01d2be7 100644
--- a/src/util/src/tcompare.c
+++ b/src/util/src/tcompare.c
@@ -280,25 +280,26 @@ int WCSPatternMatch(const wchar_t *patterStr, const wchar_t *str, size_t size, c
 
 int32_t compareStrPatternComp(const void* pLeft, const void* pRight) {
  SPatternCompareInfo pInfo = {'%', '_'};
-
-  char pattern[128] = {0};
+
+  assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN);
+  char *pattern = calloc(varDataLen(pRight) + 1, sizeof(char));
  memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
-  assert(varDataLen(pRight) < 128);
 
  size_t sz = varDataLen(pLeft);
-  char *buf = malloc(sz + 1);
-  memcpy(buf, varDataVal(pLeft), sz);
+  char *buf = malloc(sz + 1);
+  memcpy(buf, varDataVal(pLeft), sz);
  buf[sz] = 0;
 
  int32_t ret = patternMatch(pattern, buf, sz, &pInfo);
  free(buf);
+  free(pattern);
  return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
 }
 
 int32_t taosArrayCompareString(const void* a, const void* b) {
  const char* x = *(const char**)a;
  const char* y = *(const char**)b;
-
+
  return compareLenPrefixedStr(x, y);
 }
 
@@ -307,19 +308,19 @@ int32_t taosArrayCompareString(const void* a, const void* b) {
 //  return taosArraySearchString(arr, pLeft, taosArrayCompareString, TD_EQ) == NULL ? 0 : 1;
 //}
 int32_t compareFindItemInSet(const void *pLeft, const void* pRight) {
-  return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0;
+  return NULL != taosHashGet((SHashObj *)pRight, varDataVal(pLeft), varDataLen(pLeft)) ? 1 : 0;
 }
 
 int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
  SPatternCompareInfo pInfo = {'%', '_'};
 
-  wchar_t pattern[128] = {0};
-  assert(TSDB_PATTERN_STRING_MAX_LEN < 128);
+  assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE);
+  wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
  memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
-  assert(varDataLen(pRight) < 128);
-
+
  int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo);
+  free(pattern);
  return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
 }
diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh
index 5b2c860122..d4853c0825 100755
--- a/tests/perftest-scripts/perftest-query.sh
+++ b/tests/perftest-scripts/perftest-query.sh
@@ -101,7 +101,14 @@ function runQueryPerfTest {
  python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT
 
+  echo "=========== taosdemo performance: 4 int columns, 10000 tables, 100000 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
  python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type | tee -a $PERFORMANCE_TEST_REPORT
+
+  echo "=========== taosdemo performance: 400 int columns, 400 double columns, 200 binary(128) columns, 10000 tables, 1000 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
+  python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 400 -D 400 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT
+
+  echo "=========== taosdemo performance: 1900 int columns, 1900 double columns, 200 binary(128) columns, 10000 tables, 1000 records per table ===========" | tee -a $PERFORMANCE_TEST_REPORT
+  python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -b $branch -T $type -i 1900 -D 1900 -B 200 -t 10000 -r 1000 | tee -a $PERFORMANCE_TEST_REPORT
 }
diff --git a/tests/pytest/alter/alterColMultiTimes.py b/tests/pytest/alter/alterColMultiTimes.py
new file mode 100644
index 0000000000..173ca8158d
--- /dev/null
+++ b/tests/pytest/alter/alterColMultiTimes.py
@@ -0,0 +1,67 @@
+###################################################################
+#           Copyright (c) 2016 by TAOS Technologies, Inc.
+#                     All rights reserved.
+#
+#  This file is proprietary and confidential to TAOS Technologies.
+#  No part of this file may be reproduced, stored, transmitted,
+#  disclosed or used in any form or by any means other than as
+#  expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import string
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def genColList(self):
+        '''
+            generate the list of column names to add (c1 ... c17)
+        '''
+        col_list = list()
+        for i in range(1, 18):
+            col_list.append(f'c{i}')
+        return col_list
+
+    def genIncreaseValue(self, input_value):
+        '''
+            append ", 1" before the closing parenthesis of the value tuple on each call
+        '''
+        value_list = list(input_value)
+        value_list.insert(-1, ", 1")
+        return ''.join(value_list)
+
+    def insertAlter(self):
+        '''
+            regression test: after each "alter table ... add column" plus insert,
+            executing "select * from {tbname};" used to make taosd core dump
+        '''
+        tbname = ''.join(random.choice(string.ascii_letters.lower()) for i in range(7))
+        input_value = '(now, 1)'
+        tdSql.execute(f'create table {tbname} (ts timestamp, c0 int);')
+        tdSql.execute(f'insert into {tbname} values {input_value};')
+        for col in self.genColList():
+            input_value = self.genIncreaseValue(input_value)
+            tdSql.execute(f'alter table {tbname} add column {col} int;')
+            tdSql.execute(f'insert into {tbname} values {input_value};')
+        tdSql.query(f'select * from {tbname};')
+        tdSql.checkRows(18)
+
+    def run(self):
+        tdSql.prepare()
+        self.insertAlter()
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp
index 376567b7e8..ec44a85d5b 100644
--- a/tests/pytest/crash_gen/valgrind_taos.supp
+++ b/tests/pytest/crash_gen/valgrind_taos.supp
@@ -17742,4 +17742,370 @@
    fun:taosGetFqdn
    fun:taosCheckGlobalCfg
    fun:taos_init_imp
-}
\ No newline at end of file
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:PyTuple_Pack
+   obj:/usr/bin/python3.8
+   fun:PyObject_GetItem
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:_PyObject_MakeTpCall
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   fun:PyCode_NewWithPosOnlyArgs
+   fun:PyCode_New
+   obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so
+   obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/parsing.cpython-38-x86_64-linux-gnu.so
+   fun:PyModule_ExecDef
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyVectorcall_Call
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyFunction_Vectorcall
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:_PyObject_MakeTpCall
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/local/lib/python3.8
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   obj:/usr/bin/python3.8
+   fun:PyVectorcall_Call
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:PyTuple_New
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:PyTuple_New
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyFunction_Vectorcall
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:_PyObject_MakeTpCall
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyFunction_Vectorcall
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:_PyObject_MakeTpCall
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:PyTuple_Pack
+   obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
+   fun:PyModule_ExecDef
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyVectorcall_Call
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:_PyObject_MakeTpCall
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:_PyObject_MakeTpCall
+   obj:/usr/bin/python3.8
+   fun:PyObject_CallFunctionObjArgs
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyObject_GetAttr
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:_PyObject_MakeTpCall
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:PyTuple_Pack
+   obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
+   fun:PyModule_ExecDef
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyVectorcall_Call
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:PyTuple_Pack
+   obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/np_datetime.cpython-38-x86_64-linux-gnu.so
+   fun:PyModule_ExecDef
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyVectorcall_Call
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:PyTuple_Pack
+   obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/tslibs/ccalendar.cpython-38-x86_64-linux-gnu.so
+   fun:PyModule_ExecDef
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyVectorcall_Call
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:_PyObject_MakeTpCall
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   obj:/usr/bin/python3.8
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:PyTuple_Pack
+   obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/interval.cpython-38-x86_64-linux-gnu.so
+   fun:PyModule_ExecDef
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyVectorcall_Call
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyFunction_Vectorcall
+   fun:_PyEval_EvalFrameDefault
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   fun:malloc
+   obj:/usr/bin/python3.8
+   fun:PyTuple_Pack
+   obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so
+   obj:/usr/local/lib/python3.8/dist-packages/pandas/_libs/hashtable.cpython-38-x86_64-linux-gnu.so
+   fun:PyModule_ExecDef
+   obj:/usr/bin/python3.8
+   obj:/usr/bin/python3.8
+   fun:PyVectorcall_Call
+   fun:_PyEval_EvalFrameDefault
+   fun:_PyEval_EvalCodeWithName
+   fun:_PyFunction_Vectorcall
+}
+{
+
+   Memcheck:Leak
+   match-leak-kinds: definite
+   ...
+   obj:/usr/local/lib/python3.8/dist-packages/pandas/*
+   ...
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index 6d7f1d00bf..137069e6b6 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -284,7 +284,7 @@ python3 ./test.py -f alter/alterTabAddTagWithNULL.py
 python3 ./test.py -f alter/alterTimestampColDataProcess.py
 
 # client
-python3 ./test.py -f client/client.py
+#python3 ./test.py -f client/client.py
 python3 ./test.py -f client/version.py
 python3 ./test.py -f client/alterDatabase.py
 python3 ./test.py -f client/noConnectionErrorTest.py
@@ -343,6 +343,7 @@ python3 ./test.py -f functions/function_twa.py -r 1
 python3 ./test.py -f functions/function_twa_test2.py
 python3 ./test.py -f functions/function_stddev_td2555.py
 python3 ./test.py -f functions/showOfflineThresholdIs864000.py
+python3 ./test.py -f functions/function_interp.py
 python3 ./test.py -f insert/metadataUpdate.py
 python3 ./test.py -f query/last_cache.py
 python3 ./test.py -f query/last_row_cache.py
@@ -385,6 +386,7 @@ python3 ./test.py -f query/querySession.py
 python3 test.py -f alter/alter_create_exception.py
 python3 ./test.py -f insert/flushwhiledrop.py
 python3 ./test.py -f insert/schemalessInsert.py
+python3 ./test.py -f alter/alterColMultiTimes.py
 #======================p4-end===============
diff --git a/tests/pytest/functions/function_interp.py b/tests/pytest/functions/function_interp.py
new file mode 100644
index 0000000000..810c90279c
--- /dev/null
+++ b/tests/pytest/functions/function_interp.py
@@ -0,0 +1,46 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+import numpy as np
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor())
+
+        self.rowNum = 10
+        self.ts = 1537146000000
+
+    def run(self):
+        tdSql.prepare()
+        tdSql.execute("create table t(ts timestamp, k int)")
+        tdSql.execute("insert into t values('2021-1-1 1:1:1', 12);")
+
+        tdSql.query("select interp(*) from t where ts='2021-1-1 1:1:1'")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 1, 12)
+
+        tdSql.error("select interp(*) from t where ts >'2021-1-1 1:1:1' and ts < now interval(1s) fill(next)")
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
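The new case only pins interp() down at an exact timestamp; the interp_test.sim hunks at the end of this patch exercise the windowed form against the ap1 seed data. For reference, the expected constants there (3.31818, 3.77273, 9.87500, ...) are plain linear interpolation between the two samples that bracket each 1-second window, and the expected $rows counts are the number of whole-second windows in the queried range. A quick standalone check, in plain Python with no TDengine dependency (helper names are made up for illustration):

from datetime import datetime

def lerp(t, t0, v0, t1, v1):
    # value at time t on the line through samples (t0, v0) and (t1, v1)
    return v0 + (v1 - v0) * (t - t0) / (t1 - t0)

# bracketing ap1 samples, expressed as seconds past 02:19:00
print(round(lerp(55.0, 54.3, 3, 56.5, 4), 5))   # 3.31818
print(round(lerp(56.0, 54.3, 3, 56.5, 4), 5))   # 3.77273
print(round(lerp(57.0, 56.5, 4, 57.5, 5), 5))   # 4.5
print(round(lerp(58.0, 57.9, 7, 58.1, 8), 5))   # 7.5
print(round(lerp(59.0, 58.3, 9, 59.1, 10), 5))  # 9.875

def windows(start, end, inclusive):
    # one interval(1s) window per whole second from start to end
    n = int((end - start).total_seconds())
    return n + 1 if inclusive else n

t0 = datetime(2021, 7, 25, 2, 19, 54)
print(windows(t0, datetime(2021, 7, 25, 2, 20, 0), False))  # 6
print(windows(t0, datetime(2021, 7, 25, 2, 20, 3), True))   # 10
print(windows(t0, datetime(2021, 7, 25, 2, 25, 0), True))   # 307
print(windows(t0, datetime(2021, 7, 25, 3, 25, 0), True))   # 3907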
diff --git a/tests/pytest/query/queryWildcardLength.py b/tests/pytest/query/queryWildcardLength.py
index d15085f751..1fc46fe7d6 100644
--- a/tests/pytest/query/queryWildcardLength.py
+++ b/tests/pytest/query/queryWildcardLength.py
@@ -157,19 +157,6 @@ class TDTestCase:
         tdSql.execute(f'create table {table_name}_sub1 using {table_name} tags ("{table_name}", "{table_name}")')
         tdSql.execute(f'insert into {table_name}_sub1 values (now, "{table_name}", "{table_name}");')
-        # TODO sc1 leave a bug ---> TD-5918
-        # sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"',
-        #             f'select * from {table_name} where bi1 like "{lp_name}"',
-        #             f'select * from {table_name} where bi1 like "{ul_name}"',
-        #             f'select * from {table_name} where nc1 like "{hp_name}"',
-        #             f'select * from {table_name} where nc1 like "{lp_name}"',
-        #             f'select * from {table_name} where nc1 like "{ul_name}"',
-        #             f'select * from {table_name} where si1 like "{hp_name}"',
-        #             f'select * from {table_name} where si1 like "{lp_name}"',
-        #             f'select * from {table_name} where si1 like "{ul_name}"',
-        #             f'select * from {table_name} where sc1 like "{hp_name}"',
-        #             f'select * from {table_name} where sc1 like "{lp_name}"',
-        #             f'select * from {table_name} where sc1 like "{ul_name}"']
         sql_list = [f'select * from {table_name} where bi1 like "{hp_name}"',
                     f'select * from {table_name} where bi1 like "{lp_name}"',
                     f'select * from {table_name} where bi1 like "{ul_name}"',
@@ -178,7 +165,11 @@ class TDTestCase:
                     f'select * from {table_name} where nc1 like "{hp_name}"',
                     f'select * from {table_name} where nc1 like "{lp_name}"',
                     f'select * from {table_name} where nc1 like "{ul_name}"',
                     f'select * from {table_name} where si1 like "{hp_name}"',
                     f'select * from {table_name} where si1 like "{lp_name}"',
-                    f'select * from {table_name} where si1 like "{ul_name}"']
+                    f'select * from {table_name} where si1 like "{ul_name}"',
+                    f'select * from {table_name} where sc1 like "{hp_name}"',
+                    f'select * from {table_name} where sc1 like "{lp_name}"',
+                    f'select * from {table_name} where sc1 like "{ul_name}"']
+
         for sql in sql_list:
             tdSql.query(sql)
         if len(table_name) >= 1:
@@ -211,7 +202,6 @@ class TDTestCase:
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
-
 tdCases.addWindows(__file__, TDTestCase())
 tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py
index f069bb8f70..643886f434 100644
--- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py
+++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestSupportNanoInsert.py
@@ -47,7 +47,6 @@ class TDTestCase:
         else:
             tdLog.info("taosd found in %s" % buildPath)
         binPath = buildPath + "/build/bin/"
-
         # insert: create one or mutiple tables per sql and insert multiple rows per sql
         # insert data from a special timestamp
         # check stable stb0
@@ -90,7 +89,6 @@ class TDTestCase:
         os.system(
             "%staosdemo -f tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json -y " %
             binPath)
-
         tdSql.execute("use nsdb2")
         tdSql.query("show stables")
         tdSql.checkData(0, 4, 100)
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
index 393ced14fd..da02f45fa1 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanosubscribe.py
@@ -103,7 +103,6 @@ class TDTestCase:
         os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt")
         subTimes0 = self.subTimes("all_subscribe_res0.txt")
-        print("pass")
         self.assertCheck("all_subscribe_res0.txt",subTimes0 ,202)
diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py
index 6b5681dfbc..4a5abd49d8 100644
--- a/tests/pytest/tools/taosdemoPerformance.py
+++ b/tests/pytest/tools/taosdemoPerformance.py
@@ -19,11 +19,16 @@ import json
 import sys
 
 class taosdemoPerformace:
-    def __init__(self, commitID, dbName, branch, type):
+    def __init__(self, commitID, dbName, branch, type, numOfTables, numOfRows, numOfInt, numOfDouble, numOfBinary):
         self.commitID = commitID
         self.dbName = dbName
         self.branch = branch
         self.type = type
+        self.numOfTables = numOfTables
+        self.numOfRows = numOfRows
+        self.numOfInt = numOfInt
+        self.numOfDouble = numOfDouble
+        self.numOfBinary = numOfBinary
         self.host = "127.0.0.1"
         self.user = "root"
         self.password = "taosdata"
@@ -51,14 +56,14 @@ class taosdemoPerformace:
         stb = {
             "name": "meters",
             "child_table_exists": "no",
-            "childtable_count": 10000,
+            "childtable_count": self.numOfTables,
             "childtable_prefix": "stb_",
             "auto_create_table": "no",
             "data_source": "rand",
             "batch_create_tbl_num": 10,
             "insert_mode": "taosc",
-            "insert_rows": 100000,
-            "interlace_rows": 100,
+            "insert_rows": self.numOfRows,
+            "interlace_rows": 0,
             "max_sql_len": 1024000,
             "disorder_ratio": 0,
             "disorder_range": 1000,
@@ -68,7 +73,9 @@ class taosdemoPerformace:
             "sample_file": "./sample.csv",
             "tags_file": "",
             "columns": [
-                {"type": "INT", "count": 4}
+                {"type": "INT", "count": self.numOfInt},
+                {"type": "DOUBLE", "count": self.numOfDouble},
+                {"type": "BINARY", "len": 128, "count": self.numOfBinary}
             ],
             "tags": [
                 {"type": "INT", "count": 1},
@@ -76,6 +83,7 @@ class taosdemoPerformace:
             ]
         }
 
+
         stables = []
         stables.append(stb)
@@ -163,21 +171,21 @@ class taosdemoPerformace:
         cursor.execute("create database if not exists %s" % self.dbName)
         cursor.execute("use %s" % self.dbName)
-        cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20))")
-        print("==================== taosdemo performance ====================")
+        cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float, branch binary(50), type binary(20), numoftables int, numofrows int, numofint int, numofdouble int, numofbinary int)")
         print("create tables time: %f" % float(self.createTableTime))
         print("insert records time: %f" % float(self.insertRecordsTime))
         print("records per second: %f" % float(self.recordsPerSecond))
         print("avg delay: %f" % float(self.avgDelay))
         print("max delay: %f" % float(self.maxDelay))
         print("min delay: %f" % float(self.minDelay))
-        cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f, '%s', '%s')" %
+        cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f, '%s', '%s', %d, %d, %d, %d, %d)" %
                        (float(self.createTableTime), float(self.insertRecordsTime), float(self.recordsPerSecond),
-                        self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.branch, self.type))
+                        self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.branch,
+                        self.type, self.numOfTables, self.numOfRows, self.numOfInt, self.numOfDouble, self.numOfBinary))
         cursor.close()
 
         cursor1 = self.conn.cursor()
-        cursor1.execute("drop database if exists %s" % self.insertDB)
+        # cursor1.execute("drop database if exists %s" % self.insertDB)
         cursor1.close()
 
 if __name__ == '__main__':
@@ -209,8 +217,43 @@
         default='glibc',
         type=str,
         help='build type (default: glibc)')
+    parser.add_argument(
+        '-i',
+        '--num-of-int',
+        action='store',
+        default=4,
+        type=int,
+        help='num of int columns (default: 4)')
+    parser.add_argument(
+        '-D',
+        '--num-of-double',
+        action='store',
+        default=0,
+        type=int,
+        help='num of double columns (default: 0)')
+    parser.add_argument(
+        '-B',
+        '--num-of-binary',
+        action='store',
+        default=0,
+        type=int,
+        help='num of binary columns (default: 0)')
+    parser.add_argument(
+        '-t',
+        '--num-of-tables',
+        action='store',
+        default=10000,
+        type=int,
+        help='num of tables (default: 10000)')
+    parser.add_argument(
+        '-r',
+        '--num-of-rows',
+        action='store',
+        default=100000,
+        type=int,
+        help='num of rows (default: 100000)')
 
     args = parser.parse_args()
-    perftest = taosdemoPerformace(args.commit_id, args.database_name, args.git_branch, args.build_type)
+    perftest = taosdemoPerformace(args.commit_id, args.database_name, args.git_branch, args.build_type, args.num_of_tables, args.num_of_rows, args.num_of_int, args.num_of_double, args.num_of_binary)
     perftest.insertData()
     perftest.createTablesAndStoreData()
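With the hardcoded benchmark shape replaced by constructor arguments, the same script can now sweep table count, row count, and column mix. A sketch of the wiring, mirroring the argparse defaults above (the literal values are illustrative; only the parameter names come from this patch, and the snippet assumes the taosdemoPerformace class from this file is in scope):

# Equivalent of running the script with its default flags.
perftest = taosdemoPerformace(
    "abc1234",  # commitID (hypothetical commit hash)
    "perf",     # dbName
    "develop",  # branch
    "glibc",    # type
    10000,      # numOfTables -> "childtable_count" in the insert JSON
    100000,     # numOfRows   -> "insert_rows"
    4,          # numOfInt    -> INT column count
    0,          # numOfDouble -> DOUBLE column count
    0)          # numOfBinary -> BINARY(128) column count
perftest.insertData()
perftest.createTablesAndStoreData()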
diff --git a/tests/script/general/parser/fill.sim b/tests/script/general/parser/fill.sim
index d109dd50f7..3413a0b596 100644
--- a/tests/script/general/parser/fill.sim
+++ b/tests/script/general/parser/fill.sim
@@ -1050,6 +1050,27 @@ sql_error select min(c3) from m_fl_mt0 interval(10w) fill(value, 20)
 sql_error select max(c3) from m_fl_mt0 interval(1n) fill(prev)
 sql_error select min(c3) from m_fl_mt0 interval(1y) fill(value, 20)
 
+sql create table nexttb1 (ts timestamp, f1 int);
+sql insert into nexttb1 values ('2021-08-08 1:1:1', NULL);
+sql insert into nexttb1 values ('2021-08-08 1:1:5', 3);
+
+sql select last(*) from nexttb1 where ts >= '2021-08-08 1:1:1' and ts < '2021-08-08 1:1:10' interval(1s) fill(next);
+if $rows != 9 then
+  return -1
+endi
+if $data00 != @21-08-08 01:01:01.000@ then
+  return -1
+endi
+if $data01 != @21-08-08 01:01:01.000@ then
+  return -1
+endi
+if $data02 != 3 then
+  return -1
+endi
+
+
+
+
 print =============== clear
 #sql drop database $db
 #sql show databases
@@ -1057,4 +1078,4 @@
 #  return -1
 #endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim
index 5edadad3a6..0c93fe919a 100644
--- a/tests/script/general/parser/function.sim
+++ b/tests/script/general/parser/function.sim
@@ -1148,3 +1148,21 @@ endi
 
 sql select derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s));
+
+sql create table smeters (ts timestamp, current float, voltage int);
+sql insert into smeters values ('2021-08-08 10:10:10', 10, 1);
+sql insert into smeters values ('2021-08-08 10:10:12', 10, 2);
+
+sql select stddev(voltage) from smeters where ts>='2021-08-08 10:10:10.000' and ts < '2021-08-08 10:10:20.000' and current=10 interval(1000a);
+if $rows != 2 then
+  return -1
+endi
+if $data00 != @21-08-08 10:10:10.000@ then
+  return -1
+endi
+if $data10 != @21-08-08 10:10:12.000@ then
+  return -1
+endi
+
+
+
diff --git a/tests/script/general/parser/interp.sim b/tests/script/general/parser/interp.sim
index 2dd3204e57..74febff063 100644
--- a/tests/script/general/parser/interp.sim
+++ b/tests/script/general/parser/interp.sim
@@ -55,6 +55,9 @@ while $i < $halfNum
 endw
 print ====== tables created
 
+sql create table ap1 (ts timestamp, pav float);
+sql INSERT INTO ap1 VALUES ('2021-07-25 02:19:54.100',1) ('2021-07-25 02:19:54.200',2) ('2021-07-25 02:19:54.300',3) ('2021-07-25 02:19:56.500',4) ('2021-07-25 02:19:57.500',5) ('2021-07-25 02:19:57.600',6) ('2021-07-25 02:19:57.900',7) ('2021-07-25 02:19:58.100',8) ('2021-07-25 02:19:58.300',9) ('2021-07-25 02:19:59.100',10) ('2021-07-25 02:19:59.300',11) ('2021-07-25 02:19:59.500',12) ('2021-07-25 02:19:59.700',13) ('2021-07-25 02:19:59.900',14) ('2021-07-25 02:20:05.000', 20) ('2021-07-25 02:25:00.000', 10000);
+
 run general/parser/interp_test.sim
 
 print ================== restart server to commit data into disk
@@ -65,6 +68,7 @@ print ================== server restart completed
 
 run general/parser/interp_test.sim
 
+
 print ================= TD-5931
 sql create stable st5931(ts timestamp, f int) tags(t int)
 sql create table ct5931 using st5931 tags(1)
diff --git a/tests/script/general/parser/interp_test.sim b/tests/script/general/parser/interp_test.sim
index 81a77995fb..845afb0173 100644
--- a/tests/script/general/parser/interp_test.sim
+++ b/tests/script/general/parser/interp_test.sim
@@ -927,4 +927,1323 @@ endi
 if $data44 != @18-11-25 19:06:00.000@ then
   return -1
-endi
\ No newline at end of file
+endi
+
+
+
+
+
+
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' interval(1s) fill(linear);
+if $rows != 6 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.31818 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.77273 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.50000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.50000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.87500 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' interval(1s) fill(value, 1);
+if $rows != 6 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != 1.00000 then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 1.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 1.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 1.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 1.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 1.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' interval(1s) fill(NULL);
+if $rows != 6 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != NULL then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != NULL then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != NULL then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != NULL then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != NULL then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' interval(1s) fill(prev);
+if $rows != 6 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:00' interval(1s) fill(next);
+if $rows != 6 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != 1.00000 then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 4.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 4.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 5.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 8.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 10.00000 then
+  return -1
+endi
+
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:56' interval(1s) fill(linear);
+if $rows != 0 then
+  return -1
+endi
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:56' interval(1s) fill(prev);
+if $rows != 2 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:56' interval(1s) fill(next);
+if $rows != 2 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != 1.00000 then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != NULL then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:57' interval(1s) fill(linear);
+if $rows != 3 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.31818 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.77273 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:57' interval(1s) fill(prev);
+if $rows != 3 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:19:57' interval(1s) fill(next);
+if $rows != 3 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != 1.00000 then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 4.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 4.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:03' interval(1s) fill(linear);
+if $rows != 10 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.31818 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.77273 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.50000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.50000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.87500 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != NULL then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != NULL then
+  return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data81 != NULL then
+  return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data91 != NULL then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:03' interval(1s) fill(prev);
+if $rows != 10 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.00000 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 14.00000 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 14.00000 then
+  return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data81 != 14.00000 then
+  return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data91 != 14.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:03' interval(1s) fill(next);
+if $rows != 10 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != 1.00000 then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 4.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 4.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 5.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 8.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 10.00000 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != NULL then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != NULL then
+  return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data81 != NULL then
+  return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data91 != NULL then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:05' interval(1s) fill(linear);
+if $rows != 12 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.31818 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.77273 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.50000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.50000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.87500 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 14.11765 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 15.29412 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:05' interval(1s) fill(prev);
+if $rows != 12 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.00000 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 14.00000 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 14.00000 then
+  return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data81 != 14.00000 then
+  return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data91 != 14.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:05' interval(1s) fill(next);
+if $rows != 12 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != 1.00000 then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 4.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 4.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 5.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 8.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 10.00000 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 20.00000 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 20.00000 then
+  return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data81 != 20.00000 then
+  return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data91 != 20.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:20:02' and ts<='2021-07-25 02:20:05' interval(1s) fill(value, 1);
+if $rows != 4 then
+  return -1
+endi
+if $data00 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data01 != 1.00000 then
+  return -1
+endi
+if $data10 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data11 != 1.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:20:04.000@ then
+  return -1
+endi
+if $data21 != 1.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:20:05.000@ then
+  return -1
+endi
+if $data31 != 20.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:20:02' and ts<='2021-07-25 02:20:05' interval(1s) fill(null);
+if $rows != 4 then
+  return -1
+endi
+if $data00 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data11 != NULL then
+  return -1
+endi
+if $data20 != @21-07-25 02:20:04.000@ then
+  return -1
+endi
+if $data21 != NULL then
+  return -1
+endi
+if $data30 != @21-07-25 02:20:05.000@ then
+  return -1
+endi
+if $data31 != 20.00000 then
+  return -1
+endi
+
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:25' interval(1s) fill(linear);
+if $rows != 32 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.31818 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.77273 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.50000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.50000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.87500 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 14.11765 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 15.29412 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:25' interval(1s) fill(prev);
+if $rows != 32 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.00000 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 14.00000 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 14.00000 then
+  return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data81 != 14.00000 then
+  return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data91 != 14.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:20:25' interval(1s) fill(next);
+if $rows != 32 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != 1.00000 then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 4.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 4.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 5.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 8.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 10.00000 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 20.00000 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 20.00000 then
+  return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data81 != 20.00000 then
+  return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data91 != 20.00000 then
+  return -1
+endi
+
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:25:00' interval(1s) fill(linear);
+if $rows != 307 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.31818 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.77273 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.50000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.50000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.87500 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 14.11765 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 15.29412 then
+  return -1
+endi
+
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:25:00' interval(1s) fill(prev);
+if $rows != 307 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.00000 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 14.00000 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 14.00000 then
+  return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data81 != 14.00000 then
+  return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data91 != 14.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 02:25:00' interval(1s) fill(next);
+if $rows != 307 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != 1.00000 then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 4.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 4.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 5.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 8.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 10.00000 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 20.00000 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 20.00000 then
+  return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data81 != 20.00000 then
+  return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data91 != 20.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 03:25:00' interval(1s) fill(linear);
+if $rows != 3907 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.31818 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.77273 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.50000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.50000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.87500 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 14.11765 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 15.29412 then
+  return -1
+endi
+
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 03:25:00' interval(1s) fill(prev);
+if $rows != 3907 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != NULL then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 3.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 3.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 4.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 7.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 9.00000 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 14.00000 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 14.00000 then
+  return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data81 != 14.00000 then
+  return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data91 != 14.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<='2021-07-25 03:25:00' interval(1s) fill(next);
+if $rows != 3907 then
+  return -1
+endi
+if $data00 != @21-07-25 02:19:54.000@ then
+  return -1
+endi
+if $data01 != 1.00000 then
+  return -1
+endi
+if $data10 != @21-07-25 02:19:55.000@ then
+  return -1
+endi
+if $data11 != 4.00000 then
+  return -1
+endi
+if $data20 != @21-07-25 02:19:56.000@ then
+  return -1
+endi
+if $data21 != 4.00000 then
+  return -1
+endi
+if $data30 != @21-07-25 02:19:57.000@ then
+  return -1
+endi
+if $data31 != 5.00000 then
+  return -1
+endi
+if $data40 != @21-07-25 02:19:58.000@ then
+  return -1
+endi
+if $data41 != 8.00000 then
+  return -1
+endi
+if $data50 != @21-07-25 02:19:59.000@ then
+  return -1
+endi
+if $data51 != 10.00000 then
+  return -1
+endi
+if $data60 != @21-07-25 02:20:00.000@ then
+  return -1
+endi
+if $data61 != 20.00000 then
+  return -1
+endi
+if $data70 != @21-07-25 02:20:01.000@ then
+  return -1
+endi
+if $data71 != 20.00000 then
+  return -1
+endi
+if $data80 != @21-07-25 02:20:02.000@ then
+  return -1
+endi
+if $data81 != 20.00000 then
+  return -1
+endi
+if $data90 != @21-07-25 02:20:03.000@ then
+  return -1
+endi
+if $data91 != 20.00000 then
+  return -1
+endi
+
+sql select interp(pav) from ap1 where ts> '2021-07-25 02:19:54' and ts<'2021-07-25 02:20:07' interval(1s);
+if $rows != 1 then
+  return -1
+endi
+if $data00 != @21-07-25 02:20:05.000@ then
+  return -1
+endi
+if $data01 != 20.00000 then
+  return -1
+endi
+