Merge branch 'main' of https://github.com/taosdata/TDengine into fix/TD-33020
This commit is contained in:
commit 2eb0d34d0f

Jenkinsfile2 | 42
@@ -6,6 +6,7 @@ node {
file_zh_changed = ''
file_en_changed = ''
file_no_doc_changed = '1'
file_only_tdgpt_change_except = '1'
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
def currentBuildNumber = env.BUILD_NUMBER.toInteger()

@@ -73,9 +74,19 @@ def check_docs(){
''',
returnStdout: true
).trim()

file_only_tdgpt_change_except = sh (
script: '''
cd ${WKC}
git --no-pager diff --name-only FETCH_HEAD `git merge-base FETCH_HEAD ${CHANGE_TARGET}`|grep -v "^docs/en/"|grep -v "^docs/zh/"|grep -v ".md$" | grep -v "forecastoperator.c\\|anomalywindowoperator.c" |grep -v "tsim/analytics" |grep -v "tdgpt_cases.task" || :
''',
returnStdout: true
).trim()

echo "file_zh_changed: ${file_zh_changed}"
echo "file_en_changed: ${file_en_changed}"
echo "file_no_doc_changed: ${file_no_doc_changed}"
echo "file_only_tdgpt_change_except: ${file_only_tdgpt_change_except}"
}
}

@@ -385,7 +396,7 @@ def run_win_test() {
}

pipeline {
agent none
agent any
options { skipDefaultCheckout() }
environment{
WKDIR = '/var/lib/jenkins/workspace'

@@ -462,6 +473,10 @@ pipeline {
WIN_COMMUNITY_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community"
WIN_SYSTEM_TEST_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community\\tests\\system-test"
}
when {
beforeAgent true
expression { file_only_tdgpt_change_except != '' }
}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 126, unit: 'MINUTES'){

@@ -475,6 +490,10 @@ pipeline {
}
stage('mac test') {
agent{label " Mac_catalina "}
when {
beforeAgent true
expression { file_only_tdgpt_change_except != '' }
}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 60, unit: 'MINUTES'){

@@ -550,6 +569,15 @@ pipeline {
cd ${WKC}/tests/parallel_test
./run_scan_container.sh -d ${WKDIR} -b ${BRANCH_NAME}_${BUILD_ID} -f ${WKDIR}/tmp/${BRANCH_NAME}_${BUILD_ID}/docs_changed.txt ''' + extra_param + '''
'''
if ( file_no_doc_changed =~ /forecastoperator.c|anomalywindowoperator.c|tsim\/analytics|tdgpt_cases.task/ ) {
sh '''
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
date
timeout 600 time ./run.sh -e -m /home/m.json -t tdgpt_cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 300 ''' + extra_param + '''
'''
}
if ( file_only_tdgpt_change_except != '' ) {
sh '''
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=2

@@ -559,20 +587,8 @@ pipeline {
}
}
}
/*catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 15, unit: 'MINUTES'){
script {
sh '''
echo "packaging ..."
date
rm -rf ${WKC}/release/*
cd ${WKC}/packaging
./release.sh -v cluster -n 3.0.0.100 -s static
'''
}
}
}*/
}
}
}
}
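The `file_only_tdgpt_change_except` step above filters the changed-file list so that documentation-only and TDgpt-only changes appear to skip the heavier test stages gated by the `when` blocks. A rough, hypothetical re-implementation of that grep chain in Python, for illustration only (the sample file names are invented and are not taken from this commit; the unescaped `.md$` of the original grep is treated here as a literal `.md` suffix):

```python
import re

# Patterns mirroring the grep -v chain in the Jenkinsfile above.
EXCLUDE = re.compile(
    r"^docs/en/|^docs/zh/|\.md$|forecastoperator\.c|anomalywindowoperator\.c"
    r"|tsim/analytics|tdgpt_cases\.task"
)

changed_files = [  # hypothetical changed-file list
    "docs/zh/06-advanced/06-TDgpt/index.md",
    "source/libs/executor/src/forecastoperator.c",
    "source/dnode/vnode/src/meta/metaTable.c",
]
remaining = [f for f in changed_files if not EXCLUDE.search(f)]
print(remaining)  # only metaTable.c survives, so the variable would be non-empty
```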
@@ -169,7 +169,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause

alter_table_clause: {
alter_table_options
| SET TAG tag_name = new_tag_value
| SET TAG tag_name = new_tag_value,tag_name2=new_tag2_value...
}

alter_table_options:
@@ -107,12 +107,33 @@ node_url 是提供服务的 Anode 的 IP 和 PORT组成的字符串, 例如:`c
List all data analysis nodes (Anodes) in the cluster, including their `FQDN`, `PORT`, `STATUS`, and other attributes.
```sql
SHOW ANODES;

taos> show anodes;
id | url | status | create_time | update_time |
==================================================================================================================
1 | 192.168.0.1:6090 | ready | 2024-11-28 18:44:27.089 | 2024-11-28 18:44:27.089 |
Query OK, 1 row(s) in set (0.037205s)

```

#### Viewing the available time-series data analysis services

```SQL
SHOW ANODES FULL;

taos> show anodes full;
id | type | algo |
============================================================================
1 | anomaly-detection | shesd |
1 | anomaly-detection | iqr |
1 | anomaly-detection | ksigma |
1 | anomaly-detection | lof |
1 | anomaly-detection | grubbs |
1 | anomaly-detection | ad_encoder |
1 | forecast | holtwinters |
1 | forecast | arima |
Query OK, 8 row(s) in set (0.008796s)

```

#### Refreshing the analysis algorithm cache in the cluster
@@ -9,7 +9,7 @@ Autoencoder<sup>[1]</sup>: TDgpt 内置使用自编码器(Autoencoder)的异
--- add the model name ad_autoencoder_foo in options to run anomaly detection with the autoencoder model trained on the foo dataset (table)
SELECT COUNT(*), _WSTART
FROM foo
ANOMALY_DETECTION(col1, 'algo=encoder, model=ad_autoencoder_foo');
ANOMALY_WINDOW(col1, 'algo=encoder, model=ad_autoencoder_foo');
```

### References
@@ -52,7 +52,7 @@ class _MyAnomalyDetectionService(AbstractAnomalyDetectionService):

```SQL
--- run anomaly detection on column col, calling the newly added anomaly detection class by setting the algo parameter to myad
SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col, 'algo=myad')
SELECT COUNT(*) FROM foo ANOMALY_WINDOW(col, 'algo=myad')
```
If this is the first time this Anode has been started, first follow the steps in [TDgpt Installation and Deployment](../../management/) to add the Anode to the TDengine system.
@@ -21,7 +21,7 @@ Anode的主要目录结构如下图所示
.
├── cfg
├── model
│   └── ac_detection
│   └── ad_autoencoder
├── release
├── script
└── taosanalytics
@@ -63,7 +63,7 @@ Anode采用算法自动加载模式,因此只识别符合命名约定的 Pytho

```SQL
--- the parameter value after algo is the class attribute `name`
SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col_name, 'algo=name')
SELECT COUNT(*) FROM foo ANOMALY_WINDOW(col_name, 'algo=name')
```
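As a rough illustration of the naming convention described above, a detector class only needs to expose its algorithm name through a `name` class attribute for `algo=<name>` to select it. The class below is a made-up placeholder, not code from this commit; the real class would derive from the Anode's anomaly-detection service base class shown earlier:

```python
class _MyAnomalyDetectionService:  # hypothetical; would subclass the Anode base class
    name = "myad"                  # value used on the SQL side: 'algo=myad'
    desc = "example detector registered through its name attribute"
```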

## Adding an Analysis Algorithm with a Model
@@ -72,11 +72,32 @@ SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col_name, 'algo=name')

To add an analysis algorithm that uses a model to an Anode, first create a directory for the algorithm under the `model` directory (the directory name can be chosen freely). All trained models that the algorithm generates for different input time-series data must be saved in this directory, and the directory name must be fixed in the algorithm code so that the models stored there can always be loaded. To make sure a model can be read back correctly, the stored model is serialized with the `joblib` library.

The following uses the autoencoder (Autoencoder) as an example to show how to add a pre-trained model for anomaly detection.
First we create a directory named `ad_detection` under the `model` directory to hold all models trained with the autoencoder. Then we train the autoencoder on the time-series data of table foo to obtain the model ad_autoencoder_foo, serialize it with `joblib`, and save it in the `ad_detection` directory.
First we create a directory named `ad_autoencoder` under the `model` directory (see the directory layout above) to hold all models trained with the autoencoder. Then we train the autoencoder on the time-series data of table foo to obtain a model for that table, which we name `ad_autoencoder_foo`; the model is serialized with `joblib` and saved in the `ad_autoencoder` directory. As shown below, ad_autoencoder_foo consists of two files: the model file (ad_autoencoder_foo.dat) and the model description file (ad_autoencoder_foo.info).

To call the saved model from SQL, specify the model name with `model=ad_autoencoder_foo` in the call parameters, while `algo=encoder` selects the models produced by the autoencoder (`encoder` is the name defined in the code when the algorithm was added), so that this model can be invoked.
```bash
.
├── cfg
├── model
│   └── ad_autoencoder
│       ├── ad_autoencoder_foo.dat
│       └── ad_autoencoder_foo.info
├── release
├── script
└── taosanalytics
    ├── algo
    │   ├── ad
    │   └── fc
    ├── misc
    └── test

```
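The model files shown in the tree above come from the `joblib` serialization step mentioned earlier. A minimal sketch of that step, for illustration only (the stand-in model object is a placeholder for the real trained autoencoder, and the accompanying ad_autoencoder_foo.info description file is not shown):

```python
import os
import joblib

# stand-in for a trained autoencoder produced by the training code
model = {"algo": "encoder", "table": "foo"}

os.makedirs("model/ad_autoencoder", exist_ok=True)
joblib.dump(model, "model/ad_autoencoder/ad_autoencoder_foo.dat")

# the Anode would later load the model the same way
restored = joblib.load("model/ad_autoencoder/ad_autoencoder_foo.dat")
print(restored)
```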

The following shows how to call this model from SQL.
Setting the parameter `algo=ad_encoder` tells the analysis platform to use a model trained by the autoencoder algorithm (the autoencoder is in the list of available algorithms), so it can be specified directly. In addition, the specific model trained by the autoencoder for a given dataset must be identified; here we use the saved model `ad_autoencoder_foo`, so the parameter `model=ad_autoencoder_foo` is added so that this model can be invoked.

```SQL
--- add the model name ad_autoencoder_foo in options to run anomaly detection with the autoencoder model trained on the foo dataset (table)
SELECT COUNT(*), _WSTART FROM foo ANOMALY_DETECTION(col1, 'algo=encoder, model=ad_autoencoder_foo');
SELECT COUNT(*), _WSTART
FROM foo
ANOMALY_WINDOW(col1, 'algo=ad_encoder, model=ad_autoencoder_foo');
```
@@ -89,6 +89,9 @@ taos -h h1.taos.com -s "use db; show tables;"

The behavior of the TDengine CLI can also be controlled through parameter settings in the configuration file. For the available configuration parameters, see [Client Configuration](../../components/taosc).

## Error Code Table
Since TDengine 3.3.5.0, the error messages returned by the TDengine CLI include specific error codes. Users can look up the detailed causes and suggested remedies on the error-code reference page of the TDengine website, see [Error Code Reference](../error_code/).

## TDengine CLI TAB Key Completion

- Pressing the TAB key on an empty command line lists all commands supported by the TDengine CLI
@@ -171,7 +171,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause

alter_table_clause: {
alter_table_options
| SET TAG tag_name = new_tag_value
| SET TAG tag_name = new_tag_value,tag_name2=new_tag2_value...
}

alter_table_options:
@ -12,30 +12,30 @@ description: TDengine 服务端的错误码列表和详细说明
|
|||
## rpc
|
||||
|
||||
| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 |
|
||||
| ---------- | -------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- |
|
||||
| 0x8000000B | 无法正常收发请求 | 1. 网络不通 2. 多次重试、依然不能执行REQ | 1. 检查网络 2. 分析日志,具体原因比较复杂 |
|
||||
| 0x80000013 | 客户端和服务端之间的时间时间相差太大, 默认900s | 1. 客户端和服务端不在同一个时区 2. 客户端和服务端在同一个时区,但是两者的时间不同步、相差太大 | 1. 调整到同一个时区 2. 校准客户端和服务端的时间 |
|
||||
| 0x80000015 | 无法解析FQDN | 设置了无效的fqdn | 检查fqdn 的设置 |
|
||||
| 0x80000017 | 当前端口被占用 | 端口P已经被某个服务占用的情况下,新启的服务依然尝试绑定端口P | 1. 改动新服务的服务端口 2. 杀死之前占用端口的服务 |
|
||||
| 0x80000018 | 由于网络抖动/ REQ 请求时间过长导致系统主动摘掉REQ 所使用的conn | 1. 网络抖动 2. REQ 请求时间过长,大于900s | 1. 设置系统的最大超时时长 2. 检查REQ的请求时长 |
|
||||
| 0x80000019 | 暂时没有用到这个错误码 | | |
|
||||
| 0x80000020 | 多次重试之后,所有dnode 依然都链接不上 | 1. 所有的节点都挂了 2. 有节点挂了,但是存活的节点都不是master 节点 | 1. 查看taosd 的状态、分析taosd 挂掉的原因或者分析存活的taosd 为什么不是主 |
|
||||
| 0x80000021 | 多次重试之后,所有dnode 依然都链接不上 | 1. 网络异常 2. req请求时间太长,服务端可能发生死锁等问题。系统自己断开了链接 | 1. 检查网络 2. 检查req 的执行时间 |
|
||||
| 0x80000022 | 达到了可用链接上线。 | 1. 并发太高、占用链接已经到达上线。 2. 服务端的BUG,导致conn 一直不释放, | 1. 提高tsNumOfRpcSessions这个值。 2. tsTimeToGetAvailableConn 3. 分析服务端不释放的conn 的原因 |
|
||||
| ---------- | -----------------------------| --- | --- |
|
||||
| 0x8000000B | Unable to establish connection | 1.网络不通 2.多次重试、依然不能执行请求 | 1.检查网络 2.分析日志,具体原因比较复杂 |
|
||||
| 0x80000013 | Client and server's time is not synchronized | 1.客户端和服务端不在同一个时区 2.客户端和服务端在同一个时区,但是两者的时间不同步,相差超过 900 秒 | 1.调整到同一个时区 2.校准客户端和服务端的时间|
|
||||
| 0x80000015 | Unable to resolve FQDN | 设置了无效的 fqdn | 检查fqdn 的设置 |
|
||||
| 0x80000017 | Port already in use | 端口已经被某个服务占用的情况下,新启的服务依然尝试绑定该端口 | 1.改动新服务的服务端口 2.杀死之前占用端口的服务 |
|
||||
| 0x80000018 | Conn is broken | 由于网络抖动或者请求时间过长(超过 900 秒),导致系统主动摘掉连接 | 1.设置系统的最大超时时长 2.检查请求时长 |
|
||||
| 0x80000019 | Conn read timeout | 未启用 | |
|
||||
| 0x80000020 | some vnode/qnode/mnode(s) out of service | 多次重试之后,仍然无法连接到集群,可能是所有的节点都宕机了,或者存活的节点不是 Leader 节点 | 1.查看 taosd 的状态、分析 taosd 宕机的原因 2.分析存活的 taosd 为什么无法选取 Leader |
|
||||
| 0x80000021 | some vnode/qnode/mnode(s) conn is broken | 多次重试之后,仍然无法连接到集群,可能是网络异常、请求时间太长、服务端死锁等问题 | 1.检查网络 2.请求的执行时间 |
|
||||
| 0x80000022 | rpc open too many session | 1.并发太高导致占用链接已经到达上限 2.服务端的 BUG,导致连接一直不释放 | 1.调整配置参数 numOfRpcSessions 2.调整配置参数 timeToGetAvailableConn 3.分析服务端不释放的连接的原因 |
|
||||
|
||||
|
||||
## common
|
||||
|
||||
| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 |
|
||||
| ---------- | --------------------------------- | -------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 0x80000100 | Operation not supported | 1. 操作不被支持、不允许的场景 | 1. 检查操作是否有误,确认该功能是否被支持 |
|
||||
| 0x80000102 | Out of Memory | 1. 客户端或服务端内存分配失败的场景 | 1. 检查客户端、服务端内存是否充足 |
|
||||
| ---------- | -----------------------------| --- | --- |
|
||||
| 0x80000100 | Operation not supported | 操作不被支持、不允许的场景 | 检查操作是否有误,确认该功能是否被支持 |
|
||||
| 0x80000102 | Out of Memory | 客户端或服务端内存分配失败的场景 | 检查客户端、服务端内存是否充足 |
|
||||
| 0x80000104 | Data file corrupted | 1.存储数据文件损坏 2.udf 文件无法创建 | 1.联系涛思客户支持 2.确认服务端对临时目录有读写创建文件权限 |
|
||||
| 0x80000106 | too many Ref Objs | 无可用ref资源 | 保留现场和日志,github 上报 issue |
|
||||
| 0x80000107 | Ref ID is removed | 引用的ref资源已经释放 | 保留现场和日志,github 上报 issue |
|
||||
| 0x80000108 | Invalid Ref ID | 无效ref ID | 保留现场和日志,github 上报 issue |
|
||||
| 0x8000010A | Ref is not there | ref 信息不存在 | 保留现场和日志,github 上报 issue |
|
||||
| 0x80000110 | | | |
|
||||
| 0x80000110 | Unexpected generic error | 系统内部错误 | 保留现场和日志,github 上报 issue |
|
||||
| 0x80000111 | Action in progress | 操作进行中 | 1.等待操作完成 2.根据需要取消操作 3.当超出合理时间仍然未完成可保留现场和日志,或联系客户支持 |
|
||||
| 0x80000112 | Out of range | 配置参数超出允许值范围 | 更改参数 |
|
||||
| 0x80000115 | Invalid message | 消息错误 | 1. 检查是否存在节点间版本不一致 2. 保留现场和日志,github上报issue |
|
||||
|
@ -309,11 +309,11 @@ description: TDengine 服务端的错误码列表和详细说明
|
|||
|
||||
| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 |
|
||||
| ---------- | ---------------------------- | ----------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ |
|
||||
| 0x80000903 | Sync timeout | 场景1:发生了切主;旧主节点上已经开始协商但尚未达成一致的请求将超时。 场景2:从节点响应超时,导致协商超时。 | 检查集群状态,例如:show vgroups;查看服务端日志,以及服务端节点之间的网络状况。 |
|
||||
| 0x80000903 | Sync timeout | 场景1:发生了切主 旧主节点上已经开始协商但尚未达成一致的请求将超时。 场景2:从节点响应超时,导致协商超时。 | 检查集群状态,例如:show vgroups 查看服务端日志,以及服务端节点之间的网络状况。 |
|
||||
| 0x8000090C | Sync leader is unreachable | 场景1:选主过程中 场景2:客户端请求路由到了从节点,且重定向失败 场景3:客户端或服务端网络配置错误 | 检查集群状态、网络配置、应用程序访问状态等。查看服务端日志,以及服务端节点之间的网络状况。 |
|
||||
| 0x8000090F | Sync new config error | 成员变更配置错误 | 内部错误,用户无法干预 |
|
||||
| 0x80000911 | Sync not ready to propose | 场景1:恢复未完成 | 检查集群状态,例如:show vgroups。查看服务端日志,以及服务端节点之间的网络状况。 |
|
||||
| 0x80000914 | Sync leader is restoring | 场景1:发生了切主;选主后,日志重演中 | 检查集群状态,例如:show vgroups。查看服务端日志,观察恢复进度。 |
|
||||
| 0x80000914 | Sync leader is restoring | 场景1:发生了切主 选主后,日志重演中 | 检查集群状态,例如:show vgroups。查看服务端日志,观察恢复进度。 |
|
||||
| 0x80000915 | Sync invalid snapshot msg | 快照复制消息错误 | 服务端内部错误 |
|
||||
| 0x80000916 | Sync buffer is full | 场景1:客户端请求并发数特别大,超过了服务端处理能力,或者因为网络和CPU资源严重不足,或者网络连接问题等。 | 检查集群状态,系统资源使用率(例如磁盘IO、CPU、网络通信等),以及节点之间网络连接状况。 |
|
||||
| 0x80000917 | Sync write stall | 场景1:状态机执行被阻塞,例如因系统繁忙,磁盘IO资源严重不足,或落盘失败等 | 检查集群状态,系统资源使用率(例如磁盘IO和CPU等),以及是否发生了落盘失败等。 |
|
||||
|
|
|
@ -178,6 +178,7 @@ typedef enum _mgmt_table {
|
|||
#define TSDB_ALTER_TABLE_DROP_TAG_INDEX 12
|
||||
#define TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS 13
|
||||
#define TSDB_ALTER_TABLE_ADD_COLUMN_WITH_COMPRESS_OPTION 14
|
||||
#define TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL 15
|
||||
|
||||
#define TSDB_FILL_NONE 0
|
||||
#define TSDB_FILL_NULL 1
|
||||
|
@ -985,7 +986,6 @@ typedef struct SEpSet {
|
|||
SEp eps[TSDB_MAX_REPLICA];
|
||||
} SEpSet;
|
||||
|
||||
|
||||
int32_t tEncodeSEpSet(SEncoder* pEncoder, const SEpSet* pEp);
|
||||
int32_t tDecodeSEpSet(SDecoder* pDecoder, SEpSet* pEp);
|
||||
int32_t taosEncodeSEpSet(void** buf, const SEpSet* pEp);
|
||||
|
@ -3259,6 +3259,16 @@ int32_t tEncodeSVDropTbBatchRsp(SEncoder* pCoder, const SVDropTbBatchRsp* pRsp);
|
|||
int32_t tDecodeSVDropTbBatchRsp(SDecoder* pCoder, SVDropTbBatchRsp* pRsp);
|
||||
|
||||
// TDMT_VND_ALTER_TABLE =====================
|
||||
typedef struct SMultiTagUpateVal {
|
||||
char* tagName;
|
||||
int32_t colId;
|
||||
int8_t tagType;
|
||||
int8_t tagFree;
|
||||
uint32_t nTagVal;
|
||||
uint8_t* pTagVal;
|
||||
int8_t isNull;
|
||||
SArray* pTagArray;
|
||||
} SMultiTagUpateVal;
|
||||
typedef struct {
|
||||
char* tbName;
|
||||
int8_t action;
|
||||
|
@ -3288,11 +3298,13 @@ typedef struct {
|
|||
int64_t ctimeMs; // fill by vnode
|
||||
int8_t source; // TD_REQ_FROM_TAOX-taosX or TD_REQ_FROM_APP-taosClient
|
||||
uint32_t compress; // TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS
|
||||
SArray* pMultiTag; // TSDB_ALTER_TABLE_ADD_MULTI_TAGS
|
||||
} SVAlterTbReq;
|
||||
|
||||
int32_t tEncodeSVAlterTbReq(SEncoder* pEncoder, const SVAlterTbReq* pReq);
|
||||
int32_t tDecodeSVAlterTbReq(SDecoder* pDecoder, SVAlterTbReq* pReq);
|
||||
int32_t tDecodeSVAlterTbReqSetCtime(SDecoder* pDecoder, SVAlterTbReq* pReq, int64_t ctimeMs);
|
||||
void tfreeMultiTagUpateVal(void* pMultiTag);
|
||||
|
||||
typedef struct {
|
||||
int32_t code;
|
||||
|
|
|
@ -264,8 +264,18 @@ typedef struct SAlterTableStmt {
|
|||
SDataType dataType;
|
||||
SValueNode* pVal;
|
||||
SColumnOptions* pColOptions;
|
||||
SNodeList* pNodeListTagValue;
|
||||
} SAlterTableStmt;
|
||||
|
||||
typedef struct SAlterTableMultiStmt {
|
||||
ENodeType type;
|
||||
char dbName[TSDB_DB_NAME_LEN];
|
||||
char tableName[TSDB_TABLE_NAME_LEN];
|
||||
int8_t alterType;
|
||||
|
||||
SNodeList* pNodeListTagValue;
|
||||
} SAlterTableMultiStmt;
|
||||
|
||||
typedef struct SCreateUserStmt {
|
||||
ENodeType type;
|
||||
char userName[TSDB_USER_LEN];
|
||||
|
|
|
@ -640,6 +640,7 @@ int32_t taosGetErrSize();
|
|||
#define TSDB_CODE_QRY_FILTER_WRONG_OPTR_TYPE TAOS_DEF_ERROR_CODE(0, 0x0735)
|
||||
#define TSDB_CODE_QRY_FILTER_RANGE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0736)
|
||||
#define TSDB_CODE_QRY_FILTER_INVALID_TYPE TAOS_DEF_ERROR_CODE(0, 0x0737)
|
||||
#define TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS TAOS_DEF_ERROR_CODE(0, 0x0738)
|
||||
|
||||
// grant
|
||||
#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800)
|
||||
|
|
|
@ -0,0 +1,134 @@
|
|||
import pytest
|
||||
import subprocess
|
||||
import os
|
||||
from versionCheckAndUninstallforPytest import UninstallTaos
|
||||
import platform
|
||||
import re
|
||||
import time
|
||||
import signal
|
||||
import logging
|
||||
|
||||
|
||||
|
||||
system = platform.system()
|
||||
current_path = os.path.abspath(os.path.dirname(__file__))
|
||||
|
||||
with open("%s/test_server_unix_tdgpt" % current_path) as f:
|
||||
cases = f.read().splitlines()
|
||||
|
||||
OEM = ["ProDB"]
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def get_config(request):
|
||||
verMode = request.config.getoption("--verMode")
|
||||
taosVersion = request.config.getoption("--tVersion")
|
||||
baseVersion = request.config.getoption("--baseVersion")
|
||||
sourcePath = request.config.getoption("--sourcePath")
|
||||
config = {
|
||||
"verMode": verMode,
|
||||
"taosVersion": taosVersion,
|
||||
"baseVersion": baseVersion,
|
||||
"sourcePath": sourcePath,
|
||||
"system": platform.system(),
|
||||
"arch": platform.machine()
|
||||
}
|
||||
return config
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def setup_module(get_config):
|
||||
def run_cmd(command):
|
||||
print("CMD:", command)
|
||||
result = subprocess.run(command, capture_output=True, text=True, shell=True)
|
||||
print("STDOUT:", result.stdout)
|
||||
print("STDERR:", result.stderr)
|
||||
print("Return Code:", result.returncode)
|
||||
assert result.returncode == 0
|
||||
return result
|
||||
|
||||
# setup before module tests
|
||||
config = get_config
|
||||
if config["system"] == "Windows":
|
||||
cmd = r"mkdir ..\..\debug\build\bin"
|
||||
else:
|
||||
cmd = "mkdir -p ../../debug/build/bin/"
|
||||
subprocess.getoutput(cmd)
|
||||
if config["system"] == "Linux" or config["system"] == "Darwin" : # add tmq_sim
|
||||
cmd = "cp -rf ../../../debug/build/bin/tmq_sim ../../debug/build/bin/."
|
||||
subprocess.getoutput(cmd)
|
||||
if config["system"] == "Darwin":
|
||||
cmd = "sudo cp -rf /usr/local/bin/taos* ../../debug/build/bin/"
|
||||
elif config["system"] == "Windows":
|
||||
cmd = r"xcopy C:\TDengine\taos*.exe ..\..\debug\build\bin /Y"
|
||||
else:
|
||||
if config["baseVersion"] in OEM:
|
||||
cmd = '''sudo find /usr/bin -name 'prodb*' -exec sh -c 'for file; do cp "$file" "../../debug/build/bin/taos${file##/usr/bin/%s}"; done' sh {} +''' % (
|
||||
config["baseVersion"].lower())
|
||||
else:
|
||||
cmd = "sudo cp /usr/bin/taos* ../../debug/build/bin/"
|
||||
run_cmd(cmd)
|
||||
if config["baseVersion"] in OEM: # mock OEM
|
||||
cmd = "sed -i 's/taos.cfg/%s.cfg/g' ../../tests/pytest/util/dnodes.py" % config["baseVersion"].lower()
|
||||
run_cmd(cmd)
|
||||
cmd = "sed -i 's/taosdlog.0/%sdlog.0/g' ../../tests/pytest/util/dnodes.py" % config["baseVersion"].lower()
|
||||
run_cmd(cmd)
|
||||
cmd = "sed -i 's/taos.cfg/%s.cfg/g' ../../tests/army/frame/server/dnode.py" % config["baseVersion"].lower()
|
||||
run_cmd(cmd)
|
||||
cmd = "sed -i 's/taosdlog.0/%sdlog.0/g' ../../tests/army/frame/server/dnode.py" % config["baseVersion"].lower()
|
||||
run_cmd(cmd)
|
||||
cmd = "ln -s /usr/bin/prodb /usr/local/bin/taos"
|
||||
subprocess.getoutput(cmd)
|
||||
|
||||
# yield
|
||||
#
|
||||
# name = "taos"
|
||||
# if config["baseVersion"] in OEM:
|
||||
# name = config["baseVersion"].lower()
|
||||
# subprocess.getoutput("rm /usr/local/bin/taos")
|
||||
# subprocess.getoutput("pkill taosd")
|
||||
# UninstallTaos(config["taosVersion"], config["verMode"], True, name)
|
||||
|
||||
|
||||
# use pytest fixture to exec case
|
||||
@pytest.fixture(params=cases)
|
||||
def run_command(request):
|
||||
commands = request.param
|
||||
if commands.strip().startswith("#"):
|
||||
pytest.skip("This case has been marked as skipped")
|
||||
d, command = commands.strip().split(",")
|
||||
if system == "Windows":
|
||||
cmd = r"cd %s\..\..\tests\%s && %s" % (current_path, d, command)
|
||||
else:
|
||||
cmd = "cd %s/../../tests/%s&&sudo %s" % (current_path, d, command)
|
||||
print(cmd)
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, shell=True)
|
||||
return {
|
||||
"command": command,
|
||||
"stdout": result.stdout,
|
||||
"stderr": result.stderr,
|
||||
"returncode": result.returncode
|
||||
}
|
||||
|
||||
|
||||
class TestServer:
|
||||
|
||||
@pytest.mark.all
|
||||
def test_execute_cases(self, setup_module, run_command):
|
||||
# assert the result
|
||||
if run_command['returncode'] != 0:
|
||||
print(f"Running command: {run_command['command']}")
|
||||
print("STDOUT:", run_command['stdout'])
|
||||
print("STDERR:", run_command['stderr'])
|
||||
print("Return Code:", run_command['returncode'])
|
||||
else:
|
||||
print(f"Running command: {run_command['command']}")
|
||||
if len(run_command['stdout']) > 1000:
|
||||
print("STDOUT:", run_command['stdout'][:1000] + "...")
|
||||
else:
|
||||
print("STDOUT:", run_command['stdout'])
|
||||
print("STDERR:", run_command['stderr'])
|
||||
print("Return Code:", run_command['returncode'])
|
||||
|
||||
assert run_command[
|
||||
'returncode'] == 0, f"Command '{run_command['command']}' failed with return code {run_command['returncode']}"
|
|
@ -0,0 +1 @@
|
|||
system-test,python3 ./test.py -f 9-tdgpt/test_gpt.py
|
|
@ -0,0 +1,94 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Define log file and result files
|
||||
LOG_FILE="test_server.log"
|
||||
SUCCESS_FILE="success.txt"
|
||||
FAILED_FILE="failed.txt"
|
||||
|
||||
# Initialize/clear result files
|
||||
> "$SUCCESS_FILE"
|
||||
> "$FAILED_FILE"
|
||||
> "$LOG_FILE"
|
||||
|
||||
# Switch to the target directory
|
||||
TARGET_DIR="../../tests/system-test/"
|
||||
|
||||
echo "===== Changing Directory to $TARGET_DIR =====" | tee -a "$LOG_FILE"
|
||||
|
||||
if cd "$TARGET_DIR"; then
|
||||
echo "Successfully changed directory to $TARGET_DIR" | tee -a "$LOG_FILE"
|
||||
else
|
||||
echo "ERROR: Failed to change directory to $TARGET_DIR" | tee -a "$LOG_FILE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Define the Python commands to execute :case list
|
||||
commands=(
|
||||
"python3 ./test.py -f 2-query/join.py"
|
||||
"python3 ./test.py -f 1-insert/insert_column_value.py"
|
||||
"python3 ./test.py -f 2-query/primary_ts_base_5.py"
|
||||
"python3 ./test.py -f 2-query/case_when.py"
|
||||
"python3 ./test.py -f 2-query/partition_limit_interval.py"
|
||||
"python3 ./test.py -f 2-query/fill.py"
|
||||
"python3 ./test.py -f query/query_basic.py -N 3"
|
||||
"python3 ./test.py -f 7-tmq/basic5.py"
|
||||
"python3 ./test.py -f 8-stream/stream_basic.py"
|
||||
"python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3"
|
||||
)
|
||||
|
||||
# Counters
|
||||
total=${#commands[@]}
|
||||
success_count=0
|
||||
fail_count=0
|
||||
|
||||
# Execute each command
|
||||
for cmd in "${commands[@]}"
|
||||
do
|
||||
echo "===== Executing Command: $cmd =====" | tee -a "$LOG_FILE"
|
||||
# Execute the command and append output and errors to the log file
|
||||
eval "$cmd" >> "$LOG_FILE" 2>&1
|
||||
exit_code=$?
|
||||
|
||||
if [ $exit_code -eq 0 ]; then
|
||||
echo "SUCCESS: $cmd" | tee -a "$LOG_FILE"
|
||||
echo "$cmd" >> "$SUCCESS_FILE"
|
||||
((success_count++))
|
||||
else
|
||||
echo "FAILED: $cmd" | tee -a "$LOG_FILE"
|
||||
echo "$cmd" >> "$FAILED_FILE"
|
||||
((fail_count++))
|
||||
fi
|
||||
echo "" | tee -a "$LOG_FILE" # Add an empty line for separation
|
||||
done
|
||||
|
||||
# Generate the final report
|
||||
echo "===== Test Completed =====" | tee -a "$LOG_FILE"
|
||||
echo "Total Commands Executed: $total" | tee -a "$LOG_FILE"
|
||||
echo "Successful: $success_count" | tee -a "$LOG_FILE"
|
||||
echo "Failed: $fail_count" | tee -a "$LOG_FILE"
|
||||
|
||||
if [ $fail_count -ne 0 ]; then
|
||||
echo "" | tee -a "$LOG_FILE"
|
||||
echo "The following commands failed:" | tee -a "$LOG_FILE"
|
||||
cat "$FAILED_FILE" | tee -a "$LOG_FILE"
|
||||
else
|
||||
echo "All commands executed successfully." | tee -a "$LOG_FILE"
|
||||
fi
|
||||
|
||||
# Optional: Generate a separate report file
|
||||
echo "" > "report.txt"
|
||||
echo "===== Test Report =====" >> "report.txt"
|
||||
echo "Total Commands Executed: $total" >> "report.txt"
|
||||
echo "Successful: $success_count" >> "report.txt"
|
||||
echo "Failed: $fail_count" >> "report.txt"
|
||||
|
||||
if [ $fail_count -ne 0 ]; then
|
||||
echo "" >> "report.txt"
|
||||
echo "The following commands failed:" >> "report.txt"
|
||||
cat "$FAILED_FILE" >> "report.txt"
|
||||
else
|
||||
echo "All commands executed successfully." >> "report.txt"
|
||||
fi
|
||||
|
||||
echo "Detailed logs can be found in $LOG_FILE"
|
||||
echo "Test report can be found in report.txt"
|
|
@ -620,7 +620,10 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
|
|||
cJSON* type = cJSON_CreateString("alter");
|
||||
RAW_NULL_CHECK(type);
|
||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
|
||||
cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ? "child" : "normal");
|
||||
cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ||
|
||||
vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL
|
||||
? "child"
|
||||
: "normal");
|
||||
RAW_NULL_CHECK(tableType);
|
||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType));
|
||||
cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
|
||||
|
@ -752,9 +755,9 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
|
|||
}
|
||||
|
||||
cJSON* colValue = cJSON_CreateString(buf);
|
||||
taosMemoryFree(buf);
|
||||
RAW_NULL_CHECK(colValue);
|
||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colValue", colValue));
|
||||
taosMemoryFree(buf);
|
||||
}
|
||||
|
||||
cJSON* isNullCJson = cJSON_CreateBool(isNull);
|
||||
|
@ -762,6 +765,58 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
|
|||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colValueNull", isNullCJson));
|
||||
break;
|
||||
}
|
||||
case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: {
|
||||
int32_t nTags = taosArrayGetSize(vAlterTbReq.pMultiTag);
|
||||
if (nTags <= 0) {
|
||||
uError("processAlterTable parse multi tags error");
|
||||
goto end;
|
||||
}
|
||||
|
||||
cJSON* tags = cJSON_CreateArray();
|
||||
RAW_NULL_CHECK(tags);
|
||||
for (int32_t i = 0; i < nTags; i++) {
|
||||
cJSON* member = cJSON_CreateObject();
|
||||
RAW_NULL_CHECK(member);
|
||||
|
||||
SMultiTagUpateVal* pTagVal = taosArrayGet(vAlterTbReq.pMultiTag, i);
|
||||
cJSON* tagName = cJSON_CreateString(pTagVal->tagName);
|
||||
RAW_NULL_CHECK(tagName);
|
||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colName", tagName));
|
||||
|
||||
if (pTagVal->tagType == TSDB_DATA_TYPE_JSON) {
|
||||
uError("processAlterTable isJson false");
|
||||
goto end;
|
||||
}
|
||||
bool isNull = pTagVal->isNull;
|
||||
if (!isNull) {
|
||||
char* buf = NULL;
|
||||
int64_t bufSize = 0;
|
||||
if (pTagVal->tagType == TSDB_DATA_TYPE_VARBINARY) {
|
||||
bufSize = pTagVal->nTagVal * 2 + 2 + 3;
|
||||
} else {
|
||||
bufSize = pTagVal->nTagVal + 3;
|
||||
}
|
||||
buf = taosMemoryCalloc(bufSize, 1);
|
||||
RAW_NULL_CHECK(buf);
|
||||
if (dataConverToStr(buf, bufSize, pTagVal->tagType, pTagVal->pTagVal, pTagVal->nTagVal, NULL) !=
|
||||
TSDB_CODE_SUCCESS) {
|
||||
taosMemoryFree(buf);
|
||||
goto end;
|
||||
}
|
||||
cJSON* colValue = cJSON_CreateString(buf);
|
||||
taosMemoryFree(buf);
|
||||
RAW_NULL_CHECK(colValue);
|
||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colValue", colValue));
|
||||
}
|
||||
cJSON* isNullCJson = cJSON_CreateBool(isNull);
|
||||
RAW_NULL_CHECK(isNullCJson);
|
||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colValueNull", isNullCJson));
|
||||
RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, member));
|
||||
}
|
||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags));
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS: {
|
||||
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
|
||||
RAW_NULL_CHECK(colName);
|
||||
|
@ -775,6 +830,9 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
|
|||
|
||||
end:
|
||||
uDebug("alter table return");
|
||||
if (vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL) {
|
||||
taosArrayDestroy(vAlterTbReq.pMultiTag);
|
||||
}
|
||||
tDecoderClear(&decoder);
|
||||
*pJson = json;
|
||||
}
|
||||
|
|
|
@ -233,7 +233,7 @@ int32_t smlBuildSuperTableInfo(SSmlHandle *info, SSmlLineInfo *currElement, SSml
|
|||
goto END;
|
||||
}
|
||||
SML_CHECK_CODE(smlBuildSTableMeta(info->dataFormat, sMeta));
|
||||
for (int i = 1; i < pTableMeta->tableInfo.numOfTags + pTableMeta->tableInfo.numOfColumns; i++) {
|
||||
for (int i = 0; i < pTableMeta->tableInfo.numOfTags + pTableMeta->tableInfo.numOfColumns; i++) {
|
||||
SSchema *col = pTableMeta->schema + i;
|
||||
SSmlKv kv = {.key = col->name, .keyLen = strlen(col->name), .type = col->type};
|
||||
if (col->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
|
@ -772,22 +772,26 @@ END:
|
|||
RETURN
|
||||
}
|
||||
|
||||
static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols, bool isTag) {
|
||||
static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SHashObj *hashTmp = taosHashInit(length, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
|
||||
SML_CHECK_NULL(hashTmp);
|
||||
int32_t i = 0;
|
||||
for (; i < length; i++) {
|
||||
SML_CHECK_CODE(taosHashPut(hashTmp, schema[i].name, strlen(schema[i].name), &i, SHORT_BYTES));
|
||||
for (int32_t i = 0; i < length; i++) {
|
||||
SML_CHECK_CODE(taosHashPut(hashTmp, schema[i].name, strlen(schema[i].name), &schema[i], sizeof(SSchema)));
|
||||
}
|
||||
i = isTag ? 0 : 1;
|
||||
for (; i < taosArrayGetSize(cols); i++) {
|
||||
for (int32_t i = 0; i < taosArrayGetSize(cols); i++) {
|
||||
SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i);
|
||||
SML_CHECK_NULL(kv);
|
||||
if (taosHashGet(hashTmp, kv->key, kv->keyLen) == NULL) {
|
||||
SSchema *sTmp = taosHashGet(hashTmp, kv->key, kv->keyLen);
|
||||
if (sTmp == NULL) {
|
||||
SML_CHECK_CODE(TSDB_CODE_SML_INVALID_DATA);
|
||||
}
|
||||
if (IS_VAR_DATA_TYPE(kv->type) && kv->length + VARSTR_HEADER_SIZE > sTmp->bytes){
|
||||
uError("column %s (type %s) bytes invalid. db bytes:%d, kv bytes:%zu", sTmp->name,
|
||||
tDataTypes[sTmp->type].name, sTmp->bytes, kv->length);
|
||||
SML_CHECK_CODE(TSDB_CODE_INTERNAL_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
END:
|
||||
|
@ -1132,8 +1136,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
|||
}
|
||||
|
||||
if (needCheckMeta) {
|
||||
SML_CHECK_CODE(smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags, sTableData->tags, true));
|
||||
SML_CHECK_CODE(smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols, false));
|
||||
SML_CHECK_CODE(smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags, sTableData->tags));
|
||||
SML_CHECK_CODE(smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols));
|
||||
}
|
||||
|
||||
taosMemoryFreeClear(sTableData->tableMeta);
|
||||
|
|
|
@ -10511,6 +10511,21 @@ int32_t tEncodeSVAlterTbReq(SEncoder *pEncoder, const SVAlterTbReq *pReq) {
|
|||
TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, pReq->pTagVal, pReq->nTagVal));
|
||||
}
|
||||
break;
|
||||
case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: {
|
||||
int32_t nTags = taosArrayGetSize(pReq->pMultiTag);
|
||||
TAOS_CHECK_EXIT(tEncodeI32v(pEncoder, nTags));
|
||||
for (int32_t i = 0; i < nTags; i++) {
|
||||
SMultiTagUpateVal *pTag = taosArrayGet(pReq->pMultiTag, i);
|
||||
TAOS_CHECK_EXIT(tEncodeI32v(pEncoder, pTag->colId));
|
||||
TAOS_CHECK_EXIT(tEncodeCStr(pEncoder, pTag->tagName));
|
||||
TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTag->isNull));
|
||||
TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pTag->tagType));
|
||||
if (!pTag->isNull) {
|
||||
TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, pTag->pTagVal, pTag->nTagVal));
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
|
||||
TAOS_CHECK_EXIT(tEncodeI8(pEncoder, pReq->updateTTL));
|
||||
if (pReq->updateTTL) {
|
||||
|
@ -10577,6 +10592,28 @@ static int32_t tDecodeSVAlterTbReqCommon(SDecoder *pDecoder, SVAlterTbReq *pReq)
|
|||
TAOS_CHECK_EXIT(tDecodeBinary(pDecoder, &pReq->pTagVal, &pReq->nTagVal));
|
||||
}
|
||||
break;
|
||||
case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: {
|
||||
int32_t nTags;
|
||||
TAOS_CHECK_EXIT(tDecodeI32v(pDecoder, &nTags));
|
||||
pReq->pMultiTag = taosArrayInit(nTags, sizeof(SMultiTagUpateVal));
|
||||
if (pReq->pMultiTag == NULL) {
|
||||
TAOS_CHECK_EXIT(terrno);
|
||||
}
|
||||
for (int32_t i = 0; i < nTags; i++) {
|
||||
SMultiTagUpateVal tag;
|
||||
TAOS_CHECK_EXIT(tDecodeI32v(pDecoder, &tag.colId));
|
||||
TAOS_CHECK_EXIT(tDecodeCStr(pDecoder, &tag.tagName));
|
||||
TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &tag.isNull));
|
||||
TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &tag.tagType));
|
||||
if (!tag.isNull) {
|
||||
TAOS_CHECK_EXIT(tDecodeBinary(pDecoder, &tag.pTagVal, &tag.nTagVal));
|
||||
}
|
||||
if (taosArrayPush(pReq->pMultiTag, &tag) == NULL) {
|
||||
TAOS_CHECK_EXIT(terrno);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
|
||||
TAOS_CHECK_EXIT(tDecodeI8(pDecoder, &pReq->updateTTL));
|
||||
if (pReq->updateTTL) {
|
||||
|
@ -10642,6 +10679,18 @@ _exit:
|
|||
return code;
|
||||
}
|
||||
|
||||
void tfreeMultiTagUpateVal(void *val) {
|
||||
SMultiTagUpateVal *pTag = val;
|
||||
taosMemoryFree(pTag->tagName);
|
||||
for (int i = 0; i < taosArrayGetSize(pTag->pTagArray); ++i) {
|
||||
STagVal *p = (STagVal *)taosArrayGet(pTag->pTagArray, i);
|
||||
if (IS_VAR_DATA_TYPE(p->type)) {
|
||||
taosMemoryFreeClear(p->pData);
|
||||
}
|
||||
}
|
||||
|
||||
taosArrayDestroy(pTag->pTagArray);
|
||||
}
|
||||
int32_t tEncodeSVAlterTbRsp(SEncoder *pEncoder, const SVAlterTbRsp *pRsp) {
|
||||
int32_t code = 0;
|
||||
int32_t lino;
|
||||
|
|
|
@ -2011,6 +2011,250 @@ _err:
|
|||
return terrno != 0 ? terrno : TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
static int metaUpdateTableMultiTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) {
|
||||
SMetaEntry ctbEntry = {0};
|
||||
SMetaEntry stbEntry = {0};
|
||||
void *pVal = NULL;
|
||||
int nVal = 0;
|
||||
int ret;
|
||||
int c;
|
||||
tb_uid_t uid;
|
||||
int64_t oversion;
|
||||
const void *pData = NULL;
|
||||
int nData = 0;
|
||||
SHashObj *pTagTable = NULL;
|
||||
|
||||
// search name index
|
||||
ret = tdbTbGet(pMeta->pNameIdx, pAlterTbReq->tbName, strlen(pAlterTbReq->tbName) + 1, &pVal, &nVal);
|
||||
if (ret < 0) {
|
||||
return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST;
|
||||
}
|
||||
|
||||
uid = *(tb_uid_t *)pVal;
|
||||
tdbFree(pVal);
|
||||
pVal = NULL;
|
||||
|
||||
// search uid index
|
||||
TBC *pUidIdxc = NULL;
|
||||
|
||||
TAOS_CHECK_RETURN(tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL));
|
||||
if (tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c) < 0) {
|
||||
metaTrace("meta/table: failed to move to uid index, uid:%" PRId64, uid);
|
||||
}
|
||||
if (c != 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
metaError("meta/table: invalide c: %" PRId32 " update tb tag val failed.", c);
|
||||
return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST;
|
||||
}
|
||||
|
||||
if (tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData) != 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
metaError("meta/table: failed to get uid index, uid:%" PRId64, uid);
|
||||
return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST;
|
||||
}
|
||||
oversion = ((SUidIdxVal *)pData)[0].version;
|
||||
|
||||
// search table.db
|
||||
TBC *pTbDbc = NULL;
|
||||
SDecoder dc1 = {0};
|
||||
SDecoder dc2 = {0};
|
||||
|
||||
/* get ctbEntry */
|
||||
TAOS_CHECK_RETURN(tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL));
|
||||
if (tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c) != 0) {
|
||||
metaError("meta/table: failed to move to tb db, uid:%" PRId64, uid);
|
||||
}
|
||||
if (c != 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
tdbTbcClose(pTbDbc);
|
||||
metaError("meta/table: invalide c: %" PRId32 " update tb tag val failed.", c);
|
||||
return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST;
|
||||
}
|
||||
|
||||
if (tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData) != 0) {
|
||||
metaError("meta/table: failed to get tb db, uid:%" PRId64, uid);
|
||||
tdbTbcClose(pUidIdxc);
|
||||
tdbTbcClose(pTbDbc);
|
||||
return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST;
|
||||
}
|
||||
|
||||
if ((ctbEntry.pBuf = taosMemoryMalloc(nData)) == NULL) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
tdbTbcClose(pTbDbc);
|
||||
return terrno;
|
||||
}
|
||||
memcpy(ctbEntry.pBuf, pData, nData);
|
||||
tDecoderInit(&dc1, ctbEntry.pBuf, nData);
|
||||
ret = metaDecodeEntry(&dc1, &ctbEntry);
|
||||
if (ret < 0) {
|
||||
terrno = ret;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
/* get stbEntry*/
|
||||
if (tdbTbGet(pMeta->pUidIdx, &ctbEntry.ctbEntry.suid, sizeof(tb_uid_t), &pVal, &nVal) != 0) {
|
||||
metaError("meta/table: failed to get uid index, uid:%" PRId64, ctbEntry.ctbEntry.suid);
|
||||
}
|
||||
if (!pVal) {
|
||||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
if (tdbTbGet(pMeta->pTbDb, &((STbDbKey){.uid = ctbEntry.ctbEntry.suid, .version = ((SUidIdxVal *)pVal)[0].version}),
|
||||
sizeof(STbDbKey), (void **)&stbEntry.pBuf, &nVal) != 0) {
|
||||
metaError("meta/table: failed to get tb db, uid:%" PRId64, ctbEntry.ctbEntry.suid);
|
||||
}
|
||||
tdbFree(pVal);
|
||||
tDecoderInit(&dc2, stbEntry.pBuf, nVal);
|
||||
ret = metaDecodeEntry(&dc2, &stbEntry);
|
||||
if (ret < 0) {
|
||||
terrno = ret;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
int32_t nTagVals = taosArrayGetSize(pAlterTbReq->pMultiTag);
|
||||
pTagTable = taosHashInit(nTagVals, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
|
||||
if (pTagTable == NULL) {
|
||||
ret = terrno;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
// remove duplicate tag name
|
||||
for (int i = 0; i < nTagVals; i++) {
|
||||
SMultiTagUpateVal *pTagVal = taosArrayGet(pAlterTbReq->pMultiTag, i);
|
||||
ret = taosHashPut(pTagTable, pTagVal->tagName, strlen(pTagVal->tagName), pTagVal, sizeof(*pTagVal));
|
||||
if (ret != 0) {
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag;
|
||||
SSchema *pColumn = NULL;
|
||||
int32_t iCol = 0;
|
||||
int32_t count = 0;
|
||||
|
||||
for (;;) {
|
||||
pColumn = NULL;
|
||||
|
||||
if (iCol >= pTagSchema->nCols) break;
|
||||
pColumn = &pTagSchema->pSchema[iCol];
|
||||
if (taosHashGet(pTagTable, pColumn->name, strlen(pColumn->name)) != NULL) {
|
||||
count++;
|
||||
}
|
||||
iCol++;
|
||||
}
|
||||
if (count != taosHashGetSize(pTagTable)) {
|
||||
terrno = TSDB_CODE_VND_COL_NOT_EXISTS;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
ctbEntry.version = version;
|
||||
if (pTagSchema->nCols == 1 && pTagSchema->pSchema[0].type == TSDB_DATA_TYPE_JSON) {
|
||||
terrno = TSDB_CODE_VND_COL_NOT_EXISTS;
|
||||
goto _err;
|
||||
} else {
|
||||
const STag *pOldTag = (const STag *)ctbEntry.ctbEntry.pTags;
|
||||
STag *pNewTag = NULL;
|
||||
SArray *pTagArray = taosArrayInit(pTagSchema->nCols, sizeof(STagVal));
|
||||
if (!pTagArray) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
for (int32_t i = 0; i < pTagSchema->nCols; i++) {
|
||||
SSchema *pCol = &pTagSchema->pSchema[i];
|
||||
SMultiTagUpateVal *pTagVal = taosHashGet(pTagTable, pCol->name, strlen(pCol->name));
|
||||
if (pTagVal == NULL) {
|
||||
STagVal val = {.cid = pCol->colId};
|
||||
if (tTagGet(pOldTag, &val)) {
|
||||
if (taosArrayPush(pTagArray, &val) == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
taosArrayDestroy(pTagArray);
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
STagVal val = {0};
|
||||
val.type = pCol->type;
|
||||
val.cid = pCol->colId;
|
||||
if (pTagVal->isNull) continue;
|
||||
|
||||
if (IS_VAR_DATA_TYPE(pCol->type)) {
|
||||
val.pData = pTagVal->pTagVal;
|
||||
val.nData = pTagVal->nTagVal;
|
||||
} else {
|
||||
memcpy(&val.i64, pTagVal->pTagVal, pTagVal->nTagVal);
|
||||
}
|
||||
if (taosArrayPush(pTagArray, &val) == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
taosArrayDestroy(pTagArray);
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
}
|
||||
if ((terrno = tTagNew(pTagArray, pTagSchema->version, false, &pNewTag)) < 0) {
|
||||
taosArrayDestroy(pTagArray);
|
||||
goto _err;
|
||||
}
|
||||
ctbEntry.ctbEntry.pTags = (uint8_t *)pNewTag;
|
||||
taosArrayDestroy(pTagArray);
|
||||
}
|
||||
|
||||
metaWLock(pMeta);
|
||||
|
||||
// save to table.db
|
||||
if (metaSaveToTbDb(pMeta, &ctbEntry) < 0) {
|
||||
metaError("meta/table: failed to save to tb db:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid);
|
||||
}
|
||||
|
||||
// save to uid.idx
|
||||
if (metaUpdateUidIdx(pMeta, &ctbEntry) < 0) {
|
||||
metaError("meta/table: failed to update uid idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid);
|
||||
}
|
||||
|
||||
if (metaUpdateTagIdx(pMeta, &ctbEntry) < 0) {
|
||||
metaError("meta/table: failed to update tag idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid);
|
||||
}
|
||||
|
||||
SCtbIdxKey ctbIdxKey = {.suid = ctbEntry.ctbEntry.suid, .uid = uid};
|
||||
if (tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags,
|
||||
((STag *)(ctbEntry.ctbEntry.pTags))->len, pMeta->txn) < 0) {
|
||||
metaError("meta/table: failed to upsert ctb idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid);
|
||||
}
|
||||
|
||||
if (metaUidCacheClear(pMeta, ctbEntry.ctbEntry.suid) < 0) {
|
||||
metaError("meta/table: failed to clear uid cache:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid);
|
||||
}
|
||||
|
||||
if (metaTbGroupCacheClear(pMeta, ctbEntry.ctbEntry.suid) < 0) {
|
||||
metaError("meta/table: failed to clear group cache:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid);
|
||||
}
|
||||
|
||||
if (metaUpdateChangeTime(pMeta, ctbEntry.uid, pAlterTbReq->ctimeMs) < 0) {
|
||||
metaError("meta/table: failed to update change time:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid);
|
||||
}
|
||||
|
||||
metaULock(pMeta);
|
||||
|
||||
tDecoderClear(&dc1);
|
||||
tDecoderClear(&dc2);
|
||||
taosMemoryFree((void *)ctbEntry.ctbEntry.pTags);
|
||||
if (ctbEntry.pBuf) taosMemoryFree(ctbEntry.pBuf);
|
||||
if (stbEntry.pBuf) tdbFree(stbEntry.pBuf);
|
||||
tdbTbcClose(pTbDbc);
|
||||
tdbTbcClose(pUidIdxc);
|
||||
taosHashCleanup(pTagTable);
|
||||
return 0;
|
||||
|
||||
_err:
|
||||
tDecoderClear(&dc1);
|
||||
tDecoderClear(&dc2);
|
||||
if (ctbEntry.pBuf) taosMemoryFree(ctbEntry.pBuf);
|
||||
if (stbEntry.pBuf) tdbFree(stbEntry.pBuf);
|
||||
tdbTbcClose(pTbDbc);
|
||||
tdbTbcClose(pUidIdxc);
|
||||
taosHashCleanup(pTagTable);
|
||||
return -1;
|
||||
}
|
||||
static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pAlterTbReq) {
|
||||
SMetaEntry ctbEntry = {0};
|
||||
SMetaEntry stbEntry = {0};
|
||||
|
@ -2051,7 +2295,9 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
|
|||
}
|
||||
|
||||
if (tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData) != 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
metaError("meta/table: failed to get uid index, uid:%" PRId64, uid);
|
||||
return terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST;
|
||||
}
|
||||
oversion = ((SUidIdxVal *)pData)[0].version;
|
||||
|
||||
|
@ -2074,6 +2320,9 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
|
|||
|
||||
if (tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData) != 0) {
|
||||
metaError("meta/table: failed to get tb db, uid:%" PRId64, uid);
|
||||
tdbTbcClose(pUidIdxc);
|
||||
tdbTbcClose(pTbDbc);
|
||||
return terrno = TSDB_CODE_INVALID_MSG;
|
||||
}
|
||||
|
||||
if ((ctbEntry.pBuf = taosMemoryMalloc(nData)) == NULL) {
|
||||
|
@ -2113,6 +2362,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
|
|||
SSchemaWrapper *pTagSchema = &stbEntry.stbEntry.schemaTag;
|
||||
SSchema *pColumn = NULL;
|
||||
int32_t iCol = 0;
|
||||
|
||||
for (;;) {
|
||||
pColumn = NULL;
|
||||
|
||||
|
@ -2199,11 +2449,6 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
|
|||
metaError("meta/table: failed to update tag idx:%s uid:%" PRId64, ctbEntry.name, ctbEntry.uid);
|
||||
}
|
||||
|
||||
if (NULL == ctbEntry.ctbEntry.pTags) {
|
||||
metaError("meta/table: null tags, update tag val failed.");
|
||||
goto _err;
|
||||
}
|
||||
|
||||
SCtbIdxKey ctbIdxKey = {.suid = ctbEntry.ctbEntry.suid, .uid = uid};
|
||||
if (tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags,
|
||||
((STag *)(ctbEntry.ctbEntry.pTags))->len, pMeta->txn) < 0) {
|
||||
|
@ -2736,6 +2981,9 @@ int metaAlterTable(SMeta *pMeta, int64_t version, SVAlterTbReq *pReq, STableMeta
|
|||
return metaAlterTableColumn(pMeta, version, pReq, pMetaRsp);
|
||||
case TSDB_ALTER_TABLE_UPDATE_TAG_VAL:
|
||||
return metaUpdateTableTagVal(pMeta, version, pReq);
|
||||
case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL:
|
||||
return metaUpdateTableMultiTagVal(pMeta, version, pReq);
|
||||
return terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION;
|
||||
case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
|
||||
return metaUpdateTableOptions(pMeta, version, pReq);
|
||||
case TSDB_ALTER_TABLE_ADD_TAG_INDEX:
|
||||
|
|
|
@ -145,8 +145,12 @@ static int32_t vnodePreProcessAlterTableMsg(SVnode *pVnode, SRpcMsg *pMsg) {
|
|||
SVAlterTbReq vAlterTbReq = {0};
|
||||
int64_t ctimeMs = taosGetTimestampMs();
|
||||
if (tDecodeSVAlterTbReqSetCtime(&dc, &vAlterTbReq, ctimeMs) < 0) {
|
||||
taosArrayDestroy(vAlterTbReq.pMultiTag);
|
||||
vAlterTbReq.pMultiTag = NULL;
|
||||
goto _exit;
|
||||
}
|
||||
taosArrayDestroy(vAlterTbReq.pMultiTag);
|
||||
vAlterTbReq.pMultiTag = NULL;
|
||||
|
||||
code = 0;
|
||||
|
||||
|
@ -666,8 +670,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
|
|||
}
|
||||
} break;
|
||||
case TDMT_VND_STREAM_TASK_RESET: {
|
||||
if (pVnode->restored && vnodeIsLeader(pVnode) &&
|
||||
(code = tqProcessTaskResetReq(pVnode->pTq, pMsg)) < 0) {
|
||||
if (pVnode->restored && vnodeIsLeader(pVnode) && (code = tqProcessTaskResetReq(pVnode->pTq, pMsg)) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
|
||||
|
@ -1367,6 +1370,7 @@ static int32_t vnodeProcessAlterTbReq(SVnode *pVnode, int64_t ver, void *pReq, i
|
|||
}
|
||||
|
||||
_exit:
|
||||
taosArrayDestroy(vAlterTbReq.pMultiTag);
|
||||
tEncodeSize(tEncodeSVAlterTbRsp, &vAlterTbRsp, pRsp->contLen, ret);
|
||||
pRsp->pCont = rpcMallocCont(pRsp->contLen);
|
||||
tEncoderInit(&ec, pRsp->pCont, pRsp->contLen);
|
||||
|
|
|
@ -748,7 +748,7 @@ static int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx
|
|||
}
|
||||
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: {
|
||||
SAggPhysiNode *pAggNode = (SAggPhysiNode *)pNode;
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT, (pAggNode->pGroupKeys ? "GroupAggragate" : "Aggragate"));
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT, (pAggNode->pGroupKeys ? "GroupAggregate" : "Aggregate"));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
|
||||
if (pResNode->pExecInfo) {
|
||||
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "executorInt.h"
|
||||
typedef struct SDynQueryCtrlExecInfo {
|
||||
int64_t prevBlkNum;
|
||||
int64_t prevBlkRows;
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
#include "executorInt.h"
|
||||
|
||||
#define GROUP_CACHE_DEFAULT_MAX_FILE_SIZE 104857600
|
||||
#define GROUP_CACHE_MAX_FILE_FDS 10
|
||||
|
|
|
@ -19,6 +19,9 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "executorInt.h"
|
||||
#include "operator.h"
|
||||
|
||||
#define HASH_JOIN_DEFAULT_PAGE_SIZE 10485760
|
||||
#define HJOIN_DEFAULT_BLK_ROWS_NUM 4096
|
||||
#define HJOIN_BLK_SIZE_LIMIT 10485760
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "executorInt.h"
|
||||
typedef struct SOperatorCostInfo {
|
||||
double openCost;
|
||||
double totalCost;
|
||||
|
|
|
@ -20,6 +20,8 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "executorInt.h"
|
||||
|
||||
#define GET_TASKID(_t) (((SExecTaskInfo*)(_t))->id.str)
|
||||
|
||||
enum {
|
||||
|
|
|
@ -184,6 +184,10 @@ static bool nextGroupedResult(SOperatorInfo* pOperator) {
|
|||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
SAggOperatorInfo* pAggInfo = pOperator->info;
|
||||
|
||||
if(!pAggInfo) {
|
||||
qError("function:%s, pAggInfo is NULL", __func__);
|
||||
return false;
|
||||
}
|
||||
if (pOperator->blocking && pAggInfo->hasValidBlock) {
|
||||
return false;
|
||||
}
|
||||
|
@ -333,6 +337,10 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
|
|||
|
||||
int32_t doAggregateImpl(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (!pOperator || (pOperator->exprSupp.numOfExprs > 0 && pCtx == NULL)) {
|
||||
qError("%s failed at line %d since pCtx is NULL.", __func__, __LINE__);
|
||||
return TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR;
|
||||
}
|
||||
for (int32_t k = 0; k < pOperator->exprSupp.numOfExprs; ++k) {
|
||||
if (functionNeedToExecute(&pCtx[k])) {
|
||||
// todo add a dummy function to avoid process check
|
||||
|
|
|
@ -182,9 +182,17 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
|
|||
int32_t lino = 0;
|
||||
SFillOperatorInfo* pInfo = pOperator->info;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
if (pInfo == NULL || pTaskInfo == NULL) {
|
||||
qError("%s failed at line %d since pInfo or pTaskInfo is NULL.", __func__, __LINE__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SResultInfo* pResultInfo = &pOperator->resultInfo;
|
||||
SSDataBlock* pResBlock = pInfo->pFinalRes;
|
||||
if (pResBlock == NULL) {
|
||||
qError("%s failed at line %d since pResBlock is NULL.", __func__, __LINE__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
blockDataCleanup(pResBlock);
|
||||
|
||||
|
|
|
@ -169,6 +169,7 @@ static int32_t forecastCloseBuf(SForecastSupp* pSupp) {
|
|||
code = taosAnalBufWriteOptInt(pBuf, "start", start);
|
||||
if (code != 0) return code;
|
||||
|
||||
|
||||
bool hasEvery = taosAnalGetOptInt(pSupp->algoOpt, "every", &every);
|
||||
if (!hasEvery) {
|
||||
qDebug("forecast every not found from %s, use %" PRId64, pSupp->algoOpt, every);
|
||||
|
|
|
@ -564,7 +564,6 @@ SSDataBlock* doApplyIndefinitFunction1(SOperatorInfo* pOperator) {
|
|||
|
||||
int32_t doApplyIndefinitFunction(SOperatorInfo* pOperator, SSDataBlock** pResBlock) {
|
||||
QRY_PARAM_CHECK(pResBlock);
|
||||
|
||||
SIndefOperatorInfo* pIndefInfo = pOperator->info;
|
||||
SOptrBasicInfo* pInfo = &pIndefInfo->binfo;
|
||||
SExprSupp* pSup = &pOperator->exprSupp;
|
||||
|
@ -1178,5 +1177,8 @@ _exit:
|
|||
if(processByRowFunctionCtx) {
|
||||
taosArrayDestroy(processByRowFunctionCtx);
|
||||
}
|
||||
if(code) {
|
||||
qError("project apply functions failed at: %s:%d", __func__, lino);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -412,6 +412,13 @@ int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNo
|
|||
return code;
|
||||
}
|
||||
|
||||
static void resetOutputChangedFunc(SFunctionNode *pFunc, const SFunctionNode* pSrcFunc) {
|
||||
if (funcMgtBuiltins[pFunc->funcId].type == FUNCTION_TYPE_LAST_MERGE) {
|
||||
pFunc->node.resType = pSrcFunc->node.resType;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFunc, SNodeList* pParameterList, SFunctionNode** ppFunc) {
|
||||
int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)ppFunc);
|
||||
if (NULL == *ppFunc) {
|
||||
|
@ -430,6 +437,7 @@ int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFu
|
|||
*ppFunc = NULL;
|
||||
return code;
|
||||
}
|
||||
resetOutputChangedFunc(*ppFunc, pSrcFunc);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
|
|
@ -14,6 +14,7 @@
*/

#include "cmdnodes.h"
#include "functionMgt.h"
#include "nodesUtil.h"
#include "plannodes.h"
#include "querynodes.h"

@ -22,7 +23,6 @@
#include "tdatablock.h"
#include "thash.h"
#include "tref.h"
#include "functionMgt.h"

typedef struct SNodeMemChunk {
int32_t availableSize;

@ -59,12 +59,10 @@ char* getFullJoinTypeString(EJoinType type, EJoinSubType stype) {
{"INNER", "INNER", "INNER", "INNER", "INNER", "INNER ANY", "INNER", "INNER"},
{"LEFT", "LEFT", "LEFT OUTER", "LEFT SEMI", "LEFT ANTI", "LEFT ANY", "LEFT ASOF", "LEFT WINDOW"},
{"RIGHT", "RIGHT", "RIGHT OUTER", "RIGHT SEMI", "RIGHT ANTI", "RIGHT ANY", "RIGHT ASOF", "RIGHT WINDOW"},
{"FULL", "FULL", "FULL OUTER", "FULL", "FULL", "FULL ANY", "FULL", "FULL"}
};
{"FULL", "FULL", "FULL OUTER", "FULL", "FULL", "FULL ANY", "FULL", "FULL"}};
return joinFullType[type][stype];
}

int32_t mergeJoinConds(SNode** ppDst, SNode** ppSrc) {
if (NULL == *ppSrc) {
return TSDB_CODE_SUCCESS;

@ -74,14 +72,16 @@ int32_t mergeJoinConds(SNode** ppDst, SNode** ppSrc) {
*ppSrc = NULL;
return TSDB_CODE_SUCCESS;
}
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppSrc) && ((SLogicConditionNode*)(*ppSrc))->condType == LOGIC_COND_TYPE_AND) {
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppSrc) &&
((SLogicConditionNode*)(*ppSrc))->condType == LOGIC_COND_TYPE_AND) {
TSWAP(*ppDst, *ppSrc);
}
int32_t code = 0;
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppDst)) {
SLogicConditionNode* pDst = (SLogicConditionNode*)*ppDst;
if (pDst->condType == LOGIC_COND_TYPE_AND) {
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppSrc) && ((SLogicConditionNode*)(*ppSrc))->condType == LOGIC_COND_TYPE_AND) {
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*ppSrc) &&
((SLogicConditionNode*)(*ppSrc))->condType == LOGIC_COND_TYPE_AND) {
code = nodesListStrictAppendList(pDst->pParameterList, ((SLogicConditionNode*)(*ppSrc))->pParameterList);
((SLogicConditionNode*)(*ppSrc))->pParameterList = NULL;
} else {

@ -115,7 +115,6 @@ int32_t mergeJoinConds(SNode** ppDst, SNode** ppSrc) {
return code;
}

static int32_t callocNodeChunk(SNodeAllocator* pAllocator, SNodeMemChunk** pOutChunk) {
SNodeMemChunk* pNewChunk = taosMemoryCalloc(1, sizeof(SNodeMemChunk) + pAllocator->chunkSize);
if (NULL == pNewChunk) {

@ -155,7 +154,8 @@ static int32_t nodesCallocImpl(int32_t size, void** pOut) {
void* p = g_pNodeAllocator->pCurrChunk->pBuf + g_pNodeAllocator->pCurrChunk->usedSize;
g_pNodeAllocator->pCurrChunk->usedSize += size;
*pOut = p;
return TSDB_CODE_SUCCESS;;
return TSDB_CODE_SUCCESS;
;
}

static int32_t nodesCalloc(int32_t num, int32_t size, void** pOut) {

@ -237,7 +237,8 @@ void nodesDestroyAllocatorSet() {
refId = pAllocator->self;
int32_t code = taosRemoveRef(g_allocatorReqRefPool, refId);
if (TSDB_CODE_SUCCESS != code) {
nodesError("failed to remove ref at: %s:%d, rsetId:%d, refId:%"PRId64, __func__, __LINE__, g_allocatorReqRefPool, refId);
nodesError("failed to remove ref at: %s:%d, rsetId:%d, refId:%" PRId64, __func__, __LINE__,
g_allocatorReqRefPool, refId);
}
pAllocator = taosIterateRef(g_allocatorReqRefPool, refId);
}

@ -333,7 +334,8 @@ void nodesDestroyAllocator(int64_t allocatorId) {

int32_t code = taosRemoveRef(g_allocatorReqRefPool, allocatorId);
if (TSDB_CODE_SUCCESS != code) {
nodesError("failed to remove ref at: %s:%d, rsetId:%d, refId:%"PRId64, __func__, __LINE__, g_allocatorReqRefPool, allocatorId);
nodesError("failed to remove ref at: %s:%d, rsetId:%d, refId:%" PRId64, __func__, __LINE__, g_allocatorReqRefPool,
allocatorId);
}
}

@ -352,198 +354,290 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) {
int32_t code = 0;
switch (type) {
case QUERY_NODE_COLUMN:
code = makeNode(type, sizeof(SColumnNode), &pNode); break;
code = makeNode(type, sizeof(SColumnNode), &pNode);
break;
case QUERY_NODE_VALUE:
code = makeNode(type, sizeof(SValueNode), &pNode); break;
code = makeNode(type, sizeof(SValueNode), &pNode);
break;
case QUERY_NODE_OPERATOR:
code = makeNode(type, sizeof(SOperatorNode), &pNode); break;
code = makeNode(type, sizeof(SOperatorNode), &pNode);
break;
case QUERY_NODE_LOGIC_CONDITION:
code = makeNode(type, sizeof(SLogicConditionNode), &pNode); break;
code = makeNode(type, sizeof(SLogicConditionNode), &pNode);
break;
case QUERY_NODE_FUNCTION:
code = makeNode(type, sizeof(SFunctionNode), &pNode); break;
code = makeNode(type, sizeof(SFunctionNode), &pNode);
break;
case QUERY_NODE_REAL_TABLE:
code = makeNode(type, sizeof(SRealTableNode), &pNode); break;
code = makeNode(type, sizeof(SRealTableNode), &pNode);
break;
case QUERY_NODE_TEMP_TABLE:
code = makeNode(type, sizeof(STempTableNode), &pNode); break;
code = makeNode(type, sizeof(STempTableNode), &pNode);
break;
case QUERY_NODE_JOIN_TABLE:
code = makeNode(type, sizeof(SJoinTableNode), &pNode); break;
code = makeNode(type, sizeof(SJoinTableNode), &pNode);
break;
case QUERY_NODE_GROUPING_SET:
code = makeNode(type, sizeof(SGroupingSetNode), &pNode); break;
code = makeNode(type, sizeof(SGroupingSetNode), &pNode);
break;
case QUERY_NODE_ORDER_BY_EXPR:
code = makeNode(type, sizeof(SOrderByExprNode), &pNode); break;
code = makeNode(type, sizeof(SOrderByExprNode), &pNode);
break;
case QUERY_NODE_LIMIT:
code = makeNode(type, sizeof(SLimitNode), &pNode); break;
code = makeNode(type, sizeof(SLimitNode), &pNode);
break;
case QUERY_NODE_STATE_WINDOW:
code = makeNode(type, sizeof(SStateWindowNode), &pNode); break;
code = makeNode(type, sizeof(SStateWindowNode), &pNode);
break;
case QUERY_NODE_SESSION_WINDOW:
code = makeNode(type, sizeof(SSessionWindowNode), &pNode); break;
code = makeNode(type, sizeof(SSessionWindowNode), &pNode);
break;
case QUERY_NODE_INTERVAL_WINDOW:
code = makeNode(type, sizeof(SIntervalWindowNode), &pNode); break;
code = makeNode(type, sizeof(SIntervalWindowNode), &pNode);
break;
case QUERY_NODE_NODE_LIST:
code = makeNode(type, sizeof(SNodeListNode), &pNode); break;
code = makeNode(type, sizeof(SNodeListNode), &pNode);
break;
case QUERY_NODE_FILL:
code = makeNode(type, sizeof(SFillNode), &pNode); break;
code = makeNode(type, sizeof(SFillNode), &pNode);
break;
case QUERY_NODE_RAW_EXPR:
code = makeNode(type, sizeof(SRawExprNode), &pNode); break;
code = makeNode(type, sizeof(SRawExprNode), &pNode);
break;
case QUERY_NODE_TARGET:
code = makeNode(type, sizeof(STargetNode), &pNode); break;
code = makeNode(type, sizeof(STargetNode), &pNode);
break;
case QUERY_NODE_DATABLOCK_DESC:
code = makeNode(type, sizeof(SDataBlockDescNode), &pNode); break;
code = makeNode(type, sizeof(SDataBlockDescNode), &pNode);
break;
case QUERY_NODE_SLOT_DESC:
code = makeNode(type, sizeof(SSlotDescNode), &pNode); break;
code = makeNode(type, sizeof(SSlotDescNode), &pNode);
break;
case QUERY_NODE_COLUMN_DEF:
code = makeNode(type, sizeof(SColumnDefNode), &pNode); break;
code = makeNode(type, sizeof(SColumnDefNode), &pNode);
break;
case QUERY_NODE_DOWNSTREAM_SOURCE:
code = makeNode(type, sizeof(SDownstreamSourceNode), &pNode); break;
code = makeNode(type, sizeof(SDownstreamSourceNode), &pNode);
break;
case QUERY_NODE_DATABASE_OPTIONS:
code = makeNode(type, sizeof(SDatabaseOptions), &pNode); break;
code = makeNode(type, sizeof(SDatabaseOptions), &pNode);
break;
case QUERY_NODE_TABLE_OPTIONS:
code = makeNode(type, sizeof(STableOptions), &pNode); break;
code = makeNode(type, sizeof(STableOptions), &pNode);
break;
case QUERY_NODE_COLUMN_OPTIONS:
code = makeNode(type, sizeof(SColumnOptions), &pNode); break;
code = makeNode(type, sizeof(SColumnOptions), &pNode);
break;
case QUERY_NODE_INDEX_OPTIONS:
code = makeNode(type, sizeof(SIndexOptions), &pNode); break;
code = makeNode(type, sizeof(SIndexOptions), &pNode);
break;
case QUERY_NODE_EXPLAIN_OPTIONS:
code = makeNode(type, sizeof(SExplainOptions), &pNode); break;
code = makeNode(type, sizeof(SExplainOptions), &pNode);
break;
case QUERY_NODE_STREAM_OPTIONS:
code = makeNode(type, sizeof(SStreamOptions), &pNode); break;
code = makeNode(type, sizeof(SStreamOptions), &pNode);
break;
case QUERY_NODE_LEFT_VALUE:
code = makeNode(type, sizeof(SLeftValueNode), &pNode); break;
code = makeNode(type, sizeof(SLeftValueNode), &pNode);
break;
case QUERY_NODE_COLUMN_REF:
code = makeNode(type, sizeof(SColumnRefNode), &pNode); break;
code = makeNode(type, sizeof(SColumnRefNode), &pNode);
break;
case QUERY_NODE_WHEN_THEN:
code = makeNode(type, sizeof(SWhenThenNode), &pNode); break;
code = makeNode(type, sizeof(SWhenThenNode), &pNode);
break;
case QUERY_NODE_CASE_WHEN:
code = makeNode(type, sizeof(SCaseWhenNode), &pNode); break;
code = makeNode(type, sizeof(SCaseWhenNode), &pNode);
break;
case QUERY_NODE_EVENT_WINDOW:
code = makeNode(type, sizeof(SEventWindowNode), &pNode); break;
code = makeNode(type, sizeof(SEventWindowNode), &pNode);
break;
case QUERY_NODE_COUNT_WINDOW:
code = makeNode(type, sizeof(SCountWindowNode), &pNode); break;
code = makeNode(type, sizeof(SCountWindowNode), &pNode);
break;
case QUERY_NODE_ANOMALY_WINDOW:
code = makeNode(type, sizeof(SAnomalyWindowNode), &pNode); break;
code = makeNode(type, sizeof(SAnomalyWindowNode), &pNode);
break;
case QUERY_NODE_HINT:
code = makeNode(type, sizeof(SHintNode), &pNode); break;
code = makeNode(type, sizeof(SHintNode), &pNode);
break;
case QUERY_NODE_VIEW:
code = makeNode(type, sizeof(SViewNode), &pNode); break;
code = makeNode(type, sizeof(SViewNode), &pNode);
break;
case QUERY_NODE_WINDOW_OFFSET:
code = makeNode(type, sizeof(SWindowOffsetNode), &pNode); break;
code = makeNode(type, sizeof(SWindowOffsetNode), &pNode);
break;
case QUERY_NODE_SET_OPERATOR:
code = makeNode(type, sizeof(SSetOperator), &pNode); break;
code = makeNode(type, sizeof(SSetOperator), &pNode);
break;
case QUERY_NODE_SELECT_STMT:
code = makeNode(type, sizeof(SSelectStmt), &pNode); break;
code = makeNode(type, sizeof(SSelectStmt), &pNode);
break;
case QUERY_NODE_VNODE_MODIFY_STMT:
code = makeNode(type, sizeof(SVnodeModifyOpStmt), &pNode); break;
code = makeNode(type, sizeof(SVnodeModifyOpStmt), &pNode);
break;
case QUERY_NODE_CREATE_DATABASE_STMT:
code = makeNode(type, sizeof(SCreateDatabaseStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateDatabaseStmt), &pNode);
break;
case QUERY_NODE_DROP_DATABASE_STMT:
code = makeNode(type, sizeof(SDropDatabaseStmt), &pNode); break;
code = makeNode(type, sizeof(SDropDatabaseStmt), &pNode);
break;
case QUERY_NODE_ALTER_DATABASE_STMT:
code = makeNode(type, sizeof(SAlterDatabaseStmt), &pNode); break;
code = makeNode(type, sizeof(SAlterDatabaseStmt), &pNode);
break;
case QUERY_NODE_FLUSH_DATABASE_STMT:
code = makeNode(type, sizeof(SFlushDatabaseStmt), &pNode); break;
code = makeNode(type, sizeof(SFlushDatabaseStmt), &pNode);
break;
case QUERY_NODE_TRIM_DATABASE_STMT:
code = makeNode(type, sizeof(STrimDatabaseStmt), &pNode); break;
code = makeNode(type, sizeof(STrimDatabaseStmt), &pNode);
break;
case QUERY_NODE_S3MIGRATE_DATABASE_STMT:
code = makeNode(type, sizeof(SS3MigrateDatabaseStmt), &pNode); break;
code = makeNode(type, sizeof(SS3MigrateDatabaseStmt), &pNode);
break;
case QUERY_NODE_CREATE_TABLE_STMT:
code = makeNode(type, sizeof(SCreateTableStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateTableStmt), &pNode);
break;
case QUERY_NODE_CREATE_SUBTABLE_CLAUSE:
code = makeNode(type, sizeof(SCreateSubTableClause), &pNode); break;
code = makeNode(type, sizeof(SCreateSubTableClause), &pNode);
break;
case QUERY_NODE_CREATE_SUBTABLE_FROM_FILE_CLAUSE:
code = makeNode(type, sizeof(SCreateSubTableFromFileClause), &pNode); break;
code = makeNode(type, sizeof(SCreateSubTableFromFileClause), &pNode);
break;
case QUERY_NODE_CREATE_MULTI_TABLES_STMT:
code = makeNode(type, sizeof(SCreateMultiTablesStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateMultiTablesStmt), &pNode);
break;
case QUERY_NODE_DROP_TABLE_CLAUSE:
code = makeNode(type, sizeof(SDropTableClause), &pNode); break;
code = makeNode(type, sizeof(SDropTableClause), &pNode);
break;
case QUERY_NODE_DROP_TABLE_STMT:
code = makeNode(type, sizeof(SDropTableStmt), &pNode); break;
code = makeNode(type, sizeof(SDropTableStmt), &pNode);
break;
case QUERY_NODE_DROP_SUPER_TABLE_STMT:
code = makeNode(type, sizeof(SDropSuperTableStmt), &pNode); break;
code = makeNode(type, sizeof(SDropSuperTableStmt), &pNode);
break;
case QUERY_NODE_ALTER_TABLE_STMT:
case QUERY_NODE_ALTER_SUPER_TABLE_STMT:
code = makeNode(type, sizeof(SAlterTableStmt), &pNode); break;
code = makeNode(type, sizeof(SAlterTableStmt), &pNode);
break;
case QUERY_NODE_CREATE_USER_STMT:
code = makeNode(type, sizeof(SCreateUserStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateUserStmt), &pNode);
break;
case QUERY_NODE_ALTER_USER_STMT:
code = makeNode(type, sizeof(SAlterUserStmt), &pNode); break;
code = makeNode(type, sizeof(SAlterUserStmt), &pNode);
break;
case QUERY_NODE_DROP_USER_STMT:
code = makeNode(type, sizeof(SDropUserStmt), &pNode); break;
code = makeNode(type, sizeof(SDropUserStmt), &pNode);
break;
case QUERY_NODE_USE_DATABASE_STMT:
code = makeNode(type, sizeof(SUseDatabaseStmt), &pNode); break;
code = makeNode(type, sizeof(SUseDatabaseStmt), &pNode);
break;
case QUERY_NODE_CREATE_DNODE_STMT:
code = makeNode(type, sizeof(SCreateDnodeStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateDnodeStmt), &pNode);
break;
case QUERY_NODE_DROP_DNODE_STMT:
code = makeNode(type, sizeof(SDropDnodeStmt), &pNode); break;
code = makeNode(type, sizeof(SDropDnodeStmt), &pNode);
break;
case QUERY_NODE_ALTER_DNODE_STMT:
code = makeNode(type, sizeof(SAlterDnodeStmt), &pNode); break;
code = makeNode(type, sizeof(SAlterDnodeStmt), &pNode);
break;
case QUERY_NODE_CREATE_ANODE_STMT:
code = makeNode(type, sizeof(SCreateAnodeStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateAnodeStmt), &pNode);
break;
case QUERY_NODE_DROP_ANODE_STMT:
code = makeNode(type, sizeof(SDropAnodeStmt), &pNode); break;
code = makeNode(type, sizeof(SDropAnodeStmt), &pNode);
break;
case QUERY_NODE_UPDATE_ANODE_STMT:
code = makeNode(type, sizeof(SUpdateAnodeStmt), &pNode); break;
code = makeNode(type, sizeof(SUpdateAnodeStmt), &pNode);
break;
case QUERY_NODE_CREATE_INDEX_STMT:
code = makeNode(type, sizeof(SCreateIndexStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateIndexStmt), &pNode);
break;
case QUERY_NODE_DROP_INDEX_STMT:
code = makeNode(type, sizeof(SDropIndexStmt), &pNode); break;
code = makeNode(type, sizeof(SDropIndexStmt), &pNode);
break;
case QUERY_NODE_CREATE_QNODE_STMT:
case QUERY_NODE_CREATE_BNODE_STMT:
case QUERY_NODE_CREATE_SNODE_STMT:
case QUERY_NODE_CREATE_MNODE_STMT:
code = makeNode(type, sizeof(SCreateComponentNodeStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateComponentNodeStmt), &pNode);
break;
case QUERY_NODE_DROP_QNODE_STMT:
case QUERY_NODE_DROP_BNODE_STMT:
case QUERY_NODE_DROP_SNODE_STMT:
case QUERY_NODE_DROP_MNODE_STMT:
code = makeNode(type, sizeof(SDropComponentNodeStmt), &pNode); break;
code = makeNode(type, sizeof(SDropComponentNodeStmt), &pNode);
break;
case QUERY_NODE_CREATE_TOPIC_STMT:
code = makeNode(type, sizeof(SCreateTopicStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateTopicStmt), &pNode);
break;
case QUERY_NODE_DROP_TOPIC_STMT:
code = makeNode(type, sizeof(SDropTopicStmt), &pNode); break;
code = makeNode(type, sizeof(SDropTopicStmt), &pNode);
break;
case QUERY_NODE_DROP_CGROUP_STMT:
code = makeNode(type, sizeof(SDropCGroupStmt), &pNode); break;
code = makeNode(type, sizeof(SDropCGroupStmt), &pNode);
break;
case QUERY_NODE_ALTER_LOCAL_STMT:
code = makeNode(type, sizeof(SAlterLocalStmt), &pNode); break;
code = makeNode(type, sizeof(SAlterLocalStmt), &pNode);
break;
case QUERY_NODE_EXPLAIN_STMT:
code = makeNode(type, sizeof(SExplainStmt), &pNode); break;
code = makeNode(type, sizeof(SExplainStmt), &pNode);
break;
case QUERY_NODE_DESCRIBE_STMT:
code = makeNode(type, sizeof(SDescribeStmt), &pNode); break;
code = makeNode(type, sizeof(SDescribeStmt), &pNode);
break;
case QUERY_NODE_RESET_QUERY_CACHE_STMT:
code = makeNode(type, sizeof(SNode), &pNode); break;
code = makeNode(type, sizeof(SNode), &pNode);
break;
case QUERY_NODE_COMPACT_DATABASE_STMT:
code = makeNode(type, sizeof(SCompactDatabaseStmt), &pNode); break;
code = makeNode(type, sizeof(SCompactDatabaseStmt), &pNode);
break;
case QUERY_NODE_CREATE_FUNCTION_STMT:
code = makeNode(type, sizeof(SCreateFunctionStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateFunctionStmt), &pNode);
break;
case QUERY_NODE_DROP_FUNCTION_STMT:
code = makeNode(type, sizeof(SDropFunctionStmt), &pNode); break;
code = makeNode(type, sizeof(SDropFunctionStmt), &pNode);
break;
case QUERY_NODE_CREATE_STREAM_STMT:
code = makeNode(type, sizeof(SCreateStreamStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateStreamStmt), &pNode);
break;
case QUERY_NODE_DROP_STREAM_STMT:
code = makeNode(type, sizeof(SDropStreamStmt), &pNode); break;
code = makeNode(type, sizeof(SDropStreamStmt), &pNode);
break;
case QUERY_NODE_PAUSE_STREAM_STMT:
code = makeNode(type, sizeof(SPauseStreamStmt), &pNode); break;
code = makeNode(type, sizeof(SPauseStreamStmt), &pNode);
break;
case QUERY_NODE_RESUME_STREAM_STMT:
code = makeNode(type, sizeof(SResumeStreamStmt), &pNode); break;
code = makeNode(type, sizeof(SResumeStreamStmt), &pNode);
break;
case QUERY_NODE_BALANCE_VGROUP_STMT:
code = makeNode(type, sizeof(SBalanceVgroupStmt), &pNode); break;
code = makeNode(type, sizeof(SBalanceVgroupStmt), &pNode);
break;
case QUERY_NODE_BALANCE_VGROUP_LEADER_STMT:
code = makeNode(type, sizeof(SBalanceVgroupLeaderStmt), &pNode); break;
code = makeNode(type, sizeof(SBalanceVgroupLeaderStmt), &pNode);
break;
case QUERY_NODE_BALANCE_VGROUP_LEADER_DATABASE_STMT:
code = makeNode(type, sizeof(SBalanceVgroupLeaderStmt), &pNode); break;
code = makeNode(type, sizeof(SBalanceVgroupLeaderStmt), &pNode);
break;
case QUERY_NODE_MERGE_VGROUP_STMT:
code = makeNode(type, sizeof(SMergeVgroupStmt), &pNode); break;
code = makeNode(type, sizeof(SMergeVgroupStmt), &pNode);
break;
case QUERY_NODE_REDISTRIBUTE_VGROUP_STMT:
code = makeNode(type, sizeof(SRedistributeVgroupStmt), &pNode); break;
code = makeNode(type, sizeof(SRedistributeVgroupStmt), &pNode);
break;
case QUERY_NODE_SPLIT_VGROUP_STMT:
code = makeNode(type, sizeof(SSplitVgroupStmt), &pNode); break;
code = makeNode(type, sizeof(SSplitVgroupStmt), &pNode);
break;
case QUERY_NODE_SYNCDB_STMT:
break;
case QUERY_NODE_GRANT_STMT:
code = makeNode(type, sizeof(SGrantStmt), &pNode); break;
code = makeNode(type, sizeof(SGrantStmt), &pNode);
break;
case QUERY_NODE_REVOKE_STMT:
code = makeNode(type, sizeof(SRevokeStmt), &pNode); break;
code = makeNode(type, sizeof(SRevokeStmt), &pNode);
break;
case QUERY_NODE_ALTER_CLUSTER_STMT:
code = makeNode(type, sizeof(SAlterClusterStmt), &pNode); break;
code = makeNode(type, sizeof(SAlterClusterStmt), &pNode);
break;
case QUERY_NODE_SHOW_DNODES_STMT:
case QUERY_NODE_SHOW_MNODES_STMT:
case QUERY_NODE_SHOW_MODULES_STMT:

@ -583,191 +677,280 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) {
case QUERY_NODE_SHOW_CLUSTER_MACHINES_STMT:
case QUERY_NODE_SHOW_ENCRYPTIONS_STMT:
case QUERY_NODE_SHOW_TSMAS_STMT:
code = makeNode(type, sizeof(SShowStmt), &pNode); break;
code = makeNode(type, sizeof(SShowStmt), &pNode);
break;
case QUERY_NODE_SHOW_TABLE_TAGS_STMT:
code = makeNode(type, sizeof(SShowTableTagsStmt), &pNode); break;
code = makeNode(type, sizeof(SShowTableTagsStmt), &pNode);
break;
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
code = makeNode(type, sizeof(SShowDnodeVariablesStmt), &pNode); break;
code = makeNode(type, sizeof(SShowDnodeVariablesStmt), &pNode);
break;
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
code = makeNode(type, sizeof(SShowCreateDatabaseStmt), &pNode); break;
code = makeNode(type, sizeof(SShowCreateDatabaseStmt), &pNode);
break;
case QUERY_NODE_SHOW_DB_ALIVE_STMT:
case QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT:
code = makeNode(type, sizeof(SShowAliveStmt), &pNode); break;
code = makeNode(type, sizeof(SShowAliveStmt), &pNode);
break;
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
code = makeNode(type, sizeof(SShowCreateTableStmt), &pNode); break;
code = makeNode(type, sizeof(SShowCreateTableStmt), &pNode);
break;
case QUERY_NODE_SHOW_CREATE_VIEW_STMT:
code = makeNode(type, sizeof(SShowCreateViewStmt), &pNode); break;
code = makeNode(type, sizeof(SShowCreateViewStmt), &pNode);
break;
case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT:
code = makeNode(type, sizeof(SShowTableDistributedStmt), &pNode); break;
code = makeNode(type, sizeof(SShowTableDistributedStmt), &pNode);
break;
case QUERY_NODE_SHOW_COMPACTS_STMT:
code = makeNode(type, sizeof(SShowCompactsStmt), &pNode); break;
code = makeNode(type, sizeof(SShowCompactsStmt), &pNode);
break;
case QUERY_NODE_SHOW_COMPACT_DETAILS_STMT:
code = makeNode(type, sizeof(SShowCompactDetailsStmt), &pNode); break;
code = makeNode(type, sizeof(SShowCompactDetailsStmt), &pNode);
break;
case QUERY_NODE_KILL_QUERY_STMT:
code = makeNode(type, sizeof(SKillQueryStmt), &pNode); break;
code = makeNode(type, sizeof(SKillQueryStmt), &pNode);
break;
case QUERY_NODE_KILL_TRANSACTION_STMT:
case QUERY_NODE_KILL_CONNECTION_STMT:
case QUERY_NODE_KILL_COMPACT_STMT:
code = makeNode(type, sizeof(SKillStmt), &pNode); break;
code = makeNode(type, sizeof(SKillStmt), &pNode);
break;
case QUERY_NODE_DELETE_STMT:
code = makeNode(type, sizeof(SDeleteStmt), &pNode); break;
code = makeNode(type, sizeof(SDeleteStmt), &pNode);
break;
case QUERY_NODE_INSERT_STMT:
code = makeNode(type, sizeof(SInsertStmt), &pNode); break;
code = makeNode(type, sizeof(SInsertStmt), &pNode);
break;
case QUERY_NODE_QUERY:
code = makeNode(type, sizeof(SQuery), &pNode); break;
code = makeNode(type, sizeof(SQuery), &pNode);
break;
case QUERY_NODE_RESTORE_DNODE_STMT:
case QUERY_NODE_RESTORE_QNODE_STMT:
case QUERY_NODE_RESTORE_MNODE_STMT:
case QUERY_NODE_RESTORE_VNODE_STMT:
code = makeNode(type, sizeof(SRestoreComponentNodeStmt), &pNode); break;
code = makeNode(type, sizeof(SRestoreComponentNodeStmt), &pNode);
break;
case QUERY_NODE_CREATE_VIEW_STMT:
code = makeNode(type, sizeof(SCreateViewStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateViewStmt), &pNode);
break;
case QUERY_NODE_DROP_VIEW_STMT:
code = makeNode(type, sizeof(SDropViewStmt), &pNode); break;
code = makeNode(type, sizeof(SDropViewStmt), &pNode);
break;
case QUERY_NODE_CREATE_TSMA_STMT:
code = makeNode(type, sizeof(SCreateTSMAStmt), &pNode); break;
code = makeNode(type, sizeof(SCreateTSMAStmt), &pNode);
break;
case QUERY_NODE_DROP_TSMA_STMT:
code = makeNode(type, sizeof(SDropTSMAStmt), &pNode); break;
code = makeNode(type, sizeof(SDropTSMAStmt), &pNode);
break;
case QUERY_NODE_TSMA_OPTIONS:
code = makeNode(type, sizeof(STSMAOptions), &pNode); break;
code = makeNode(type, sizeof(STSMAOptions), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_SCAN:
code = makeNode(type, sizeof(SScanLogicNode), &pNode); break;
code = makeNode(type, sizeof(SScanLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_JOIN:
code = makeNode(type, sizeof(SJoinLogicNode), &pNode); break;
code = makeNode(type, sizeof(SJoinLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_AGG:
code = makeNode(type, sizeof(SAggLogicNode), &pNode); break;
code = makeNode(type, sizeof(SAggLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_PROJECT:
code = makeNode(type, sizeof(SProjectLogicNode), &pNode); break;
code = makeNode(type, sizeof(SProjectLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_VNODE_MODIFY:
code = makeNode(type, sizeof(SVnodeModifyLogicNode), &pNode); break;
code = makeNode(type, sizeof(SVnodeModifyLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_EXCHANGE:
code = makeNode(type, sizeof(SExchangeLogicNode), &pNode); break;
code = makeNode(type, sizeof(SExchangeLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_MERGE:
code = makeNode(type, sizeof(SMergeLogicNode), &pNode); break;
code = makeNode(type, sizeof(SMergeLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_WINDOW:
code = makeNode(type, sizeof(SWindowLogicNode), &pNode); break;
code = makeNode(type, sizeof(SWindowLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_FILL:
code = makeNode(type, sizeof(SFillLogicNode), &pNode); break;
code = makeNode(type, sizeof(SFillLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_SORT:
code = makeNode(type, sizeof(SSortLogicNode), &pNode); break;
code = makeNode(type, sizeof(SSortLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_PARTITION:
code = makeNode(type, sizeof(SPartitionLogicNode), &pNode); break;
code = makeNode(type, sizeof(SPartitionLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_INDEF_ROWS_FUNC:
code = makeNode(type, sizeof(SIndefRowsFuncLogicNode), &pNode); break;
code = makeNode(type, sizeof(SIndefRowsFuncLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_INTERP_FUNC:
code = makeNode(type, sizeof(SInterpFuncLogicNode), &pNode); break;
code = makeNode(type, sizeof(SInterpFuncLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_FORECAST_FUNC:
code = makeNode(type, sizeof(SForecastFuncLogicNode), &pNode); break;
code = makeNode(type, sizeof(SForecastFuncLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_GROUP_CACHE:
code = makeNode(type, sizeof(SGroupCacheLogicNode), &pNode); break;
code = makeNode(type, sizeof(SGroupCacheLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN_DYN_QUERY_CTRL:
code = makeNode(type, sizeof(SDynQueryCtrlLogicNode), &pNode); break;
code = makeNode(type, sizeof(SDynQueryCtrlLogicNode), &pNode);
break;
case QUERY_NODE_LOGIC_SUBPLAN:
code = makeNode(type, sizeof(SLogicSubplan), &pNode); break;
code = makeNode(type, sizeof(SLogicSubplan), &pNode);
break;
case QUERY_NODE_LOGIC_PLAN:
code = makeNode(type, sizeof(SQueryLogicPlan), &pNode); break;
code = makeNode(type, sizeof(SQueryLogicPlan), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
code = makeNode(type, sizeof(STagScanPhysiNode), &pNode); break;
code = makeNode(type, sizeof(STagScanPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
code = makeNode(type, sizeof(STableScanPhysiNode), &pNode); break;
code = makeNode(type, sizeof(STableScanPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN:
code = makeNode(type, sizeof(STableSeqScanPhysiNode), &pNode); break;
code = makeNode(type, sizeof(STableSeqScanPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
code = makeNode(type, sizeof(STableMergeScanPhysiNode), &pNode); break;
code = makeNode(type, sizeof(STableMergeScanPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
code = makeNode(type, sizeof(SStreamScanPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamScanPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
code = makeNode(type, sizeof(SSystemTableScanPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SSystemTableScanPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN:
code = makeNode(type, sizeof(SBlockDistScanPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SBlockDistScanPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN:
code = makeNode(type, sizeof(SLastRowScanPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SLastRowScanPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN:
code = makeNode(type, sizeof(STableCountScanPhysiNode), &pNode); break;
code = makeNode(type, sizeof(STableCountScanPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
code = makeNode(type, sizeof(SProjectPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SProjectPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
code = makeNode(type, sizeof(SSortMergeJoinPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SSortMergeJoinPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN:
code = makeNode(type, sizeof(SHashJoinPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SHashJoinPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG:
code = makeNode(type, sizeof(SAggPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SAggPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
code = makeNode(type, sizeof(SExchangePhysiNode), &pNode); break;
code = makeNode(type, sizeof(SExchangePhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_MERGE:
code = makeNode(type, sizeof(SMergePhysiNode), &pNode); break;
code = makeNode(type, sizeof(SMergePhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_SORT:
code = makeNode(type, sizeof(SSortPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SSortPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT:
code = makeNode(type, sizeof(SGroupSortPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SGroupSortPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL:
code = makeNode(type, sizeof(SIntervalPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SIntervalPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL:
code = makeNode(type, sizeof(SMergeIntervalPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SMergeIntervalPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL:
code = makeNode(type, sizeof(SMergeAlignedIntervalPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SMergeAlignedIntervalPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
code = makeNode(type, sizeof(SStreamIntervalPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamIntervalPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL:
code = makeNode(type, sizeof(SStreamFinalIntervalPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamFinalIntervalPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
code = makeNode(type, sizeof(SStreamSemiIntervalPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamSemiIntervalPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL:
code = makeNode(type, sizeof(SStreamMidIntervalPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamMidIntervalPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_FILL:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
code = makeNode(type, sizeof(SFillPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SFillPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
code = makeNode(type, sizeof(SSessionWinodwPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SSessionWinodwPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
code = makeNode(type, sizeof(SStreamSessionWinodwPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamSessionWinodwPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION:
code = makeNode(type, sizeof(SStreamSemiSessionWinodwPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamSemiSessionWinodwPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION:
code = makeNode(type, sizeof(SStreamFinalSessionWinodwPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamFinalSessionWinodwPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
code = makeNode(type, sizeof(SStateWinodwPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStateWinodwPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
code = makeNode(type, sizeof(SStreamStateWinodwPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamStateWinodwPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT:
code = makeNode(type, sizeof(SEventWinodwPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SEventWinodwPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
code = makeNode(type, sizeof(SStreamEventWinodwPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamEventWinodwPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT:
code = makeNode(type, sizeof(SCountWinodwPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SCountWinodwPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_MERGE_ANOMALY:
code = makeNode(type, sizeof(SAnomalyWindowPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SAnomalyWindowPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT:
code = makeNode(type, sizeof(SStreamCountWinodwPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamCountWinodwPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
code = makeNode(type, sizeof(SPartitionPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SPartitionPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
code = makeNode(type, sizeof(SStreamPartitionPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamPartitionPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
code = makeNode(type, sizeof(SIndefRowsFuncPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SIndefRowsFuncPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
code = makeNode(type, sizeof(SInterpFuncLogicNode), &pNode); break;
code = makeNode(type, sizeof(SInterpFuncLogicNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_FORECAST_FUNC:
code = makeNode(type, sizeof(SForecastFuncLogicNode), &pNode); break;
code = makeNode(type, sizeof(SForecastFuncLogicNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_DISPATCH:
code = makeNode(type, sizeof(SDataDispatcherNode), &pNode); break;
code = makeNode(type, sizeof(SDataDispatcherNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_INSERT:
code = makeNode(type, sizeof(SDataInserterNode), &pNode); break;
code = makeNode(type, sizeof(SDataInserterNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT:
code = makeNode(type, sizeof(SQueryInserterNode), &pNode); break;
code = makeNode(type, sizeof(SQueryInserterNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_DELETE:
code = makeNode(type, sizeof(SDataDeleterNode), &pNode); break;
code = makeNode(type, sizeof(SDataDeleterNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE:
code = makeNode(type, sizeof(SGroupCachePhysiNode), &pNode); break;
code = makeNode(type, sizeof(SGroupCachePhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL:
code = makeNode(type, sizeof(SDynQueryCtrlPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SDynQueryCtrlPhysiNode), &pNode);
break;
case QUERY_NODE_PHYSICAL_SUBPLAN:
code = makeNode(type, sizeof(SSubplan), &pNode); break;
code = makeNode(type, sizeof(SSubplan), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN:
code = makeNode(type, sizeof(SQueryPlan), &pNode); break;
code = makeNode(type, sizeof(SQueryPlan), &pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERP_FUNC:
code = makeNode(type, sizeof(SStreamInterpFuncPhysiNode), &pNode); break;
code = makeNode(type, sizeof(SStreamInterpFuncPhysiNode), &pNode);
break;
default:
break;
}

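The whole nodesMakeNode hunk above is a mechanical reflow: each `code = makeNode(...); break;` pair is split onto two lines with no behavioral change. For orientation, a minimal caller-side sketch built only from the signature in the hunk header; the chosen node type and the error-handling style are assumptions, not taken from this commit:

  SNode*  pNode = NULL;
  int32_t code = nodesMakeNode(QUERY_NODE_SELECT_STMT, &pNode);
  if (TSDB_CODE_SUCCESS != code) {
    return code;             /* allocation failed, pNode is still NULL */
  }
  /* ... populate the SSelectStmt fields ... */
  nodesDestroyNode(pNode);   /* nodesDestroyNode is touched later in this diff */
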
@ -1174,6 +1357,15 @@ void nodesDestroyNode(SNode* pNode) {
SAlterTableStmt* pStmt = (SAlterTableStmt*)pNode;
nodesDestroyNode((SNode*)pStmt->pOptions);
nodesDestroyNode((SNode*)pStmt->pVal);
if (pStmt->pNodeListTagValue != NULL) {
SNodeList* pNodeList = pStmt->pNodeListTagValue;
SNode* pSubNode = NULL;
FOREACH(pSubNode, pNodeList) {
SAlterTableStmt* pSubAlterTable = (SAlterTableStmt*)pSubNode;
nodesDestroyNode((SNode*)pSubAlterTable->pOptions);
nodesDestroyNode((SNode*)pSubAlterTable->pVal);
}
}
break;
}
case QUERY_NODE_CREATE_USER_STMT: {

@ -1891,7 +2083,6 @@ int32_t nodesListStrictAppendList(SNodeList* pTarget, SNodeList* pSrc) {
return code;
}

int32_t nodesListMakeStrictAppendList(SNodeList** pTarget, SNodeList* pSrc) {
if (NULL == *pTarget) {
int32_t code = nodesMakeList(pTarget);

@ -2350,7 +2541,8 @@ static EDealRes doCollect(SCollectColumnsCxt* pCxt, SColumnNode* pCol, SNode* pN
static bool isCollectType(ECollectColType collectType, EColumnType colType) {
return COLLECT_COL_TYPE_ALL == collectType
? true
: (COLLECT_COL_TYPE_TAG == collectType ? COLUMN_TYPE_TAG == colType : (COLUMN_TYPE_TAG != colType && COLUMN_TYPE_TBNAME != colType));
: (COLLECT_COL_TYPE_TAG == collectType ? COLUMN_TYPE_TAG == colType
: (COLUMN_TYPE_TAG != colType && COLUMN_TYPE_TBNAME != colType));
}

static EDealRes collectColumns(SNode* pNode, void* pContext) {

@ -2370,7 +2562,9 @@ static EDealRes collectColumnsExt(SNode* pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
SColumnNode* pCol = (SColumnNode*)pNode;
if (isCollectType(pCxt->collectType, pCol->colType) && 0 != strcmp(pCol->colName, "*") &&
(NULL == pCxt->pMultiTableAlias || NULL != (pCxt->pTableAlias = tSimpleHashGet(pCxt->pMultiTableAlias, pCol->tableAlias, strlen(pCol->tableAlias))))) {
(NULL == pCxt->pMultiTableAlias ||
NULL != (pCxt->pTableAlias =
tSimpleHashGet(pCxt->pMultiTableAlias, pCol->tableAlias, strlen(pCol->tableAlias))))) {
return doCollect(pCxt, pCol, pNode);
}
}

@ -2414,8 +2608,8 @@ int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char*
return TSDB_CODE_SUCCESS;
}

int32_t nodesCollectColumnsExt(SSelectStmt* pSelect, ESqlClause clause, SSHashObj* pMultiTableAlias, ECollectColType type,
SNodeList** pCols) {
int32_t nodesCollectColumnsExt(SSelectStmt* pSelect, ESqlClause clause, SSHashObj* pMultiTableAlias,
ECollectColType type, SNodeList** pCols) {
if (NULL == pSelect || NULL == pCols) {
return TSDB_CODE_FAILED;
}

@ -2543,21 +2737,21 @@ static int32_t funcNodeEqual(const void* pLeft, const void* pRight, size_t len)
return nodesEqualNode(*(const SNode**)pLeft, *(const SNode**)pRight) ? 0 : 1;
}

int32_t nodesCollectSelectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList* pFuncs) {
int32_t nodesCollectSelectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier,
SNodeList* pFuncs) {
if (NULL == pSelect || NULL == pFuncs) {
return TSDB_CODE_FAILED;
}

SCollectFuncsCxt cxt = {.errCode = TSDB_CODE_SUCCESS,
.classifier = classifier,
.tableAlias = tableAlias,
.pFuncs = pFuncs};
SCollectFuncsCxt cxt = {
.errCode = TSDB_CODE_SUCCESS, .classifier = classifier, .tableAlias = tableAlias, .pFuncs = pFuncs};

nodesWalkSelectStmt(pSelect, clause, collectFuncs, &cxt);
return cxt.errCode;
}

int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList** pFuncs) {
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier,
SNodeList** pFuncs) {
if (NULL == pSelect || NULL == pFuncs) {
return TSDB_CODE_FAILED;
}

@ -165,6 +165,7 @@ SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode*
SNode* createInterpTimePoint(SAstCreateContext* pCxt, SNode* pPoint);
SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen);
SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhenThenList, SNode* pElse);
SNode* createAlterSingleTagColumnNode(SAstCreateContext* pCtx, SToken* token, SNode* pVal);

SNode* addWhereClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pWhere);
SNode* addPartitionByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pPartitionByList);

@ -228,6 +229,7 @@ SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_
SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pOldColName,
SToken* pNewColName);
SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken* pTagName, SNode* pVal);
SNode* createAlterTableSetMultiTagValue(SAstCreateContext* pCxt, SNode* pRealTable, SNodeList* singleNode);
SNode* setAlterSuperTableType(SNode* pStmt);
SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
SNode* setShowKind(SAstCreateContext* pCxt, SNode* pStmt, EShowKind showKind);

@ -370,6 +370,7 @@ cmd ::= DROP STABLE with_opt(A) exists_opt(B) full_table_name(C).
cmd ::= ALTER TABLE alter_table_clause(A). { pCxt->pRootNode = A; }
cmd ::= ALTER STABLE alter_table_clause(A). { pCxt->pRootNode = setAlterSuperTableType(A); }

alter_table_clause(A) ::= full_table_name(B) alter_table_options(C). { A = createAlterTableModifyOptions(pCxt, B, C); }
alter_table_clause(A) ::=
full_table_name(B) ADD COLUMN column_name(C) type_name(D) column_options(E). { A = createAlterTableAddModifyColOptions2(pCxt, B, TSDB_ALTER_TABLE_ADD_COLUMN, &C, D, E); }

@ -387,8 +388,16 @@ alter_table_clause(A) ::=
full_table_name(B) MODIFY TAG column_name(C) type_name(D). { A = createAlterTableAddModifyCol(pCxt, B, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &C, D); }
alter_table_clause(A) ::=
full_table_name(B) RENAME TAG column_name(C) column_name(D). { A = createAlterTableRenameCol(pCxt, B, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &C, &D); }

%type column_tag_value_list { SNodeList* }
%destructor column_tag_value_list { nodesDestroyList($$); }
column_tag_value(A) ::= column_name(C) NK_EQ tags_literal(D). { A = createAlterSingleTagColumnNode(pCxt, &C, D); }
column_tag_value_list(A) ::= column_tag_value(B). { A = createNodeList(pCxt, B); }
column_tag_value_list(A) ::= column_tag_value_list(B) NK_COMMA column_tag_value(C). { A = addNodeToList(pCxt, B, C);}

alter_table_clause(A) ::=
full_table_name(B) SET TAG column_name(C) NK_EQ tags_literal(D). { A = createAlterTableSetTag(pCxt, B, &C, D); }
full_table_name(B) SET TAG column_tag_value_list(C). { A = createAlterTableSetMultiTagValue(pCxt, B, C); }

%type multi_create_clause { SNodeList* }
%destructor multi_create_clause { nodesDestroyList($$); }

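The new `column_tag_value_list` production lets one SET TAG clause carry several `tag = value` pairs: each pair is built by `createAlterSingleTagColumnNode`, and the collected list is handed to `createAlterTableSetMultiTagValue`. A hedged SQL sketch of the syntax the extended grammar accepts; the table and tag names are made up for illustration:

  ALTER TABLE d1001 SET TAG location = 'California.SanFrancisco', groupId = 3;

When the list has exactly one pair, the translator (buildUpdateMultiTagValReq, later in this diff) falls back to the existing TSDB_ALTER_TABLE_UPDATE_TAG_VAL action; with two or more pairs it uses the new TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL action.
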
@ -2541,6 +2541,21 @@ _err:
return NULL;
}

SNode* createAlterSingleTagColumnNode(SAstCreateContext* pCtx, SToken* pTagName, SNode* pVal) {
CHECK_PARSER_STATUS(pCtx);
SAlterTableStmt* pStmt = NULL;
pCtx->errCode = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT, (SNode**)&pStmt);
CHECK_MAKE_NODE(pStmt);
pStmt->alterType = TSDB_ALTER_TABLE_UPDATE_TAG_VAL;
CHECK_NAME(checkColumnName(pCtx, pTagName));
COPY_STRING_FORM_ID_TOKEN(pStmt->colName, pTagName);
pStmt->pVal = (SValueNode*)pVal;
pStmt->pNodeListTagValue = NULL;
return (SNode*)pStmt;
_err:
return NULL;
}

SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken* pTagName, SNode* pVal) {
CHECK_PARSER_STATUS(pCxt);
CHECK_NAME(checkColumnName(pCxt, pTagName));

@ -2557,6 +2572,19 @@ _err:
return NULL;
}

SNode* createAlterTableSetMultiTagValue(SAstCreateContext* pCxt, SNode* pRealTable, SNodeList* pList) {
CHECK_PARSER_STATUS(pCxt);
SAlterTableStmt* pStmt = NULL;
pCxt->errCode = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT, (SNode**)&pStmt);

CHECK_MAKE_NODE(pStmt);
pStmt->alterType = TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL;
pStmt->pNodeListTagValue = pList;
return createAlterTableStmtFinalize(pRealTable, pStmt);
_err:
return NULL;
}

SNode* setAlterSuperTableType(SNode* pStmt) {
if (!pStmt) return NULL;
setNodeType(pStmt, QUERY_NODE_ALTER_SUPER_TABLE_STMT);

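Taken together with the grammar above, the intended AST shape is an outer SAlterTableStmt whose pNodeListTagValue holds one single-tag SAlterTableStmt per `tag = value` pair. A hedged sketch of how the grammar actions compose for two pairs; the token, value, and table variables are placeholders, not code from this commit:

  /* illustrative composition for: SET TAG t1 = 1, t2 = 'x' (placeholder inputs) */
  SNode*     pPair1 = createAlterSingleTagColumnNode(pCxt, &tagTok1, pVal1);
  SNodeList* pPairs = createNodeList(pCxt, pPair1);
  SNode*     pPair2 = createAlterSingleTagColumnNode(pCxt, &tagTok2, pVal2);
  pPairs            = addNodeToList(pCxt, pPairs, pPair2);
  SNode*     pAlter = createAlterTableSetMultiTagValue(pCxt, pRealTable, pPairs);
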
@ -239,7 +239,8 @@ static EDealRes collectMetaKeyFromOperator(SCollectMetaKeyFromExprCxt* pCxt, SOp
if (TSDB_CODE_SUCCESS != code) return DEAL_RES_CONTINUE;
if (pTableName) {
SSelectStmt* pSelect = (SSelectStmt*)pCxt->pComCxt->pStmt;
pCxt->errCode = collectMetaKeyFromRealTableImpl(pCxt->pComCxt, ((SRealTableNode*)pSelect->pFromTable)->table.dbName, pTableName, AUTH_TYPE_READ);
pCxt->errCode = collectMetaKeyFromRealTableImpl(pCxt->pComCxt, ((SRealTableNode*)pSelect->pFromTable)->table.dbName,
pTableName, AUTH_TYPE_READ);
}

return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;

@ -97,6 +97,7 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
.showType = QUERY_NODE_SHOW_DNODES_STMT,
.pDbName = TSDB_INFORMATION_SCHEMA_DB,
.pTableName = TSDB_INS_TABLE_DNODES,

.numOfShowCols = 1,
.pShowCols = {"*"}
},

@ -1876,9 +1877,8 @@ static EDealRes translateColumnInGroupByClause(STranslateContext* pCxt, SColumnN
} else {
bool found = false;
res = translateColumnWithoutPrefix(pCxt, pCol);
if (!(*pCol)->node.asParam &&
res != DEAL_RES_CONTINUE &&
res != DEAL_RES_END && pCxt->errCode != TSDB_CODE_PAR_AMBIGUOUS_COLUMN) {
if (!(*pCol)->node.asParam && res != DEAL_RES_CONTINUE && res != DEAL_RES_END &&
pCxt->errCode != TSDB_CODE_PAR_AMBIGUOUS_COLUMN) {
res = translateColumnUseAlias(pCxt, pCol, &found);
*translateAsAlias = true;
}

@ -3321,9 +3321,11 @@ static int32_t selectCommonType(SDataType* commonType, const SDataType* newType)
return TSDB_CODE_SUCCESS;
}

if ((resultType == TSDB_DATA_TYPE_VARCHAR) && (IS_MATHABLE_TYPE(commonType->type) || IS_MATHABLE_TYPE(newType->type))) {
if ((resultType == TSDB_DATA_TYPE_VARCHAR) &&
(IS_MATHABLE_TYPE(commonType->type) || IS_MATHABLE_TYPE(newType->type))) {
commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), QUERY_NUMBER_MAX_DISPLAY_LEN);
} else if ((resultType == TSDB_DATA_TYPE_NCHAR) && (IS_MATHABLE_TYPE(commonType->type) || IS_MATHABLE_TYPE(newType->type))) {
} else if ((resultType == TSDB_DATA_TYPE_NCHAR) &&
(IS_MATHABLE_TYPE(commonType->type) || IS_MATHABLE_TYPE(newType->type))) {
commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), QUERY_NUMBER_MAX_DISPLAY_LEN * TSDB_NCHAR_SIZE);
} else {
commonType->bytes = TMAX(TMAX(commonType->bytes, newType->bytes), TYPE_BYTES[resultType]);

@ -5528,8 +5530,7 @@ static int32_t translateGroupByList(STranslateContext* pCxt, SSelectStmt* pSelec
if (NULL == pSelect->pGroupByList) {
return TSDB_CODE_SUCCESS;
}
SReplaceGroupByAliasCxt cxt = {
.pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList};
SReplaceGroupByAliasCxt cxt = {.pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList};
nodesRewriteExprsPostOrder(pSelect->pGroupByList, translateGroupPartitionByImpl, &cxt);

return pCxt->errCode;

@ -5540,8 +5541,7 @@ static int32_t translatePartitionByList(STranslateContext* pCxt, SSelectStmt* pS
return TSDB_CODE_SUCCESS;
}

SReplaceGroupByAliasCxt cxt = {
.pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList};
SReplaceGroupByAliasCxt cxt = {.pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList};
nodesRewriteExprsPostOrder(pSelect->pPartitionByList, translateGroupPartitionByImpl, &cxt);

return pCxt->errCode;

@ -9495,7 +9495,8 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
}

static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) {
if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType) {
if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType ||
TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL == pStmt->alterType) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE,
"Set tag value only available for child table");
}

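The check above now rejects the multi-tag form on super tables as well. A hedged SQL sketch of the difference, with made-up names (stb1 is a super table, d1001 one of its child tables):

  ALTER STABLE stb1 SET TAG location = 'Los Angeles';                  -- rejected: "Set tag value only available for child table"
  ALTER TABLE d1001 SET TAG location = 'Los Angeles', groupId = 5;     -- allowed: tag values live on the child table
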
@ -10521,7 +10522,8 @@ static void getSourceDatabase(SNode* pStmt, int32_t acctId, char* pDbFName) {
(void)tNameGetFullDbName(&name, pDbFName);
}

static void getStreamQueryFirstProjectAliasName(SHashObj* pUserAliasSet, char* aliasName, int32_t len, char* defaultName[]) {
static void getStreamQueryFirstProjectAliasName(SHashObj* pUserAliasSet, char* aliasName, int32_t len,
char* defaultName[]) {
for (int32_t i = 0; defaultName[i] != NULL; i++) {
if (NULL == taosHashGet(pUserAliasSet, defaultName[i], strlen(defaultName[i]))) {
snprintf(aliasName, len, "%s", defaultName[i]);

@ -10547,8 +10549,8 @@ static int32_t setColumnDefNodePrimaryKey(SColumnDefNode* pNode, bool isPk) {
return code;
}

static int32_t addIrowTsToCreateStreamQueryImpl(STranslateContext* pCxt, SSelectStmt* pSelect,
SHashObj* pUserAliasSet, SNodeList* pCols, SCMCreateStreamReq* pReq) {
static int32_t addIrowTsToCreateStreamQueryImpl(STranslateContext* pCxt, SSelectStmt* pSelect, SHashObj* pUserAliasSet,
SNodeList* pCols, SCMCreateStreamReq* pReq) {
SNode* pProj = nodesListGetNode(pSelect->pProjectionList, 0);
if (!pSelect->hasInterpFunc ||
(QUERY_NODE_FUNCTION == nodeType(pProj) && 0 == strcmp("_irowts", ((SFunctionNode*)pProj)->functionName))) {

@ -10990,20 +10992,17 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm

if (pStmt->pOptions->triggerType == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) {
if (pStmt->pOptions->fillHistory) {
return generateSyntaxErrMsgExt(
&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
"When trigger was force window close, Stream unsupported Fill history");
}

if (pStmt->pOptions->ignoreExpired != 1) {
return generateSyntaxErrMsgExt(
&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
"When trigger was force window close, Stream must not set ignore expired 0");
}

if (pStmt->pOptions->ignoreUpdate != 1) {
return generateSyntaxErrMsgExt(
&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
"When trigger was force window close, Stream must not set ignore update 0");
}

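These checks only reflow the error calls; the constraints themselves are unchanged: with a FORCE_WINDOW_CLOSE trigger the stream may not request fill history and must keep ignore-expired and ignore-update at 1. A hedged SQL sketch of a stream definition that satisfies them; the stream, target table, and source names are illustrative, and the exact option spelling should be checked against the TDengine stream DDL reference:

  CREATE STREAM s1 TRIGGER FORCE_WINDOW_CLOSE IGNORE EXPIRED 1 IGNORE UPDATE 1
    INTO st1 AS SELECT _wstart, count(*) FROM meters INTERVAL(10s);
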
@ -15183,24 +15182,88 @@ static int32_t rewriteDropSuperTable(STranslateContext* pCxt, SQuery* pQuery) {
TAOS_RETURN(0);
}

static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta,
SVAlterTbReq* pReq) {
SName tbName = {0};
SArray* pTsmas = NULL;
static int32_t buildUpdateTagValReqImpl2(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta,
char* colName, SMultiTagUpateVal* pReq) {
int32_t code = TSDB_CODE_SUCCESS;
if (pCxt->pMetaCache) {
toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tbName);
code = getTableTsmasFromCache(pCxt->pMetaCache, &tbName, &pTsmas);
if (code != TSDB_CODE_SUCCESS) return code;
if (pTsmas && pTsmas->size > 0) return TSDB_CODE_TSMA_MUST_BE_DROPPED;
SSchema* pSchema = getTagSchema(pTableMeta, colName);
if (NULL == pSchema) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Invalid tag name: %s", colName);
}

SSchema* pSchema = getTagSchema(pTableMeta, pStmt->colName);
if (NULL == pSchema) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Invalid tag name: %s",
pStmt->colName);
pReq->tagName = taosStrdup(colName);
if (NULL == pReq->tagName) {
TAOS_CHECK_GOTO(terrno, NULL, _err);
}
pReq->tagName = taosStrdup(pStmt->colName);

pReq->pTagArray = taosArrayInit(1, sizeof(STagVal));
if (NULL == pReq->pTagArray) {
TAOS_CHECK_GOTO(terrno, NULL, _err);
}
pReq->colId = pSchema->colId;
pReq->tagType = pSchema->type;

STag* pTag = NULL;
SToken token;
char tokenBuf[TSDB_MAX_TAGS_LEN];
const char* tagStr = pStmt->pVal->literal;
NEXT_TOKEN_WITH_PREV(tagStr, token);
if (TSDB_CODE_SUCCESS == code) {
code = checkAndTrimValue(&token, tokenBuf, &pCxt->msgBuf, pSchema->type);
if (TSDB_CODE_SUCCESS == code && TK_NK_VARIABLE == token.type) {
code = buildSyntaxErrMsg(&pCxt->msgBuf, "not expected tags values", token.z);
}
}

if (TSDB_CODE_SUCCESS == code) {
code = parseTagValue(&pCxt->msgBuf, &tagStr, pTableMeta->tableInfo.precision, pSchema, &token, NULL,
pReq->pTagArray, &pTag);
if (pSchema->type == TSDB_DATA_TYPE_JSON && token.type == TK_NULL && code == TSDB_CODE_SUCCESS) {
pReq->tagFree = true;
}
}
if (TSDB_CODE_SUCCESS == code && tagStr) {
NEXT_VALID_TOKEN(tagStr, token);
if (token.n != 0) {
code = buildSyntaxErrMsg(&pCxt->msgBuf, "not expected tags values", token.z);
}
}

if (TSDB_CODE_SUCCESS == code) {
if (pSchema->type == TSDB_DATA_TYPE_JSON) {
code = buildSyntaxErrMsg(&pCxt->msgBuf, "not expected tags values ", token.z);
} else {
STagVal* pTagVal = taosArrayGet(pReq->pTagArray, 0);
if (pTagVal) {
pReq->isNull = false;
if (IS_VAR_DATA_TYPE(pSchema->type)) {
pReq->nTagVal = pTagVal->nData;
pReq->pTagVal = pTagVal->pData;
} else {
pReq->nTagVal = pSchema->bytes;
pReq->pTagVal = (uint8_t*)&pTagVal->i64;
}
} else {
pReq->isNull = true;
}
}
}
_err:
if (code != 0) {
taosArrayDestroy(pReq->pTagArray);
taosMemoryFree(pReq->tagName);
}
return code;
}
static int32_t buildUpdateTagValReqImpl(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta,
char* colName, SVAlterTbReq* pReq) {
int32_t code = TSDB_CODE_SUCCESS;

SSchema* pSchema = getTagSchema(pTableMeta, colName);
if (NULL == pSchema) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Invalid tag name: %s", colName);
}

pReq->tagName = taosStrdup(colName);
if (NULL == pReq->tagName) {
return terrno;
}

@ -15261,6 +15324,82 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS

return code;
}
static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta,
SVAlterTbReq* pReq) {
SName tbName = {0};
SArray* pTsmas = NULL;
int32_t code = TSDB_CODE_SUCCESS;
if (pCxt->pMetaCache) {
toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tbName);
code = getTableTsmasFromCache(pCxt->pMetaCache, &tbName, &pTsmas);
if (code != TSDB_CODE_SUCCESS) return code;
if (pTsmas && pTsmas->size > 0) return TSDB_CODE_TSMA_MUST_BE_DROPPED;
}
return buildUpdateTagValReqImpl(pCxt, pStmt, pTableMeta, pStmt->colName, pReq);
}

static int32_t buildUpdateMultiTagValReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta,
SVAlterTbReq* pReq) {
int32_t code = TSDB_CODE_SUCCESS;
SName tbName = {0};
SArray* pTsmas = NULL;
SHashObj* pUnique = NULL;
if (pCxt->pMetaCache) {
toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tbName);
code = getTableTsmasFromCache(pCxt->pMetaCache, &tbName, &pTsmas);
if (code != TSDB_CODE_SUCCESS) return code;
if (pTsmas && pTsmas->size > 0) return TSDB_CODE_TSMA_MUST_BE_DROPPED;
}
SNodeList* pNodeList = pStmt->pNodeListTagValue;
if (pNodeList == NULL) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
}

int32_t nTagValues = pNodeList->length;
if (nTagValues == 1) {
SAlterTableStmt* head = (SAlterTableStmt*)pNodeList->pHead->pNode;
pReq->action = TSDB_ALTER_TABLE_UPDATE_TAG_VAL;
return buildUpdateTagValReqImpl(pCxt, head, pTableMeta, head->colName, pReq);
} else {
pReq->pMultiTag = taosArrayInit(nTagValues, sizeof(SMultiTagUpateVal));
if (pReq->pMultiTag == NULL) {
return terrno;
}

pUnique = taosHashInit(nTagValues, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (pUnique == NULL) {
TAOS_CHECK_GOTO(terrno, NULL, _err);
}

SAlterTableStmt* pTagStmt = NULL;
SNode* pNode = NULL;
int8_t dummpy = 0;
FOREACH(pNode, pNodeList) {
SMultiTagUpateVal val = {0};
pTagStmt = (SAlterTableStmt*)pNode;

SMultiTagUpateVal* p = taosHashGet(pUnique, pTagStmt->colName, strlen(pTagStmt->colName));
if (p) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_DUPLICATED_COLUMN);
TAOS_CHECK_GOTO(code, NULL, _err);
}

code = taosHashPut(pUnique, pTagStmt->colName, strlen(pTagStmt->colName), &dummpy, sizeof(dummpy));
TAOS_CHECK_GOTO(code, NULL, _err);

code = buildUpdateTagValReqImpl2(pCxt, pTagStmt, pTableMeta, pTagStmt->colName, &val);
TAOS_CHECK_GOTO(code, NULL, _err);

if (taosArrayPush(pReq->pMultiTag, &val) == NULL) {
tfreeMultiTagUpateVal((void*)&val);
|
||||
TAOS_CHECK_GOTO(terrno, NULL, _err);
|
||||
}
|
||||
}
|
||||
}
|
||||
_err:
|
||||
taosHashCleanup(pUnique);
|
||||
return code;
|
||||
}
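For orientation, a minimal client-side sketch of the statement shape this parser path handles. The connection parameters mirror the C smoke tests later in this diff; the database and table names (multi_tag_demo, st1, ct1) and the helper run() are invented for the example.

// Hedged sketch: exercises ALTER TABLE ... SET TAG a = ..., b = ... through the C client.
// Assumes a local taosd with default credentials; object names are made up for illustration.
#include <stdio.h>
#include "taos.h"

static int run(TAOS *taos, const char *sql) {
  TAOS_RES *res = taos_query(taos, sql);
  int       code = taos_errno(res);
  if (code != 0) printf("failed: %s, reason: %s\n", sql, taos_errstr(res));
  taos_free_result(res);
  return code;
}

int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (taos == NULL) return 1;
  run(taos, "create database if not exists multi_tag_demo");
  run(taos, "create table if not exists multi_tag_demo.st1 (ts timestamp, v int) tags (t1 int, t2 binary(8))");
  run(taos, "create table if not exists multi_tag_demo.ct1 using multi_tag_demo.st1 tags (1, 'a')");
  // One statement updating several tags at once; with more than one assignment the
  // translator above takes the TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL branch.
  int code = run(taos, "alter table multi_tag_demo.ct1 set tag t1 = 2, t2 = 'b'");
  taos_close(taos);
  return code;
}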

static int32_t buildAddColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta,
                              SVAlterTbReq* pReq) {

@@ -15449,6 +15588,8 @@ static int32_t buildAlterTbReq(STranslateContext* pCxt, SAlterTableStmt* pStmt,
    case TSDB_ALTER_TABLE_UPDATE_TAG_NAME:
    case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
      return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
    case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL:
      return buildUpdateMultiTagValReq(pCxt, pStmt, pTableMeta, pReq);
    case TSDB_ALTER_TABLE_UPDATE_TAG_VAL:
      return buildUpdateTagValReq(pCxt, pStmt, pTableMeta, pReq);
    case TSDB_ALTER_TABLE_ADD_COLUMN:

@@ -15550,6 +15691,10 @@ static void destoryAlterTbReq(SVAlterTbReq* pReq) {
      taosMemoryFreeClear(p->pData);
    }
  }
  if (pReq->action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL) {
    taosArrayDestroyEx(pReq->pMultiTag, tfreeMultiTagUpateVal);
  }

  taosArrayDestroy(pReq->pTagArray);
  if (pReq->tagFree) tTagFree((STag*)pReq->pTagVal);
}
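The cleanup above hands each element to a free callback before dropping the array (taosArrayDestroyEx with tfreeMultiTagUpateVal). Below is a self-contained C illustration of that destroy-with-callback idiom; the names are invented for the example and are not TDengine's real container API.

#include <stdlib.h>
#include <string.h>

typedef struct { char *tagName; void *payload; } DemoTagVal;
typedef void (*ElemFreeFn)(void *elem);

static void demoTagValFree(void *elem) {
  DemoTagVal *v = elem;
  free(v->tagName);
  free(v->payload);
}

// Frees every element with the callback, then the backing buffer itself,
// mirroring the shape of the taosArrayDestroyEx(pReq->pMultiTag, ...) call above.
static void arrayDestroyEx(void *buf, size_t elemSize, size_t n, ElemFreeFn fp) {
  for (size_t i = 0; i < n; ++i) fp((char *)buf + i * elemSize);
  free(buf);
}

int main(void) {
  size_t      n = 2;
  DemoTagVal *vals = calloc(n, sizeof(DemoTagVal));
  vals[0].tagName = strdup("t1");
  vals[1].tagName = strdup("t2");
  arrayDestroyEx(vals, sizeof(DemoTagVal), n, demoTagValFree);
  return 0;
}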

@@ -4259,7 +4259,7 @@ typedef struct SLastRowScanOptSetColDataTypeCxt {
  int32_t code;
} SLastRowScanOptSetColDataTypeCxt;

static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) {
static EDealRes lastRowScanOptGetColAndSetDataType(SNode* pNode, void* pContext, bool setType) {
  if (QUERY_NODE_COLUMN == nodeType(pNode)) {
    SLastRowScanOptSetColDataTypeCxt* pCxt = pContext;
    if (pCxt->doAgg) {

@@ -4267,12 +4267,12 @@ static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) {
      if (TSDB_CODE_SUCCESS != pCxt->code) {
        return DEAL_RES_ERROR;
      }
      getLastCacheDataType(&(((SColumnNode*)pNode)->node.resType), pCxt->pkBytes);
      if (setType) getLastCacheDataType(&(((SColumnNode*)pNode)->node.resType), pCxt->pkBytes);
    } else {
      SNode* pCol = NULL;
      FOREACH(pCol, pCxt->pLastCols) {
        if (nodesEqualNode(pCol, pNode)) {
          getLastCacheDataType(&(((SColumnNode*)pNode)->node.resType), pCxt->pkBytes);
          if (setType) getLastCacheDataType(&(((SColumnNode*)pNode)->node.resType), pCxt->pkBytes);
          break;
        }
      }

@@ -4282,6 +4282,14 @@ static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) {
  return DEAL_RES_CONTINUE;
}

static EDealRes lastRowScanOptGetLastCols(SNode* pNode, void* pContext) {
  return lastRowScanOptGetColAndSetDataType(pNode, pContext, false);
}

static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) {
  return lastRowScanOptGetColAndSetDataType(pNode, pContext, true);
}
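The planner change above folds two nodesWalkExpr callbacks into one worker parameterized by a setType flag, exposed through thin wrappers. A stand-alone C sketch of the same refactoring shape, using illustrative types and names rather than the planner's real ones:

#include <stdbool.h>
#include <stdio.h>

typedef enum { DEAL_CONTINUE, DEAL_ERROR } DealRes;

// One worker carries the traversal logic; the flag decides whether the pass
// only collects columns or also rewrites their result type.
static DealRes getColAndMaybeSetType(int col, void *ctx, bool setType) {
  int *visited = ctx;
  (*visited)++;
  if (setType) printf("rewrite type of column %d\n", col);
  return DEAL_CONTINUE;
}

static DealRes getLastCols(int col, void *ctx) { return getColAndMaybeSetType(col, ctx, false); }
static DealRes setColDataType(int col, void *ctx) { return getColAndMaybeSetType(col, ctx, true); }

int main(void) {
  int count = 0;
  getLastCols(1, &count);     // first pass: collect only
  setColDataType(1, &count);  // second pass: collect and rewrite
  printf("visited %d times\n", count);
  return 0;
}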

static void lastRowScanOptSetLastTargets(SNodeList* pTargets, SNodeList* pLastCols, SNodeList* pLastRowCols, bool erase, int32_t pkBytes) {
  SNode* pTarget = NULL;
  WHERE_EACH(pTarget, pTargets) {

@@ -4394,7 +4402,7 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic
        SNode* pParamNode = NULL;
        if (FUNCTION_TYPE_LAST == funcType) {
          (void)nodesListErase(pFunc->pParameterList, nodesListGetCell(pFunc->pParameterList, 1));
          nodesWalkExpr(nodesListGetNode(pFunc->pParameterList, 0), lastRowScanOptSetColDataType, &cxt);
          nodesWalkExpr(nodesListGetNode(pFunc->pParameterList, 0), lastRowScanOptGetLastCols, &cxt);
          if (TSDB_CODE_SUCCESS != cxt.code) break;
        }
        FOREACH(pParamNode, pFunc->pParameterList) {

@@ -61,6 +61,10 @@ int32_t qwDbgValidateStatus(QW_FPARAMS_DEF, int8_t oriStatus, int8_t newStatus,
      break;
    case JOB_TASK_STATUS_SUCC:
      if (newStatus == JOB_TASK_STATUS_PART_SUCC) {
        QW_TASK_DLOG("task status update from %s to %s", jobTaskStatusStr(oriStatus), jobTaskStatusStr(newStatus));
        return TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS;
      }
      if (newStatus != JOB_TASK_STATUS_DROP && newStatus != JOB_TASK_STATUS_FAIL) {
        QW_ERR_JRET(TSDB_CODE_APP_ERROR);
      }

@@ -684,6 +684,10 @@ _return:
  if (TSDB_CODE_SUCCESS == code && QW_PHASE_POST_QUERY == phase) {
    code = qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_PART_SUCC, ctx->dynamicTask);
    if (code == TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS && ctx->queryRsped) {
      QW_TASK_DLOG("skip error: %s. ", tstrerror(code));
      code = TSDB_CODE_SUCCESS;
    }
    ctx->queryGotData = true;
  }
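The qworker hunk above treats one specific status-transition error as benign when the query response has already gone out. A compact stand-alone sketch of that guard; the codes and helper here are invented for illustration only.

#include <stdbool.h>
#include <stdio.h>

enum { CODE_OK = 0, CODE_SUCC_TO_PART_SUCC = 1001 };

// Stand-in for a status update that reports the transition it rejected.
static int updateTaskStatus(int newStatus) { (void)newStatus; return CODE_SUCC_TO_PART_SUCC; }

int main(void) {
  bool queryRsped = true;  // a response already went out, so the transition is harmless
  int  code = updateTaskStatus(2);
  if (code == CODE_SUCC_TO_PART_SUCC && queryRsped) {
    printf("skip error %d\n", code);
    code = CODE_OK;  // downgrade the rejected transition to success
  }
  return code;
}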

@@ -1294,7 +1294,7 @@ void DestroyRegexCache(){
  uInfo("[regex cache] destory regex cache");
  bool ret = taosTmrStopA(&sRegexCache.timer);
  if (!ret) {
    uError("failed to stop regex cache timer");
    uInfo("stop regex cache timer may be failed");
  }
  taosWLockLatch(&sRegexCache.mutex);
  sRegexCache.exit = true;

@@ -53,6 +53,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_PORT_EADDRINUSE, "Port already in use")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_BROKEN_LINK, "Conn is broken")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_TIMEOUT, "Conn read timeout")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED, "some vnode/qnode/mnode(s) out of service")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_SOMENODE_BROKEN_LINK, "some vnode/qnode/mnode(s) conn is broken")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_MAX_SESSIONS, "rpc open too many session")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NETWORK_ERROR, "rpc network error")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NETWORK_BUSY, "rpc network busy")

@@ -511,6 +512,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_NOT_SUPPORT_TYPE, "Not supported range t
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_WRONG_OPTR_TYPE, "Wrong operator type")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_RANGE_ERROR, "Wrong filter range")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_FILTER_INVALID_TYPE, "Invalid filter type")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS, "Change task status from success to partial success")

// grant
TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired")
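The new TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS entry extends a table that maps error codes to messages through the TAOS_DEFINE_ERROR macro. A self-contained sketch of that table-of-macros idiom, with illustrative names rather than the real taoserror internals:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t code; const char *msg; } DemoError;

// Each macro use expands to one table row, so adding an error is a one-line change.
#define DEMO_DEFINE_ERROR(code, msg) { (code), (msg) },

static const DemoError kErrors[] = {
    DEMO_DEFINE_ERROR(0x0001, "Wrong operator type")
    DEMO_DEFINE_ERROR(0x0002, "Change task status from success to partial success")
};

static const char *demoStrError(int32_t code) {
  for (size_t i = 0; i < sizeof(kErrors) / sizeof(kErrors[0]); ++i) {
    if (kErrors[i].code == code) return kErrors[i].msg;
  }
  return "Unknown error";
}

int main(void) {
  printf("%s\n", demoStrError(0x0002));
  return 0;
}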

@@ -1582,6 +1582,7 @@
,,y,script,./test.sh -f tsim/tag/tinyint.sim
,,y,script,./test.sh -f tsim/tag/drop_tag.sim
,,y,script,./test.sh -f tsim/tag/tbNameIn.sim
,,y,script,./test.sh -f tsim/tag/change_multi_tag.sim
,,y,script,./test.sh -f tmp/monitor.sim
,,y,script,./test.sh -f tsim/tagindex/add_index.sim
,,n,script,./test.sh -f tsim/tagindex/sma_and_tag_index.sim

@@ -0,0 +1,8 @@
#Column Define
#caseID,rerunTimes,Run with Sanitizer,casePath,caseCommand
#NA,NA,y or n,script,./test.sh -f tsim/analytics/basic0.sim

#tdgpt-test
,,n,script,./test.sh -f tsim/analytics/basic0.sim
#,,n,system-test,python3 ./test.py -f 9-tdgpt/test_gpt.py

@@ -0,0 +1,159 @@
#!/bin/bash

# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

TDENGINE_DIR=/root/TDinternal/community

#echo "TDENGINE_DIR = $TDENGINE_DIR"
today=`date +"%Y%m%d"`
TDENGINE_ALLCI_REPORT=$TDENGINE_DIR/tests/all-ci-report-$today.log

function runCasesOneByOne () {
    while read -r line; do
        if [[ "$line" != "#"* ]]; then
            cmd=`echo $line | cut -d',' -f 5`
            if [[ "$2" == "sim" ]] && [[ $line == *"script"* ]]; then
                case=`echo $cmd | cut -d' ' -f 3`
                start_time=`date +%s`
                date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > /dev/null 2>&1 && \
                echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_ALLCI_REPORT \
                || echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_ALLCI_REPORT
                end_time=`date +%s`
                echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_ALLCI_REPORT

            elif [[ "$line" == *"$2"* ]]; then
                if [[ "$cmd" == *"pytest.sh"* ]]; then
                    cmd=`echo $cmd | cut -d' ' -f 2-20`
                fi
                case=`echo $cmd | cut -d' ' -f 4-20`
                start_time=`date +%s`
                date +%F\ %T | tee -a $TDENGINE_ALLCI_REPORT && timeout 20m $cmd > /dev/null 2>&1 && \
                echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_ALLCI_REPORT || \
                echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_ALLCI_REPORT
                end_time=`date +%s`
                echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_ALLCI_REPORT
            fi
        fi
    done < $1
}

function runUnitTest() {
    echo "=== Run unit test case ==="
    echo " $TDENGINE_DIR/debug"
    cd $TDENGINE_DIR/debug
    ctest -j12
    echo "3.0 unit test done"
}

function runSimCases() {
    echo "=== Run sim cases ==="

    cd $TDENGINE_DIR/tests/script
    runCasesOneByOne $TDENGINE_DIR/tests/parallel_test/cases-test.task sim

    totalSuccess=`grep 'sim success' $TDENGINE_ALLCI_REPORT | wc -l`
    if [ "$totalSuccess" -gt "0" ]; then
        echo "### Total $totalSuccess SIM test case(s) succeed! ###" | tee -a $TDENGINE_ALLCI_REPORT
    fi

    totalFailed=`grep 'sim failed\|fault' $TDENGINE_ALLCI_REPORT | wc -l`
    if [ "$totalFailed" -ne "0" ]; then
        echo "### Total $totalFailed SIM test case(s) failed! ###" | tee -a $TDENGINE_ALLCI_REPORT
    fi
}

function runPythonCases() {
    echo "=== Run python cases ==="

    cd $TDENGINE_DIR/tests/parallel_test
    sed -i '/compatibility.py/d' cases-test.task

    # army
    cd $TDENGINE_DIR/tests/army
    runCasesOneByOne ../parallel_test/cases-test.task army

    # system-test
    cd $TDENGINE_DIR/tests/system-test
    runCasesOneByOne ../parallel_test/cases-test.task system-test

    # develop-test
    cd $TDENGINE_DIR/tests/develop-test
    runCasesOneByOne ../parallel_test/cases-test.task develop-test

    totalSuccess=`grep 'py success' $TDENGINE_ALLCI_REPORT | wc -l`
    if [ "$totalSuccess" -gt "0" ]; then
        echo "### Total $totalSuccess python test case(s) succeed! ###" | tee -a $TDENGINE_ALLCI_REPORT
    fi

    totalFailed=`grep 'py failed\|fault' $TDENGINE_ALLCI_REPORT | wc -l`
    if [ "$totalFailed" -ne "0" ]; then
        echo "### Total $totalFailed python test case(s) failed! ###" | tee -a $TDENGINE_ALLCI_REPORT
    fi
}

function runTest() {
    echo "run Test"

    cd $TDENGINE_DIR
    [ -d sim ] && rm -rf sim
    [ -f $TDENGINE_ALLCI_REPORT ] && rm $TDENGINE_ALLCI_REPORT

    runUnitTest
    runSimCases
    runPythonCases

    stopTaosd
    cd $TDENGINE_DIR/tests/script
    find . -name '*.sql' | xargs rm -f

    cd $TDENGINE_DIR/tests/pytest
    find . -name '*.sql' | xargs rm -f
}

function stopTaosd {
    echo "Stop taosd start"
    systemctl stop taosd
    PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
    while [ -n "$PID" ]
    do
        pkill -TERM -x taosd
        sleep 1
        PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
    done
    echo "Stop taosd end"
}

function stopTaosadapter {
    echo "Stop taosadapter"
    systemctl stop taosadapter.service
    PID=`ps -ef|grep -w taosadapter | grep -v grep | awk '{print $2}'`
    while [ -n "$PID" ]
    do
        pkill -TERM -x taosadapter
        sleep 1
        PID=`ps -ef|grep -w taosadapter | grep -v grep | awk '{print $2}'`
    done
    echo "Stop taosadapter end"
}

WORK_DIR=/root/

date >> $WORK_DIR/date.log
echo "Run ALL CI Test Cases" | tee -a $WORK_DIR/date.log

stopTaosd

runTest

date >> $WORK_DIR/date.log
echo "End of CI Test Cases" | tee -a $WORK_DIR/date.log

@@ -4,7 +4,7 @@ system sh/exec.sh -n dnode1 -s start
sql connect

print =============== create anode
sql create anode '127.0.0.1:6090'
sql create anode '192.168.1.116:6050'

sql show anodes
if $rows != 1 then

@@ -108,6 +108,8 @@ if $data30 != 12 then
endi

print =============== show vnodes on dnode 1
print =============== Wait for the synchronization status of vnode and Mnode, heartbeat for one second
sleep 1000
sql show vnodes on dnode 1
if $rows != 9 then
  return -1

@@ -76,11 +76,11 @@ if $data00 != @-> Data Exchange 2:1 (width=296)@ then
  return -1
endi
sql explain select count(*), last_row(f1), min(f1),t1 from sta partition by t1;
if $data00 != @-> Aggragate (functions=4 width=28 input_order=desc )@ then
if $data00 != @-> Aggregate (functions=4 width=28 input_order=desc )@ then
  return -1
endi
sql explain select count(*), last_row(f1), min(f1),t1 from sta group by t1;
if $data00 != @-> Aggragate (functions=4 width=28 input_order=desc )@ then
if $data00 != @-> Aggregate (functions=4 width=28 input_order=desc )@ then
  return -1
endi
sql explain select distinct count(*), last_row(f1), min(f1) from sta;

@@ -0,0 +1,226 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect

print ======================== dnode1 start

$dbPrefix = ta_ad_db
$tbPrefix = ta_ad_tb
$mtPrefix = ta_ad_mt
$tbNum = 10
$rowNum = 20
$totalNum = 200

print =============== step1
$i = 0
$db = $dbPrefix . $i

sql create database $db
sql use $db

print =============== step2
$j = 3
$i = 2
$mt = $mtPrefix . $i
$tb = $tbPrefix . $i
$tbj = $tbPrefix . $j
$ntable = tb_normal_table

sql create table $mt (ts timestamp, tbcol int) TAGS(tagCol1 bool, tagCol2 tinyint, tagCol3 smallint, tagCol4 int, tagCol5 bigint, tagCol6 nchar(10), tagCol7 binary(8))
sql create table $tb using $mt tags( 1, 2, 3, 5,7, "test", "test")
sql create table $tbj using $mt tags( 2, 3, 4, 6,8, "testj", "testj")
sql create table $ntable (ts timestamp, f int)

sql insert into $tb values(now, 1)
sql insert into $tb values(now, 1)

# invalid sql
sql_error alter table $mt set tag tgcol1 = 1,
sql_error alter table $mt set tag ,
sql_error alter table $mt set tag tgcol1=10,tagcol2=
#set tag value on supertable
sql_error alter table $mt set tag tgcol1 = 1,tagcol2 = 2, tag3 = 4
#set normal table value
sql_error alter table $ntable set tag f = 10
# duplicate tag name
sql_error alter table $tbj set tag tagCol1=1,tagCol1 = 2
sql_error alter table $tbj set tag tagCol1=1,tagCol5=10, tagCol5=3
# not exist tag
sql_error alter table $tbj set tag tagNotExist = 1,tagCol1 = 2
sql_error alter table $tbj set tag tagCol1 = 2, tagNotExist = 1
sql_error alter table $tbj set tagNotExist = 1
sql_error alter table $tbj set tagNotExist = NULL,
sql_error alter table $tbj set tag tagCol1 = 1, tagCol5="xxxxxxxxxxxxxxxx"
# invalid tag value
sql_error alter table $tbj set tag tagCol1 = 1, tagCol5="xxxxxxxxxxxxxxxx", tagCol7="yyyyyyyyyyyyyyyyyyyyyyyyy"
# invalid data type

# escape
sql_error alter table $tbj set tag `tagCol1`=true
sql_error alter table $tbj set tag `tagCol1`=true,`tagCol2`=1,`tagNotExist`=10
sql_error alter table $tbj set tag `tagCol1`=true,`tagCol2`=1,tagcol1=true

sql alter table $tbj set tag tagCol1 = 100, tagCol2 = 100

sql select * from $mt where tagCol2 = 100
if $rows != 0 then
  return -1
endi

sql select * from $mt where tagCol1 = 1
if $rows != 2 then
  return -1
endi

sql alter table $tbj set tag tagCol1=true,tagCol2=-1,tagcol3=-10, tagcol4=-100,tagcol5=-1000,tagCol6="empty",tagCol7="empty1"
sql alter table $tb set tag tagCol1=0

sql select * from $mt where tagCol1 = true
if $rows != 0 then
  return -1
endi

sql select * from $mt where tagCol2 = -1
if $rows != 0 then
  return -1
endi

sql select * from $mt where tagCol3 = -10
if $rows != 0 then
  return -1
endi

sql select * from $mt where tagCol4 = -100
if $rows != 0 then
  return -1
endi

sql select * from $mt where tagCol5 = -1000
if $rows != 0 then
  return -1
endi

sql select * from $mt where tagCol6 = "empty"
if $rows != 0 then
  return -1
endi

sql select * from $mt where tagCol6 = "empty1"
if $rows != 0 then
  return -1
endi

sql insert into $tbj values (now, 1)

sql select * from $mt where tagCol1 = true
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol2 = -1
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol3 = -10
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol4 = -100
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol5 = -1000
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol6 = "empty"
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol7 = "empty1"
if $rows != 1 then
  return -1
endi

sql alter table $tbj set tag tagCol1=true
sql alter table $tb set tag tagCol1=true

sql select * from $mt where tagCol1 = true
if $rows != 3 then
  return -1
endi

sql alter table $tb set tag tagCol1=false

sql alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=-10000,tagCol6="empty1",tagCol7="empty2"

sql select * from $mt where tagCol1 = true
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol2 = -10
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol3 = -100
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol4 = -1000
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol5 = -10000
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol6 = "empty1"
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol7 = "empty2"
if $rows != 1 then
  return -1
endi

sql alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=NULL,tagCol6=NULL,tagCol7=NULL
sql alter table $tbj set tag `tagcol1`=true,`tagcol2`=-10,`tagcol3`=-100, `tagcol4`=-1000,`tagcol5`=NULL,`tagcol6`=NULL,`tagcol7`=NULL

sql alter table $mt drop tag tagCol7
sql alter table $mt drop tag tagCol3

sql alter table $mt add tag tagCol8 int

#set not exist tag and value
sql_error alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=NULL,tagCol6=NULL,tagCol7=NULL
sql_error alter table $tbj set tag tagCol1=true,tagCol2=-10,tagcol3=-100, tagcol4=-1000,tagcol5=NULL,tagCol6=NULL,tagCol7=NULL

sql alter table $tbj set tag tagCol8 = 8

sql select * from $mt where tagCol4 = -1000
if $rows != 1 then
  return -1
endi

sql select * from $mt where tagCol8 = 8
if $rows != 1 then
  return -1
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT

@@ -145,7 +145,6 @@ class TDTestCase:
            'select * from ``',
            'alter table meters add column `` int',
            'alter table meters drop column ``',
            'alter table t0 set tag `` = ""',
            'alter stable meters add tag `` int',
            'alter stable meters rename tag cc ``',
            'alter stable meters drop tag ``',

@@ -14,7 +14,7 @@ sys.path.append("./7-tmq")
from tmqCommon import *

class TDTestCase:
    updatecfgDict = {'clientCfg': {'smlChildTableName': 'dataModelName', 'fqdn': 'localhost', 'smlTsDefaultName': "times"}, 'fqdn': 'localhost'}
    updatecfgDict = {'clientCfg': {'smlChildTableName': 'dataModelName', 'fqdn': 'localhost'}, 'fqdn': 'localhost'}
    print("===================: ", updatecfgDict)

    def init(self, conn, logSql, replicaVar=1):

@@ -58,7 +58,7 @@ class TDTestCase:
        tdSql.query(f"select distinct tbname from {dbname}.readings")
        tdSql.checkRows(4)

        tdSql.query(f"select * from {dbname}.t_0799064f5487946e5d22164a822acfc8 order by times")
        tdSql.query(f"select * from {dbname}.t_0799064f5487946e5d22164a822acfc8 order by _ts")
        tdSql.checkRows(2)
        tdSql.checkData(0, 3, "kk")
        tdSql.checkData(1, 3, "")

@@ -67,7 +67,7 @@ class TDTestCase:
        tdSql.query(f"select distinct tbname from {dbname}.`sys_if_bytes_out`")
        tdSql.checkRows(2)

        tdSql.query(f"select * from {dbname}.t_f67972b49aa8adf8bca5d0d54f0d850d order by times")
        tdSql.query(f"select * from {dbname}.t_f67972b49aa8adf8bca5d0d54f0d850d order by _ts")
        tdSql.checkRows(2)
        tdSql.checkData(0, 1, 1.300000000)
        tdSql.checkData(1, 1, 13.000000000)

@@ -80,7 +80,7 @@ class TDTestCase:
        tdSql.query(f"select distinct tbname from {dbname}.`sys_cpu_nice`")
        tdSql.checkRows(3)

        tdSql.query(f"select * from {dbname}.`sys_cpu_nice` order by times")
        tdSql.query(f"select * from {dbname}.`sys_cpu_nice` order by _ts")
        tdSql.checkRows(4)
        tdSql.checkData(0, 1, 13.000000000)
        tdSql.checkData(0, 2, "web01")

@@ -0,0 +1,67 @@
from util.log import *
from util.cases import *
from util.sql import *
from util.common import *
import taos


class TDTestCase:
    clientCfgDict = {'debugFlag': 135}
    updatecfgDict = {
        "debugFlag" : "135",
        "queryBufferSize" : 10240,
        'clientCfg' : clientCfgDict
    }

    def init(self, conn, logSql, replicaVal=1):
        self.replicaVar = int(replicaVal)
        tdLog.debug(f"start to excute {__file__}")
        self.conn = conn
        tdSql.init(conn.cursor(), False)
        self.passwd = {'root':'taosdata',
                       'test':'test'}

    def prepare_anode_data(self):
        tdSql.execute(f"create anode '127.0.0.1:6090'")
        tdSql.execute(f"create database db_gpt")
        tdSql.execute(f"create table if not exists db_gpt.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);")
        tdSql.execute(f"create table db_gpt.ct1 using db_gpt.stb tags(1000);")
        tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now-1a, 5)(now+1a, 14)(now+2a, 15)(now+3a, 15)(now+4a, 14);")
        tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now+5a, 19)(now+6a, 17)(now+7a, 16)(now+8a, 20)(now+9a, 22);")
        tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now+10a, 8)(now+11a, 21)(now+12a, 28)(now+13a, 11)(now+14a, 9);")
        tdSql.execute(f"insert into db_gpt.ct1(ts, c1) values(now+15a, 29)(now+16a, 40);")

    def test_forecast(self):
        """
        Test forecast
        """
        tdLog.info(f"Test forecast")
        tdSql.query(f"SELECT _frowts, FORECAST(c1, \"algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5,d=1\") from db_gpt.ct1 ;")
        tdSql.checkRows(10)

    def test_anomaly_window(self):
        """
        Test anomaly window
        """
        tdLog.info(f"Test anomaly window")
        tdSql.query(f"SELECT _wstart, _wend, SUM(c1) FROM db_gpt.ct1 ANOMALY_WINDOW(c1, \"algo=iqr\");")
        tdSql.checkData(0,2,40)

    def run(self):
        self.prepare_anode_data()
        self.test_forecast()
        self.test_anomaly_window()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

@@ -2253,6 +2253,83 @@ int sml_ts5528_test(){
  return 0;
}

int sml_td33048_Test() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);

  TAOS_RES *pRes = taos_query(taos, "drop database if exists td33048");
  taos_free_result(pRes);

  pRes = taos_query(taos, "create database if not exists td33048");
  taos_free_result(pRes);

  // check column name duplication
  const char *sql[] = {
      "alarm_record,tag=alarm_record uid=\"3+8001+c939604c\",deviceId=\"3\",alarmId=\"8001\",alarmStatus=\"false\",lotNo=\"2411A0302\",subMode=\"11\",occurTime=\"2024-11-25 09:31:52.702\" 1732527117484",
  };
  pRes = taos_query(taos, "use td33048");
  taos_free_result(pRes);
  pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL,
                                TSDB_SML_TIMESTAMP_MILLI_SECONDS);
  int code = taos_errno(pRes);
  printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes));
  ASSERT(code == 0);
  taos_free_result(pRes);

  // check tag name duplication
  const char *sql1[] = {
      "alarm_record,tag=alarm_record uid=\"2+100012+303fe9b5\",deviceId=\"2\",alarmId=\"100012\",alarmStatus=\"false\",lotNo=\"2411A0202\",subMode=\"11\",occurTime=\"2024-11-25 09:31:55.591\" 1732527119493",
  };
  pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL,
                                TSDB_SML_TIMESTAMP_MILLI_SECONDS);
  code = taos_errno(pRes);
  printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes));
  ASSERT(code == 0);
  taos_free_result(pRes);

  pRes = taos_query(taos, "select * from alarm_record");
  code = taos_errno(pRes);
  printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes));
  ASSERT(code == 0);
  taos_free_result(pRes);

  taos_close(taos);

  return code;
}

int sml_td17324_Test() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);

  TAOS_RES *pRes = taos_query(taos, "drop database if exists gcbacaefqk");
  taos_free_result(pRes);

  pRes = taos_query(taos, "create database if not exists gcbacaefqk PRECISION 'ns'");
  taos_free_result(pRes);

  pRes = taos_query(taos, "use gcbacaefqk");
  taos_free_result(pRes);

  pRes = taos_query(taos, "create stable gcbacaefqk.test_stb(_ts timestamp, f int) tags(t1 bigint)");
  taos_free_result(pRes);
  // check column name duplication
  const char *sql[] = {
      "st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1732700000364000000",
      "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1732700000361000000",
      "test_stb,t2=5f64,t3=L\"ste\" c1=true,c2=4i64,c3=\"iam\" 1732700000364316532"
  };

  pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL,
                                TSDB_SML_TIMESTAMP_NANO_SECONDS);
  int code = taos_errno(pRes);
  printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes));
  ASSERT(code == 0);
  taos_free_result(pRes);

  taos_close(taos);

  return code;
}

int main(int argc, char *argv[]) {
  if (argc == 2) {
    taos_options(TSDB_OPTION_CONFIGDIR, argv[1]);

@@ -2262,6 +2339,10 @@ int main(int argc, char *argv[]) {
  ASSERT(!ret);
  ret = sml_ts5528_test();
  ASSERT(!ret);
  ret = sml_td33048_Test();
  ASSERT(!ret);
  ret = sml_td17324_Test();
  ASSERT(!ret);
  ret = sml_td29691_Test();
  ASSERT(ret);
  ret = sml_td29373_Test();