Merge branch '3.0' into feature/TD-14481-3.0
This commit is contained in: commit 65deb0541f

@@ -31,38 +31,41 @@ TDengine currently supports Grafana versions 7.5 and above. Users can go to the
### Install Grafana Plugin and Configure Data Source

<Tabs defaultValue="script">
<TabItem value="script" label="Using Script">
<TabItem value="gui" label="With GUI">

Set the URL and authorization environment variables by `export` or a [`.env` (dotenv) file](https://hexdocs.pm/dotenvy/dotenv-file-format.html):
Under Grafana 8, the plugin catalog allows you to [browse and manage plugins within Grafana](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog) (for Grafana 7.x, use **With Script** or **Install & Configure Manually** instead). Find the page at **Configurations > Plugins**, search for **TDengine**, and click it to install.

```sh
export TDENGINE_API=http://tdengine.local:6041
# user + password
export TDENGINE_USER=user
export TDENGINE_PASSWORD=password

# Other useful variables
# - Whether to install the TDengine data source, default is true
export TDENGINE_DS_ENABLED=false
# - Data source name to be created, default is TDengine
export TDENGINE_DS_NAME=TDengine
# - Data source organization id, default is 1
export GF_ORG_ID=1
# - Whether the data source is editable in the admin UI, default is 0 (false)
export TDENGINE_EDITABLE=1
```
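If you prefer the dotenv route mentioned above, the same settings can live in a `.env` file next to the script instead of being exported; a minimal sketch (values are placeholders, adjust them to your deployment):

```sh
# .env -- dotenv equivalent of the exports above
TDENGINE_API=http://tdengine.local:6041
TDENGINE_USER=user
TDENGINE_PASSWORD=password
# optional overrides
TDENGINE_DS_NAME=TDengine
GF_ORG_ID=1
```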
Installation may take a few minutes, then you can **Create a TDengine data source**:

Run `install.sh`:


```sh
bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)"
```
Then you can add a TDengine data source by filling in the configuration options.

With this script, the TDengine data source plugin and the Grafana data source will be installed and created automatically through Grafana provisioning configuration. Save the script and type `./install.sh --help` for the full usage of the script.
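Before running the script (or if the created data source fails its connection test), it can help to confirm that the REST endpoint in `TDENGINE_API` is reachable. A quick check, assuming taosAdapter is listening on port 6041 with the credentials set above:

```sh
# a JSON result set means the REST service is up and the credentials are accepted
curl -u user:password -d "show databases" http://tdengine.local:6041/rest/sql
```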


Then restart the Grafana service and open Grafana in a web browser, usually <http://localhost:3000>.
You can create dashboards with TDengine now.

</TabItem>
<TabItem value="script" label="With Script">

On a server with Grafana installed, running `install.sh` with the TDengine URL and username/password will install the TDengine data source plugin and add a data source named TDengine. This is the recommended way for Grafana 7.x or [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) users.

```sh
bash -c "$(curl -fsSL \
  https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
  -a http://localhost:6041 \
  -u root \
  -p taosdata
```

Restart the Grafana service and open Grafana in a web browser, usually <http://localhost:3000>.

Save the script and type `./install.sh --help` for the full usage of the script.

</TabItem>

<TabItem value="manual" label="Install & Configure Manually">

Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [`grafana-cli` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation.
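On a typical Linux host this boils down to something like the following (a sketch; the service name and plugin directory depend on how Grafana was installed):

```sh
grafana-cli plugins install tdengine-datasource
sudo systemctl restart grafana-server
```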
@@ -115,6 +118,73 @@ Click `Save & Test` to test. You should see a success message if the test worked



</TabItem>
<TabItem value="container" label="Container">

Please refer to [Install plugins in the Docker container](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). This will install the `tdengine-datasource` plugin when the Grafana container starts:

```bash
docker run -d \
  -p 3000:3000 \
  --name=grafana \
  -e "GF_INSTALL_PLUGINS=tdengine-datasource" \
  grafana/grafana
```
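Once the container is running, you can confirm that the plugin was downloaded (assuming the container is named `grafana` as above):

```bash
docker exec grafana grafana-cli plugins ls | grep tdengine-datasource
```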

You can set up a zero-configuration stack for TDengine + Grafana with [docker-compose](https://docs.docker.com/compose/) and a [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file:

1. Save the provisioning configuration file as `tdengine.yml`.
```yml
apiVersion: 1
datasources:
  - name: TDengine
    type: tdengine-datasource
    orgId: 1
    url: "$TDENGINE_API"
    isDefault: true
    secureJsonData:
      url: "$TDENGINE_URL"
      basicAuth: "$TDENGINE_BASIC_AUTH"
      token: "$TDENGINE_CLOUD_TOKEN"
    version: 1
    editable: true
```

2. Write `docker-compose.yml` with the [TDengine](https://hub.docker.com/r/tdengine/tdengine) and [Grafana](https://hub.docker.com/r/grafana/grafana) images.
```yml
version: "3.7"

services:
  tdengine:
    image: tdengine/tdengine:2.6.0.2
    environment:
      TAOS_FQDN: tdengine
    volumes:
      - tdengine-data:/var/lib/taos/
  grafana:
    image: grafana/grafana:8.5.6
    volumes:
      - ./tdengine.yml:/etc/grafana/provisioning/tdengine.yml
      - grafana-data:/var/lib/grafana
    environment:
      # install tdengine plugin at start
      GF_INSTALL_PLUGINS: "tdengine-datasource"
      TDENGINE_URL: "http://tdengine:6041"
      # printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64
      TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
    ports:
      - 3000:3000
volumes:
  grafana-data:
  tdengine-data:
```

3. Start the TDengine and Grafana services: `docker-compose up -d` (a quick verification sketch follows below).

Open Grafana at <http://localhost:3000>, and you can add dashboards with TDengine now.
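A note on the compose file above: `TDENGINE_BASIC_AUTH` is simply the Base64 encoding of `user:password`, so you can regenerate it for your own credentials and then sanity-check the stack (assuming the default ports shown above):

```bash
printf "root:taosdata" | base64           # value for TDENGINE_BASIC_AUTH
docker-compose ps                         # both services should be "Up"
curl -s http://localhost:3000/api/health  # Grafana reports "database": "ok" when ready
```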

</TabItem>
</Tabs>
(new image file added: 28 KiB)
(new image file added: 87 KiB)
(new image file added: 34 KiB)
@@ -29,39 +29,41 @@ TDengine can be integrated with the open-source data visualization system [Grafana](https://www.grafana.com/

### Install Grafana Plugin and Configure Data Source

<Tabs defaultValue="script">
<TabItem value="gui" label="With GUI">

With the latest Grafana version (8.5+), you can [browse and manage plugins within Grafana](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog) (for 7.x, use the **With Script** or **Install & Configure Manually** method instead). On the **Configurations > Plugins** page of the Grafana admin UI, search for TDengine and install it as prompted.



Once the installation completes as shown, follow the prompt **Create a TDengine data source** to add the data source.



Fill in the TDengine configuration to complete the data source setup.



Once configured, you can now create dashboards with TDengine.

</TabItem>
<TabItem value="script" label="With Script">

Set the cluster information as environment variables; you can also use a `.env` file, see [dotenv](https://hexdocs.pm/dotenvy/dotenv-file-format.html):
For users on Grafana 7.x or using [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/), you can run the install script on the Grafana server to install the plugin and add the data source provisioning configuration file automatically.

```sh
export TDENGINE_API=http://tdengine.local:6041
# user + password
export TDENGINE_USER=user
export TDENGINE_PASSWORD=password

# Other environment variables:
# - Whether to install the data source, default is true (install)
export TDENGINE_DS_ENABLED=false
# - Data source name, default is TDengine
export TDENGINE_DS_NAME=TDengine
# - Organization ID the data source belongs to, default is 1
export GF_ORG_ID=1
# - Whether the data source is editable in the admin panel, default is 0 (not editable)
export TDENGINE_EDITABLE=1
bash -c "$(curl -fsSL \
  https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
  -a http://localhost:6041 \
  -u root \
  -p taosdata
```

Run the install script:

```sh
bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)"
```

The script installs the Grafana plugin and configures the data source automatically. After installation, restart the Grafana service for it to take effect.
After installation, the Grafana service must be restarted before the changes take effect.

Save the script and run `./install.sh --help` for the full usage of the script.

</TabItem>
<TabItem value="manual" label="Install & Configure Manually">
<TabItem value="manual" label="Manual Install">

Use the [`grafana-cli` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) to [install](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) the plugin.

@@ -113,6 +115,73 @@ GF_INSTALL_PLUGINS=tdengine-datasource



</TabItem>
<TabItem value="container" label="K8s/Docker Container">

Refer to [Install plugins in the Docker container](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). Use the following command to start a container and install the TDengine plugin automatically:

```bash
docker run -d \
  -p 3000:3000 \
  --name=grafana \
  -e "GF_INSTALL_PLUGINS=tdengine-datasource" \
  grafana/grafana
```

Using docker-compose with Grafana provisioning, you can bring up a zero-configuration TDengine + Grafana stack:

1. Save the following provisioning configuration as `tdengine.yml`.

```yml
apiVersion: 1
datasources:
  - name: TDengine
    type: tdengine-datasource
    orgId: 1
    url: "$TDENGINE_API"
    isDefault: true
    secureJsonData:
      url: "$TDENGINE_URL"
      basicAuth: "$TDENGINE_BASIC_AUTH"
      token: "$TDENGINE_CLOUD_TOKEN"
    version: 1
    editable: true
```

2. Save the following as `docker-compose.yml`.

```yml
version: "3.7"

services:
  tdengine:
    image: tdengine/tdengine:2.6.0.2
    environment:
      TAOS_FQDN: tdengine
    volumes:
      - tdengine-data:/var/lib/taos/
  grafana:
    image: grafana/grafana:8.5.6
    volumes:
      - ./tdengine.yml:/etc/grafana/provisioning/tdengine.yml
      - grafana-data:/var/lib/grafana
    environment:
      # install tdengine plugin at start
      GF_INSTALL_PLUGINS: "tdengine-datasource"
      TDENGINE_URL: "http://tdengine:6041"
      # printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64
      TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
    ports:
      - 3000:3000
volumes:
  grafana-data:
  tdengine-data:
```

3. Start TDengine + Grafana with docker-compose: `docker-compose up -d`.

Open Grafana at <http://localhost:3000>, and you can add dashboards now.

</TabItem>
</Tabs>
(new image file added: 28 KiB)
(new image file added: 87 KiB)
(new image file added: 34 KiB)
@@ -128,7 +128,7 @@ typedef struct STscObj {
  int8_t connType;
  int32_t acctId;
  uint32_t connId;
  uint64_t id; // ref ID returned by taosAddRef
  TAOS *id; // ref ID returned by taosAddRef
  TdThreadMutex mutex; // used to protect the operation on db
  int32_t numOfReqs; // number of sqlObj bound to this connection
  SAppInstInfo* pAppInfo;
@@ -38,7 +38,7 @@ static TdThreadOnce tscinit = PTHREAD_ONCE_INIT;
volatile int32_t tscInitRes = 0;

static void registerRequest(SRequestObj *pRequest) {
  STscObj *pTscObj = acquireTscObj(pRequest->pTscObj->id);
  STscObj *pTscObj = acquireTscObj(*(int64_t*)pRequest->pTscObj->id);

  assert(pTscObj != NULL);

@@ -54,7 +54,7 @@ static void registerRequest(SRequestObj *pRequest) {
    int32_t currentInst = atomic_add_fetch_64((int64_t *)&pSummary->currentRequests, 1);
    tscDebug("0x%" PRIx64 " new Request from connObj:0x%" PRIx64
             ", current:%d, app current:%d, total:%d, reqId:0x%" PRIx64,
             pRequest->self, pRequest->pTscObj->id, num, currentInst, total, pRequest->requestId);
             pRequest->self, *(int64_t*)pRequest->pTscObj->id, num, currentInst, total, pRequest->requestId);
  }
}

@@ -70,8 +70,8 @@ static void deregisterRequest(SRequestObj *pRequest) {
  int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
  tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64
           " ms, current:%d, app current:%d",
           pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
  releaseTscObj(pTscObj->id);
           pRequest->self, *(int64_t*)pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
  releaseTscObj(*(int64_t*)pTscObj->id);
}

// todo close the transporter properly
@@ -80,7 +80,7 @@ void closeTransporter(STscObj *pTscObj) {
    return;
  }

  tscDebug("free transporter:%p in connObj: 0x%" PRIx64, pTscObj->pAppInfo->pTransporter, pTscObj->id);
  tscDebug("free transporter:%p in connObj: 0x%" PRIx64, pTscObj->pAppInfo->pTransporter, *(int64_t*)pTscObj->id);
  rpcClose(pTscObj->pAppInfo->pTransporter);
}

@@ -128,7 +128,7 @@ void closeAllRequests(SHashObj *pRequests) {
void destroyTscObj(void *pObj) {
  STscObj *pTscObj = pObj;

  SClientHbKey connKey = {.tscRid = pTscObj->id, .connType = pTscObj->connType};
  SClientHbKey connKey = {.tscRid = *(int64_t*)pTscObj->id, .connType = pTscObj->connType};
  hbDeregisterConn(pTscObj->pAppInfo->pAppHbMgr, connKey);
  int64_t connNum = atomic_sub_fetch_64(&pTscObj->pAppInfo->numOfConns, 1);
  closeAllRequests(pTscObj->pRequests);

@@ -137,7 +137,7 @@ void destroyTscObj(void *pObj) {
    // TODO
    //closeTransporter(pTscObj);
  }
  tscDebug("connObj 0x%" PRIx64 " destroyed, totalConn:%" PRId64, pTscObj->id, pTscObj->pAppInfo->numOfConns);
  tscDebug("connObj 0x%" PRIx64 " destroyed, totalConn:%" PRId64, *(int64_t*)pTscObj->id, pTscObj->pAppInfo->numOfConns);
  taosThreadMutexDestroy(&pTscObj->mutex);
  taosMemoryFreeClear(pTscObj);
}

@@ -166,10 +166,11 @@ void *createTscObj(const char *user, const char *auth, const char *db, int32_t c
  }

  taosThreadMutexInit(&pObj->mutex, NULL);
  pObj->id = taosAddRef(clientConnRefPool, pObj);
  pObj->id = taosMemoryMalloc(sizeof(int64_t));
  *(int64_t*)pObj->id = taosAddRef(clientConnRefPool, pObj);
  pObj->schemalessType = 1;

  tscDebug("connObj created, 0x%" PRIx64, pObj->id);
  tscDebug("connObj created, 0x%" PRIx64, *(int64_t*)pObj->id);
  return pObj;
}
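The hunks above change `STscObj.id` from a raw ref ID into a heap-allocated ID whose address is handed out as the opaque `TAOS*`, which is why every call site now dereferences `*(int64_t*)` and `taos_close()` frees the handle. A minimal standalone sketch of that handle pattern (illustrative names, not the TDengine implementation):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef void TAOS;  /* opaque connection handle, as in taos.h */

/* stand-in for taosAddRef(clientConnRefPool, obj) */
static int64_t addRef(void *obj) {
  (void)obj;
  return 0x71df;
}

/* createTscObj() now returns a pointer to a heap-allocated ref ID ... */
static TAOS *connOpen(void) {
  int64_t *id = malloc(sizeof(int64_t));
  *id = addRef(id);
  return (TAOS *)id;
}

/* ... and call sites dereference the handle to recover the ID,
 * mirroring acquireTscObj(*(int64_t*)taos) in the diff. */
static void connUse(TAOS *taos) {
  printf("acquire connection ref 0x%" PRIx64 "\n", *(int64_t *)taos);
}

/* taos_close() can then free the handle itself (taosMemoryFree(taos) above). */
static void connClose(TAOS *taos) { free(taos); }

int main(void) {
  TAOS *conn = connOpen();
  connUse(conn);
  connClose(conn);
  return 0;
}
```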
@@ -263,6 +263,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
int32_t asyncExecDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
  // drop table if exists not_exists_table
  if (NULL == pQuery->pCmdMsg) {
    pRequest->body.queryFp(pRequest->body.param, pRequest, 0);
    return TSDB_CODE_SUCCESS;
  }

@@ -609,6 +610,16 @@ void schedulerExecCb(SQueryResult* pResult, void* param, int32_t code) {
  SRequestObj* pRequest = (SRequestObj*)param;
  pRequest->code = code;

  if (TDMT_VND_SUBMIT == pRequest->type || TDMT_VND_DELETE == pRequest->type ||
      TDMT_VND_CREATE_TABLE == pRequest->type) {
    pRequest->body.resInfo.numOfRows = pResult->numOfRows;

    if (pRequest->body.queryJob != 0) {
      schedulerFreeJob(pRequest->body.queryJob, 0);
      pRequest->body.queryJob = 0;
    }
  }

  tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64,
           pRequest->self, code, tstrerror(code), pRequest->requestId);

@@ -712,7 +723,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery) {
      code = asyncExecDdlQuery(pRequest, pQuery);
      break;
    case QUERY_EXEC_MODE_SCHEDULE: {
      SArray* pNodeList = taosArrayInit(4, sizeof(struct SQueryNodeAddr));
      SArray* pNodeList = taosArrayInit(4, sizeof(SQueryNodeLoad));

      pRequest->type = pQuery->msgType;

@@ -725,13 +736,11 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery) {
                           .msgLen = ERROR_MSG_BUF_DEFAULT_SIZE};

      SAppInstInfo* pAppInfo = getAppInfo(pRequest);
      if (TSDB_CODE_SUCCESS == code) {
        code = qCreateQueryPlan(&cxt, &pRequest->body.pDag, pNodeList);
        if (code) {
          tscError("0x%" PRIx64 " failed to create query plan, code:%s 0x%" PRIx64, pRequest->self, tstrerror(code),
                   pRequest->requestId);
        }
      }

      if (TSDB_CODE_SUCCESS == code) {
        SRequestConnInfo conn = {

@@ -927,7 +936,7 @@ STscObj* taosConnectImpl(const char* user, const char* auth, const char* db, __t
    taos_close_internal(pTscObj);
    pTscObj = NULL;
  } else {
    tscDebug("0x%" PRIx64 " connection is opening, connId:%u, dnodeConn:%p, reqId:0x%" PRIx64, pTscObj->id,
    tscDebug("0x%" PRIx64 " connection is opening, connId:%u, dnodeConn:%p, reqId:0x%" PRIx64, *(int64_t*)pTscObj->id,
             pTscObj->connId, pTscObj->pAppInfo->pTransporter, pRequest->requestId);
    destroyRequest(pRequest);
  }

@@ -1090,10 +1099,10 @@ TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, cons

  STscObj* pObj = taos_connect_internal(ip, user, NULL, auth, db, port, CONN_TYPE__QUERY);
  if (pObj) {
    return (TAOS*)pObj->id;
    return pObj->id;
  }

  return (TAOS*)0;
  return NULL;
}

TAOS* taos_connect_l(const char* ip, int ipLen, const char* user, int userLen, const char* pass, int passLen,
@@ -99,10 +99,10 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha

  STscObj* pObj = taos_connect_internal(ip, user, pass, NULL, db, port, CONN_TYPE__QUERY);
  if (pObj) {
    return (TAOS*)pObj->id;
    return pObj->id;
  }

  return (TAOS*)0;
  return NULL;
}

void taos_close_internal(void *taos) {

@@ -111,19 +111,24 @@ void taos_close_internal(void *taos) {
  }

  STscObj *pTscObj = (STscObj *)taos;
  tscDebug("0x%" PRIx64 " try to close connection, numOfReq:%d", pTscObj->id, pTscObj->numOfReqs);
  tscDebug("0x%" PRIx64 " try to close connection, numOfReq:%d", *(int64_t*)pTscObj->id, pTscObj->numOfReqs);

  taosRemoveRef(clientConnRefPool, pTscObj->id);
  taosRemoveRef(clientConnRefPool, *(int64_t*)pTscObj->id);
}

void taos_close(TAOS *taos) {
  STscObj* pObj = acquireTscObj((int64_t)taos);
  if (taos == NULL) {
    return;
  }

  STscObj* pObj = acquireTscObj(*(int64_t*)taos);
  if (NULL == pObj) {
    return;
  }

  taos_close_internal(pObj);
  releaseTscObj((int64_t)taos);
  releaseTscObj(*(int64_t*)taos);
  taosMemoryFree(taos);
}

@@ -206,7 +211,12 @@ static void syncQueryFn(void *param, void *res, int32_t code) {
}

TAOS_RES *taos_query(TAOS *taos, const char *sql) {
  STscObj* pTscObj = acquireTscObj((int64_t)taos);
  if (NULL == taos) {
    terrno = TSDB_CODE_TSC_DISCONNECTED;
    return NULL;
  }

  STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
  if (pTscObj == NULL || sql == NULL) {
    terrno = TSDB_CODE_TSC_DISCONNECTED;
    return NULL;

@@ -219,13 +229,13 @@ TAOS_RES *taos_query(TAOS *taos, const char *sql) {
  taos_query_a(taos, sql, syncQueryFn, param);
  tsem_wait(&param->sem);

  releaseTscObj((int64_t)taos);
  releaseTscObj(*(int64_t*)taos);

  return param->pRequest;
#else
  size_t sqlLen = strlen(sql);
  if (sqlLen > (size_t)TSDB_MAX_ALLOWED_SQL_LEN) {
    releaseTscObj((int64_t)taos);
    releaseTscObj(*(int64_t*)taos);
    tscError("sql string exceeds max length:%d", TSDB_MAX_ALLOWED_SQL_LEN);
    terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
    return NULL;

@@ -233,7 +243,7 @@ TAOS_RES *taos_query(TAOS *taos, const char *sql) {

  TAOS_RES* pRes = execQuery(pTscObj, sql, sqlLen);

  releaseTscObj((int64_t)taos);
  releaseTscObj(*(int64_t*)taos);

  return pRes;
#endif
@@ -453,15 +463,15 @@ int taos_result_precision(TAOS_RES *res) {
}

int taos_select_db(TAOS *taos, const char *db) {
  STscObj* pObj = acquireTscObj((int64_t)taos);
  STscObj* pObj = acquireTscObj(*(int64_t*)taos);
  if (pObj == NULL) {
    releaseTscObj((int64_t)taos);
    releaseTscObj(*(int64_t*)taos);
    terrno = TSDB_CODE_TSC_DISCONNECTED;
    return TSDB_CODE_TSC_DISCONNECTED;
  }

  if (db == NULL || strlen(db) == 0) {
    releaseTscObj((int64_t)taos);
    releaseTscObj(*(int64_t*)taos);
    terrno = TSDB_CODE_TSC_INVALID_INPUT;
    return terrno;
  }

@@ -473,7 +483,7 @@ int taos_select_db(TAOS *taos, const char *db) {
  int32_t code = taos_errno(pRequest);

  taos_free_result(pRequest);
  releaseTscObj((int64_t)taos);
  releaseTscObj(*(int64_t*)taos);
  return code;
}

@@ -626,7 +636,7 @@ int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex) {
int taos_validate_sql(TAOS *taos, const char *sql) { return true; }

void taos_reset_current_db(TAOS *taos) {
  STscObj* pTscObj = acquireTscObj((int64_t)taos);
  STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
  if (pTscObj == NULL) {
    terrno = TSDB_CODE_TSC_DISCONNECTED;
    return;

@@ -634,17 +644,17 @@ void taos_reset_current_db(TAOS *taos) {

  resetConnectDB(pTscObj);

  releaseTscObj((int64_t)taos);
  releaseTscObj(*(int64_t*)taos);
}

const char *taos_get_server_info(TAOS *taos) {
  STscObj* pTscObj = acquireTscObj((int64_t)taos);
  STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
  if (pTscObj == NULL) {
    terrno = TSDB_CODE_TSC_DISCONNECTED;
    return NULL;
  }

  releaseTscObj((int64_t)taos);
  releaseTscObj(*(int64_t*)taos);

  return pTscObj->ver;
}

@@ -671,6 +681,8 @@ static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) {
}

void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
  tscDebug("enter meta callback, code %s", tstrerror(code));

  SqlParseWrapper *pWrapper = (SqlParseWrapper *)param;
  SQuery *pQuery = pWrapper->pQuery;
  SRequestObj *pRequest = pWrapper->pRequest;

@@ -711,11 +723,11 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
}

void taos_query_a(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param) {
  STscObj* pTscObj = acquireTscObj((int64_t)taos);
  STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
  if (pTscObj == NULL || sql == NULL || NULL == fp) {
    terrno = TSDB_CODE_INVALID_PARA;
    if (pTscObj) {
      releaseTscObj((int64_t)taos);
      releaseTscObj(*(int64_t*)taos);
    } else {
      terrno = TSDB_CODE_TSC_DISCONNECTED;
    }

@@ -936,7 +948,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
}

TAOS_STMT *taos_stmt_init(TAOS *taos) {
  STscObj* pObj = acquireTscObj((int64_t)taos);
  STscObj* pObj = acquireTscObj(*(int64_t*)taos);
  if (NULL == pObj) {
    tscError("invalid parameter for %s", __FUNCTION__);
    terrno = TSDB_CODE_TSC_DISCONNECTED;

@@ -945,7 +957,7 @@ TAOS_STMT *taos_stmt_init(TAOS *taos) {

  TAOS_STMT* pStmt = stmtInit(pObj);

  releaseTscObj((int64_t)taos);
  releaseTscObj(*(int64_t*)taos);

  return pStmt;
}
@@ -77,7 +77,7 @@ int32_t processConnectRsp(void* param, const SDataBuf* pMsg, int32_t code) {

  for (int32_t i = 0; i < connectRsp.epSet.numOfEps; ++i) {
    tscDebug("0x%" PRIx64 " epSet.fqdn[%d]:%s port:%d, connObj:0x%" PRIx64, pRequest->requestId, i,
             connectRsp.epSet.eps[i].fqdn, connectRsp.epSet.eps[i].port, pTscObj->id);
             connectRsp.epSet.eps[i].fqdn, connectRsp.epSet.eps[i].port, *(int64_t*)pTscObj->id);
  }

  pTscObj->connId = connectRsp.connId;

@@ -90,7 +90,7 @@ int32_t processConnectRsp(void* param, const SDataBuf* pMsg, int32_t code) {

  pTscObj->connType = connectRsp.connType;

  hbRegisterConn(pTscObj->pAppInfo->pAppHbMgr, pTscObj->id, connectRsp.clusterId, connectRsp.connType);
  hbRegisterConn(pTscObj->pAppInfo->pAppHbMgr, *(int64_t*)pTscObj->id, connectRsp.clusterId, connectRsp.connType);

  // pRequest->body.resInfo.pRspMsg = pMsg->pData;
  tscDebug("0x%" PRIx64 " clusterId:%" PRId64 ", totalConn:%" PRId64, pRequest->requestId, connectRsp.clusterId,
@@ -309,7 +309,7 @@ static int32_t smlApplySchemaAction(SSmlHandle *info, SSchemaAction *action) {
    case SCHEMA_ACTION_ADD_COLUMN: {
      int n = sprintf(result, "alter stable `%s` add column ", action->alterSTable.sTableName);
      smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
      TAOS_RES *res = taos_query((TAOS*)info->taos->id, result);  // TODO async doAsyncQuery
      TAOS_RES *res = taos_query(info->taos->id, result);  // TODO async doAsyncQuery
      code = taos_errno(res);
      const char *errStr = taos_errstr(res);
      if (code != TSDB_CODE_SUCCESS) {

@@ -323,7 +323,7 @@ static int32_t smlApplySchemaAction(SSmlHandle *info, SSchemaAction *action) {
    case SCHEMA_ACTION_ADD_TAG: {
      int n = sprintf(result, "alter stable `%s` add tag ", action->alterSTable.sTableName);
      smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
      TAOS_RES *res = taos_query((TAOS*)info->taos->id, result);  // TODO async doAsyncQuery
      TAOS_RES *res = taos_query(info->taos->id, result);  // TODO async doAsyncQuery
      code = taos_errno(res);
      const char *errStr = taos_errstr(res);
      if (code != TSDB_CODE_SUCCESS) {

@@ -337,7 +337,7 @@ static int32_t smlApplySchemaAction(SSmlHandle *info, SSchemaAction *action) {
    case SCHEMA_ACTION_CHANGE_COLUMN_SIZE: {
      int n = sprintf(result, "alter stable `%s` modify column ", action->alterSTable.sTableName);
      smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
      TAOS_RES *res = taos_query((TAOS*)info->taos->id, result);  // TODO async doAsyncQuery
      TAOS_RES *res = taos_query(info->taos->id, result);  // TODO async doAsyncQuery
      code = taos_errno(res);
      if (code != TSDB_CODE_SUCCESS) {
        uError("SML:0x%" PRIx64 " apply schema action. error : %s", info->id, taos_errstr(res));

@@ -350,7 +350,7 @@ static int32_t smlApplySchemaAction(SSmlHandle *info, SSchemaAction *action) {
    case SCHEMA_ACTION_CHANGE_TAG_SIZE: {
      int n = sprintf(result, "alter stable `%s` modify tag ", action->alterSTable.sTableName);
      smlBuildColumnDescription(action->alterSTable.field, result + n, capacity - n, &outBytes);
      TAOS_RES *res = taos_query((TAOS*)info->taos->id, result);  // TODO async doAsyncQuery
      TAOS_RES *res = taos_query(info->taos->id, result);  // TODO async doAsyncQuery
      code = taos_errno(res);
      if (code != TSDB_CODE_SUCCESS) {
        uError("SML:0x%" PRIx64 " apply schema action. error : %s", info->id, taos_errstr(res));

@@ -405,7 +405,7 @@ static int32_t smlApplySchemaAction(SSmlHandle *info, SSchemaAction *action) {
        pos--;
        ++freeBytes;
        outBytes = snprintf(pos, freeBytes, ")");
        TAOS_RES *res = taos_query((TAOS*)info->taos->id, result);
        TAOS_RES *res = taos_query(info->taos->id, result);
        code = taos_errno(res);
        if (code != TSDB_CODE_SUCCESS) {
          uError("SML:0x%" PRIx64 " apply schema action. error : %s", info->id, taos_errstr(res));

@@ -2434,7 +2434,7 @@ static void smlInsertCallback(void *param, void *res, int32_t code) {
*/

TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int protocol, int precision) {
  STscObj* pTscObj = acquireTscObj((int64_t)taos);
  STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
  if (NULL == pTscObj) {
    terrno = TSDB_CODE_TSC_DISCONNECTED;
    uError("SML:taos_schemaless_insert invalid taos");

@@ -2443,7 +2443,7 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr

  SRequestObj* request = (SRequestObj*)createRequest(pTscObj, TSDB_SQL_INSERT);
  if(!request){
    releaseTscObj((int64_t)taos);
    releaseTscObj(*(int64_t*)taos);
    uError("SML:taos_schemaless_insert error request is null");
    return NULL;
  }

@@ -2531,6 +2531,6 @@ end:
  // ((STscObj *)taos)->schemalessType = 0;
  pTscObj->schemalessType = 1;
  uDebug("resultend:%s", request->msgBuf);
  releaseTscObj((int64_t)taos);
  releaseTscObj(*(int64_t*)taos);
  return (TAOS_RES*)request;
}
@@ -204,8 +204,8 @@ SArray *mmGetMsgHandles() {

  if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_CONTINUE, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_HEARTBEAT, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

@@ -213,7 +213,7 @@ SArray *mmGetMsgHandles() {
  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TASK, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DEPLOY_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TASK_DROP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
  if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
@@ -35,7 +35,7 @@ typedef struct SPhysiPlanContext {
  int32_t errCode;
  int16_t nextDataBlockId;
  SArray* pLocationHelper;
  SArray* pExecNodeList;
  SArray* pExecNodeList;  // SArray<SQueryNodeLoad>
} SPhysiPlanContext;

static int32_t getSlotKey(SNode* pNode, const char* pStmtName, char* pKey) {

@@ -459,7 +459,7 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla
    }
    vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
    SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0};
    taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
    taosArrayPush(pCxt->pExecNodeList, &node);
    return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTagScan, pPhyNode);
  }

@@ -532,7 +532,7 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan*
    if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_USER_TABLES)) {
      vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode);
      SQueryNodeLoad node = {.addr = pSubplan->execNode, .load = 0};
      taosArrayPush(pCxt->pExecNodeList, &pSubplan->execNode);
      taosArrayPush(pCxt->pExecNodeList, &node);
    } else {
      SQueryNodeLoad node = {.addr = {.nodeId = MNODE_HANDLE, .epSet = pCxt->pPlanCxt->mgmtEpSet}, .load = 0};
      taosArrayPush(pCxt->pExecNodeList, &node);
@@ -210,7 +210,7 @@ typedef struct SSchJob {
  int32_t levelNum;
  int32_t taskNum;
  SRequestConnInfo conn;
  SArray *nodeList;  // qnode/vnode list, SArray<SQueryNodeAddr>
  SArray *nodeList;  // qnode/vnode list, SArray<SQueryNodeLoad>
  SArray *levels;    // starting from 0. SArray<SSchLevel>
  SNodeList *subPlans;  // subplan pointer copied from DAG, no need to free it in scheduler

@@ -614,7 +614,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) {
      ++pJob->taskNum;
    }

    SCH_JOB_DLOG("level initialized, taskNum:%d", taskNum);
    SCH_JOB_DLOG("level %d initialized, taskNum:%d", i, taskNum);
  }

  SCH_ERR_JRET(schBuildTaskRalation(pJob, planToTask));

@@ -636,7 +636,8 @@ int32_t schSetAddrsFromNodeList(SSchJob *pJob, SSchTask *pTask) {
    nodeNum = taosArrayGetSize(pJob->nodeList);

    for (int32_t i = 0; i < nodeNum && addNum < SCH_MAX_CANDIDATE_EP_NUM; ++i) {
      SQueryNodeAddr *naddr = taosArrayGet(pJob->nodeList, i);
      SQueryNodeLoad *nload = taosArrayGet(pJob->nodeList, i);
      SQueryNodeAddr *naddr = &nload->addr;

      if (NULL == taosArrayPush(pTask->candidateAddrs, naddr)) {
        SCH_TASK_ELOG("taosArrayPush execNode to candidate addrs failed, addNum:%d, errno:%d", addNum, errno);
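For context on the planner and scheduler hunks above: `pExecNodeList`/`nodeList` now carry `SQueryNodeLoad` entries (a node address plus a load figure) instead of bare `SQueryNodeAddr` values, so consumers unwrap `.addr`. A simplified sketch of the shape of that change (types reduced to a minimum, not the TDengine headers):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int32_t nodeId;  /* epSet omitted for brevity */
} SQueryNodeAddr;

typedef struct {
  SQueryNodeAddr addr;  /* where a task can run */
  uint64_t       load;  /* reported load of that node */
} SQueryNodeLoad;

int main(void) {
  SQueryNodeLoad nodes[] = {{{1}, 0}, {{2}, 42}};
  for (int i = 0; i < (int)(sizeof(nodes) / sizeof(nodes[0])); ++i) {
    /* consumers now read the address through the wrapper,
     * as schSetAddrsFromNodeList() does in the diff */
    const SQueryNodeAddr *naddr = &nodes[i].addr;
    printf("candidate nodeId=%" PRId32 " load=%" PRIu64 "\n", naddr->nodeId, nodes[i].load);
  }
  return 0;
}
```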