commit 9c41f5cdbf
Merge branch 'feature/query' of github.com:taosdata/TDengine into feature/query
@@ -4,6 +4,9 @@
 [submodule "src/connector/grafanaplugin"]
 	path = src/connector/grafanaplugin
 	url = https://github.com/taosdata/grafanaplugin
+[submodule "tests/examples/rust"]
+	path = tests/examples/rust
+	url = https://github.com/songtianyi/tdengine-rust-bindings.git
 [submodule "src/connector/hivemq-tdengine-extension"]
 	path = src/connector/hivemq-tdengine-extension
 	url = https://github.com/huskar-t/hivemq-tdengine-extension.git
@@ -61,7 +61,7 @@ The use of each configuration item is:

 * **port**: This is the `http` service port which enables other application to manage rules by `restful API`.
 * **database**: rules are stored in a `sqlite` database, this is the path of the database file (if the file does not exist, the alert application creates it automatically).
-* **tdengine**: connection string of `TDEngine` server, note the database name should be put in the `sql` field of a rule in most cases, thus it should NOT be included in the string.
+* **tdengine**: connection string of `TDEngine` server (please refer to the documentation of the GO connector for the detailed format of this string); note the database name should be put in the `sql` field of a rule in most cases, thus it should NOT be included in the string.
 * **log > level**: log level, could be `production` or `debug`.
 * **log > path**: log output file path.
 * **receivers > alertManager**: the alert application pushes alerts to `AlertManager` at this URL.
@@ -58,7 +58,7 @@ $ go build

 * **port**: the alert application supports managing rules through a `restful API`; this parameter sets the listening port of the `http` service.
 * **database**: the alert application stores its rules in a `sqlite` database; this parameter specifies the path of the database file (there is no need to create the file in advance; if it does not exist, the application creates it automatically).
-* **tdengine**: the connection string of `TDEngine`; in general the database name should be specified in the `sql` statement of a rule, so this string should NOT contain a database name.
+* **tdengine**: the connection string of `TDEngine` (please refer to the documentation of the GO connector for the detailed format of this string); in general the database name should be specified in the `sql` statement of a rule, so this string should NOT contain a database name.
 * **log > level**: the logging level, either `production` or `debug`.
 * **log > path**: the path of the log file.
 * **receivers > alertManager**: the alert application pushes alerts to `AlertManager`; specify the receiving address of `AlertManager` here.
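The configuration items documented above (in both the English and Chinese README hunks) map onto one small, nested structure. The sketch below is illustrative only: the `appConfig` struct, the JSON encoding, and the `config.json` file name are assumptions rather than the alert application's actual types or file layout, but it shows how the documented keys (`port`, `database`, `tdengine`, `log.level`, `log.path`, `receivers.alertManager`) fit together in Go.

```go
// Illustrative sketch only: struct name, JSON encoding, and file name are
// assumptions, not the alert application's real configuration types.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type appConfig struct {
	Port     int    `json:"port"`     // http service port for the restful rule-management API
	Database string `json:"database"` // path of the sqlite database file that stores the rules
	TDengine string `json:"tdengine"` // TDengine connection string, without a database name
	Log      struct {
		Level string `json:"level"` // "production" or "debug"
		Path  string `json:"path"`  // log output file path
	} `json:"log"`
	Receivers struct {
		AlertManager string `json:"alertManager"` // URL that alerts are pushed to
	} `json:"receivers"`
}

func main() {
	data, err := os.ReadFile("config.json") // hypothetical config file path
	if err != nil {
		fmt.Println("read config:", err)
		return
	}
	var cfg appConfig
	if err := json.Unmarshal(data, &cfg); err != nil {
		fmt.Println("parse config:", err)
		return
	}
	fmt.Printf("serving rule API on port %d, pushing alerts to %s\n",
		cfg.Port, cfg.Receivers.AlertManager)
}
```

Reading the file once at startup and failing fast on a parse error keeps the rule engine from running with a half-applied configuration.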
@@ -84,6 +84,7 @@ func (alert *Alert) doRefresh(firing bool, rule *Rule) bool {

	case firing && (alert.State == AlertStateWaiting):
		alert.StartsAt = time.Now()
+		alert.EndsAt = time.Time{}
		if rule.For.Nanoseconds() > 0 {
			alert.State = AlertStatePending
			return false
@@ -95,6 +96,7 @@ func (alert *Alert) doRefresh(firing bool, rule *Rule) bool {
			return false
		}
		alert.StartsAt = alert.StartsAt.Add(rule.For.Duration)
+		alert.EndsAt = time.Time{}
		alert.State = AlertStateFiring

	case firing && (alert.State == AlertStateFiring):
@@ -84,9 +84,9 @@ typedef struct SRetrieveSupport {
 } SRetrieveSupport;

 int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pDesc,
-                                 SColumnModel **pFinalModel, uint32_t nBufferSize);
+                                 SColumnModel **pFinalModel, SColumnModel** pFFModel, uint32_t nBufferSize);

-void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel,
+void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, SColumnModel* pFFModel,
                                int32_t numOfVnodes);

 int32_t saveToBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tFilePage *pPage, void *data,
@@ -42,7 +42,6 @@ void tscBuildResFromSubqueries(SSqlObj *pSql);
 TAOS_ROW doSetResultRowData(SSqlObj *pSql);

 char *getArithmeticInputSrc(void *param, const char *name, int32_t colId);
-void doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize);

 #ifdef __cplusplus
 }
@@ -282,6 +282,7 @@ int tscSetMgmtEpSetFromCfg(const char *first, const char *second);

 bool tscSetSqlOwner(SSqlObj* pSql);
 void tscClearSqlOwner(SSqlObj* pSql);
+int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize);

 void* malloc_throw(size_t size);
 void* calloc_throw(size_t nmemb, size_t size);
@@ -2695,17 +2695,18 @@ static void apercentile_func_second_merge(SQLFunctionCtx *pCtx) {
   }

   SAPercentileInfo *pOutput = getAPerctInfo(pCtx);
-  SHistogramInfo * pHisto = pOutput->pHisto;
+  SHistogramInfo *pHisto = pOutput->pHisto;

   if (pHisto->numOfElems <= 0) {
     memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1));
     pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));
   } else {
+    //TODO(dengyihao): avoid memcpy
     pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));

     SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN);
-    tHistogramDestroy(&pOutput->pHisto);
-    pOutput->pHisto = pRes;
+    memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN);
+    pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));
+    tHistogramDestroy(&pRes);
   }

   SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
@@ -618,7 +618,11 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
   for (int32_t i = 0; i < numOfRows; ++i) {
     uint8_t type = pSchema[i].type;
     if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
-      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
+      int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
+      if (type == TSDB_DATA_TYPE_NCHAR) {
+        bytes = bytes/TSDB_NCHAR_SIZE;
+      }
+      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName, bytes);
     } else {
       snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName);
     }
@@ -641,7 +645,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
   for (int32_t i = 0; i < numOfRows; ++i) {
     uint8_t type = pSchema[i].type;
     if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
-      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
+      int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
+      if (type == TSDB_DATA_TYPE_NCHAR) {
+        bytes = bytes/TSDB_NCHAR_SIZE;
+      }
+      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
     } else {
       snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
     }
@@ -651,7 +659,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
   for (int32_t i = numOfRows; i < totalRows; i++) {
     uint8_t type = pSchema[i].type;
     if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
-      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
+      int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
+      if (type == TSDB_DATA_TYPE_NCHAR) {
+        bytes = bytes/TSDB_NCHAR_SIZE;
+      }
+      snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
     } else {
       snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
     }
@@ -172,14 +172,14 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   SSqlRes* pRes = &pSql->res;

   if (pMemBuffer == NULL) {
-    tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
+    tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
     tscError("%p pMemBuffer is NULL", pMemBuffer);
     pRes->code = TSDB_CODE_TSC_APP_ERROR;
     return;
   }

   if (pDesc->pColumnModel == NULL) {
-    tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
+    tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
     tscError("%p no local buffer or intermediate result format model", pSql);
     pRes->code = TSDB_CODE_TSC_APP_ERROR;
     return;
@@ -197,7 +197,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   }

   if (numOfFlush == 0 || numOfBuffer == 0) {
-    tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
+    tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
     tscDebug("%p retrieved no data", pSql);
     return;
   }
@@ -206,7 +206,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
     tscError("%p Invalid value of buffer capacity %d and page size %d ", pSql, pDesc->pColumnModel->capacity,
              pMemBuffer[0]->pageSize);

-    tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
+    tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
     pRes->code = TSDB_CODE_TSC_APP_ERROR;
     return;
   }
@@ -217,7 +217,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   if (pReducer == NULL) {
     tscError("%p failed to create local merge structure, out of memory", pSql);

-    tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, numOfBuffer);
+    tscLocalReducerEnvDestroy(pMemBuffer, pDesc, finalmodel, pFFModel, numOfBuffer);
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
     return;
   }
@@ -334,6 +334,8 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
   pReducer->resColModel = finalmodel;
   pReducer->resColModel->capacity = pReducer->nResultBufSize;

+  pReducer->finalModel = pFFModel;
+
   assert(pReducer->finalRowSize > 0);
   if (pReducer->finalRowSize > 0) {
     pReducer->resColModel->capacity /= pReducer->finalRowSize;
@@ -531,7 +533,7 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
   tfree(pLocalReducer->pFinalRes);
   tfree(pLocalReducer->discardData);

-  tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel,
+  tscLocalReducerEnvDestroy(pLocalReducer->pExtMemBuffer, pLocalReducer->pDesc, pLocalReducer->resColModel, pLocalReducer->finalModel,
                             pLocalReducer->numOfVnode);
   for (int32_t i = 0; i < pLocalReducer->numOfBuffer; ++i) {
     tfree(pLocalReducer->pLocalDataSrc[i]);
@@ -655,7 +657,7 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
 }

 int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pOrderDesc,
-                                 SColumnModel **pFinalModel, uint32_t nBufferSizes) {
+                                 SColumnModel **pFinalModel, SColumnModel** pFFModel, uint32_t nBufferSizes) {
   SSqlCmd *pCmd = &pSql->cmd;
   SSqlRes *pRes = &pSql->res;

@@ -753,6 +755,18 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr

   *pFinalModel = createColumnModel(pSchema, (int32_t)size, capacity);

+  memset(pSchema, 0, sizeof(SSchema) * size);
+  size = tscNumOfFields(pQueryInfo);
+
+  for(int32_t i = 0; i < size; ++i) {
+    SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
+    pSchema[i].bytes = pField->field.bytes;
+    pSchema[i].type = pField->field.type;
+    tstrncpy(pSchema[i].name, pField->field.name, tListLen(pSchema[i].name));
+  }
+
+  *pFFModel = createColumnModel(pSchema, (int32_t) size, capacity);
+
   tfree(pSchema);
   return TSDB_CODE_SUCCESS;
 }
@@ -763,9 +777,11 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
  * @param pFinalModel
  * @param numOfVnodes
  */
-void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel,
+void tscLocalReducerEnvDestroy(tExtMemBuffer **pMemBuffer, tOrderDescriptor *pDesc, SColumnModel *pFinalModel, SColumnModel *pFFModel,
                                int32_t numOfVnodes) {
   destroyColumnModel(pFinalModel);
+  destroyColumnModel(pFFModel);
+
   tOrderDescDestroy(pDesc);

   for (int32_t i = 0; i < numOfVnodes; ++i) {
@@ -873,10 +889,10 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
   if (pQueryInfo->limit.offset > 0) {
     if (pQueryInfo->limit.offset < pRes->numOfRows) {
       int32_t prevSize = (int32_t)pBeforeFillData->num;
-      tColModelErase(pLocalReducer->resColModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);
+      tColModelErase(pLocalReducer->finalModel, pBeforeFillData, prevSize, 0, (int32_t)pQueryInfo->limit.offset - 1);

       /* remove the hole in column model */
-      tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize);
+      tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);

       pRes->numOfRows -= pQueryInfo->limit.offset;
       pQueryInfo->limit.offset = 0;
@@ -898,7 +914,7 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
     pRes->numOfRows -= overflow;
     pBeforeFillData->num -= overflow;

-    tColModelCompact(pLocalReducer->resColModel, pBeforeFillData, prevSize);
+    tColModelCompact(pLocalReducer->finalModel, pBeforeFillData, prevSize);

     // set remain data to be discarded, and reset the interpolation information
     savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
@@ -1240,7 +1256,7 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
   tColModelCompact(pModel, pResBuf, pModel->capacity);

   if (tscIsSecondStageQuery(pQueryInfo)) {
-    doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalRowSize);
+    pLocalReducer->finalRowSize = doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalRowSize);
   }

 #ifdef _DEBUG_VIEW
@@ -1610,7 +1626,7 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
   pRes->data = pRes->pLocalReducer->pResultBuf->data;
 }

-void doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
+int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
   char* pbuf = calloc(1, pOutput->num * rowSize);

   size_t size = tscNumOfFields(pQueryInfo);
@@ -1645,8 +1661,10 @@ void doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t r
   }

   assert(finalRowSize <= rowSize);
-  memcpy(pOutput->data, pbuf, pOutput->num * finalRowSize);
+  memcpy(pOutput->data, pbuf, pOutput->num * offset);

   tfree(pbuf);
   tfree(arithSup.data);
+
+  return offset;
 }
@@ -1148,6 +1148,10 @@ int tsParseInsertSql(SSqlObj *pSql) {

       index = 0;
       sToken = tStrGetToken(str, &index, false, 0, NULL);
+      if (sToken.type != TK_STRING && sToken.type != TK_ID) {
+        code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
+        goto _error;
+      }
       str += index;
       if (sToken.n == 0) {
         code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
@@ -547,7 +547,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) {
   int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo));

   size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
-  int32_t exprSize = (int32_t)(sizeof(SSqlFuncMsg) * numOfExprs);
+  int32_t exprSize = (int32_t)(sizeof(SSqlFuncMsg) * numOfExprs * 2);

   int32_t tsBufSize = (pQueryInfo->tsBuf != NULL) ? pQueryInfo->tsBuf->fileSize : 0;

@@ -787,8 +787,10 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
     pSqlFuncExpr = (SSqlFuncMsg *)pMsg;
   }

-  if(tscIsSecondStageQuery(pQueryInfo)) {
-    size_t output = tscNumOfFields(pQueryInfo);
+  size_t output = tscNumOfFields(pQueryInfo);
+
+  if ((tscIsSecondStageQuery(pQueryInfo) || UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) ||
+      UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) && (output != tscSqlExprNumOfExprs(pQueryInfo))) {
     pQueryMsg->secondStageOutput = htonl((int32_t) output);

     SSqlFuncMsg *pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
@@ -1644,6 +1644,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
   tExtMemBuffer **  pMemoryBuf = NULL;
   tOrderDescriptor *pDesc = NULL;
   SColumnModel     *pModel = NULL;
+  SColumnModel     *pFinalModel = NULL;

   pRes->qhandle = 0x1;  // hack the qhandle check

@@ -1662,7 +1663,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {

   assert(pState->numOfSub > 0);

-  int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, nBufferSize);
+  int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, &pFinalModel, nBufferSize);
   if (ret != 0) {
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
     tscQueueAsyncRes(pSql);
@@ -1677,7 +1678,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
   if (pSql->pSubs == NULL) {
     tfree(pSql->pSubs);
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
-    tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pState->numOfSub);
+    tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel,pState->numOfSub);

     tscQueueAsyncRes(pSql);
     return ret;
@@ -1707,6 +1708,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
     trs->subqueryIndex = i;
     trs->pParentSql = pSql;
     trs->pFinalColModel = pModel;
+    trs->pFFColModel = pFinalModel;

     SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL);
     if (pNew == NULL) {
@@ -1730,13 +1732,13 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
     tscError("%p failed to prepare subquery structure and launch subqueries", pSql);
     pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;

-    tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pState->numOfSub);
+    tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel, pState->numOfSub);
     doCleanupSubqueries(pSql, i);
     return pRes->code;  // free all allocated resource
   }

   if (pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED) {
-    tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pState->numOfSub);
+    tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel, pState->numOfSub);
     doCleanupSubqueries(pSql, i);
     return pRes->code;
   }
@@ -1876,7 +1878,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
            tstrerror(pParentSql->res.code));

   // release allocated resource
-  tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, trsupport->pFinalColModel,
+  tscLocalReducerEnvDestroy(trsupport->pExtMemBuffer, trsupport->pOrderDescriptor, trsupport->pFinalColModel, trsupport->pFFColModel,
                             pState->numOfSub);

   tscFreeRetrieveSup(pSql);
@@ -56,6 +56,23 @@
       <scope>test</scope>
     </dependency>

+    <!-- for restful -->
+    <dependency>
+      <groupId>org.apache.httpcomponents</groupId>
+      <artifactId>httpclient</artifactId>
+      <version>4.5.8</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+      <version>3.9</version>
+    </dependency>
+    <dependency>
+      <groupId>com.alibaba</groupId>
+      <artifactId>fastjson</artifactId>
+      <version>1.2.58</version>
+    </dependency>
+
   </dependencies>
   <build>
     <plugins>
@@ -0,0 +1,161 @@
+package com.taosdata.jdbc;
+
+import java.io.*;
+import java.sql.Driver;
+import java.sql.DriverPropertyInfo;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.StringTokenizer;
+
+public abstract class AbstractTaosDriver implements Driver {
+
+    private static final String TAOS_CFG_FILENAME = "taos.cfg";
+
+    /**
+     * @param cfgDirPath
+     * @return return the config dir
+     **/
+    protected File loadConfigDir(String cfgDirPath) {
+        if (cfgDirPath == null)
+            return loadDefaultConfigDir();
+        File cfgDir = new File(cfgDirPath);
+        if (!cfgDir.exists())
+            return loadDefaultConfigDir();
+        return cfgDir;
+    }
+
+    /**
+     * @return search the default config dir, if the config dir is not exist will return null
+     */
+    protected File loadDefaultConfigDir() {
+        File cfgDir;
+        File cfgDir_linux = new File("/etc/taos");
+        cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
+        File cfgDir_windows = new File("C:\\TDengine\\cfg");
+        cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
+        return cfgDir;
+    }
+
+    protected List<String> loadConfigEndpoints(File cfgFile) {
+        List<String> endpoints = new ArrayList<>();
+        try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
+            String line = null;
+            while ((line = reader.readLine()) != null) {
+                if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
+                    endpoints.add(line.substring(line.indexOf('p') + 1).trim());
+                }
+                if (endpoints.size() > 1)
+                    break;
+            }
+        } catch (FileNotFoundException e) {
+            e.printStackTrace();
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+        return endpoints;
+    }
+
+    protected void loadTaosConfig(Properties info) {
+        if ((info.getProperty(TSDBDriver.PROPERTY_KEY_HOST) == null ||
+                info.getProperty(TSDBDriver.PROPERTY_KEY_HOST).isEmpty()) && (
+                info.getProperty(TSDBDriver.PROPERTY_KEY_PORT) == null ||
+                info.getProperty(TSDBDriver.PROPERTY_KEY_PORT).isEmpty())) {
+            File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
+            File cfgFile = cfgDir.listFiles((dir, name) -> TAOS_CFG_FILENAME.equalsIgnoreCase(name))[0];
+            List<String> endpoints = loadConfigEndpoints(cfgFile);
+            if (!endpoints.isEmpty()) {
+                info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
+                info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
+            }
+        }
+    }
+
+    protected DriverPropertyInfo[] getPropertyInfo(Properties info) {
+        DriverPropertyInfo hostProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_HOST, info.getProperty(TSDBDriver.PROPERTY_KEY_HOST));
+        hostProp.required = false;
+        hostProp.description = "Hostname";
+
+        DriverPropertyInfo portProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_PORT, info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
+        portProp.required = false;
+        portProp.description = "Port";
+
+        DriverPropertyInfo dbProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_DBNAME, info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME));
+        dbProp.required = false;
+        dbProp.description = "Database name";
+
+        DriverPropertyInfo userProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_USER, info.getProperty(TSDBDriver.PROPERTY_KEY_USER));
+        userProp.required = true;
+        userProp.description = "User";
+
+        DriverPropertyInfo passwordProp = new DriverPropertyInfo(TSDBDriver.PROPERTY_KEY_PASSWORD, info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
+        passwordProp.required = true;
+        passwordProp.description = "Password";
+
+        DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5];
+        propertyInfo[0] = hostProp;
+        propertyInfo[1] = portProp;
+        propertyInfo[2] = dbProp;
+        propertyInfo[3] = userProp;
+        propertyInfo[4] = passwordProp;
+        return propertyInfo;
+    }
+
+    protected Properties parseURL(String url, Properties defaults) {
+        Properties urlProps = (defaults != null) ? defaults : new Properties();
+
+        // parse properties
+        int beginningOfSlashes = url.indexOf("//");
+        int index = url.indexOf("?");
+        if (index != -1) {
+            String paramString = url.substring(index + 1, url.length());
+            url = url.substring(0, index);
+            StringTokenizer queryParams = new StringTokenizer(paramString, "&");
+            while (queryParams.hasMoreElements()) {
+                String parameterValuePair = queryParams.nextToken();
+                int indexOfEqual = parameterValuePair.indexOf("=");
+                String parameter = null;
+                String value = null;
+                if (indexOfEqual != -1) {
+                    parameter = parameterValuePair.substring(0, indexOfEqual);
+                    if (indexOfEqual + 1 < parameterValuePair.length()) {
+                        value = parameterValuePair.substring(indexOfEqual + 1);
+                    }
+                }
+                if ((value != null && value.length() > 0) && (parameter != null && parameter.length() > 0)) {
+                    urlProps.setProperty(parameter, value);
+                }
+            }
+        }
+
+        // parse Product Name
+        String dbProductName = url.substring(0, beginningOfSlashes);
+        dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1);
+        dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
+        // parse dbname
+        url = url.substring(beginningOfSlashes + 2);
+        int indexOfSlash = url.indexOf("/");
+        if (indexOfSlash != -1) {
+            if (indexOfSlash + 1 < url.length()) {
+                urlProps.setProperty(TSDBDriver.PROPERTY_KEY_DBNAME, url.substring(indexOfSlash + 1));
+            }
+            url = url.substring(0, indexOfSlash);
+        }
+        // parse port
+        int indexOfColon = url.indexOf(":");
+        if (indexOfColon != -1) {
+            if (indexOfColon + 1 < url.length()) {
+                urlProps.setProperty(TSDBDriver.PROPERTY_KEY_PORT, url.substring(indexOfColon + 1));
+            }
+            url = url.substring(0, indexOfColon);
+        }
+        // parse host
+        if (url != null && url.length() > 0 && url.trim().length() > 0) {
+            urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url);
+        }
+        return urlProps;
+    }
+
+
+
+}
@@ -16,10 +16,10 @@ package com.taosdata.jdbc;

 public class ColumnMetaData {

-    int colType = 0;
-    String colName = null;
-    int colSize = -1;
-    int colIndex = 0;
+    private int colType = 0;
+    private String colName = null;
+    private int colSize = -1;
+    private int colIndex = 0;

     public int getColSize() {
         return colSize;
@@ -14,7 +14,6 @@
 *****************************************************************************/
 package com.taosdata.jdbc;

-import java.io.*;
 import java.sql.Array;
 import java.sql.Blob;
 import java.sql.CallableStatement;
@@ -35,11 +34,10 @@ import java.util.*;
 import java.util.concurrent.Executor;

 public class TSDBConnection implements Connection {
+    protected Properties props = null;

     private TSDBJNIConnector connector = null;

-    protected Properties props = null;
-
     private String catalog = null;

     private TSDBDatabaseMetaData dbMetaData = null;
@@ -47,15 +45,21 @@ public class TSDBConnection implements Connection {
     private Properties clientInfoProps = new Properties();

     private int timeoutMilliseconds = 0;

-    private String tsCharSet = "";
+    private boolean batchFetch = false;

     public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
         this.dbMetaData = meta;
         connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
                 Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
-                info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
+                info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME),
+                info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
                 info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
+
+        String batchLoad = info.getProperty(TSDBDriver.PROPERTY_KEY_BATCH_LOAD);
+        if (batchLoad != null) {
+            this.batchFetch = Boolean.parseBoolean(batchLoad);
+        }
     }

     private void connect(String host, int port, String dbName, String user, String password) throws SQLException {
@@ -197,7 +201,8 @@ public class TSDBConnection implements Connection {

     public SQLWarning getWarnings() throws SQLException {
         //todo: implement getWarnings according to the warning messages returned from TDengine
-        throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
+        return null;
+        // throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
     }

     public void clearWarnings() throws SQLException {
@@ -222,6 +227,14 @@ public class TSDBConnection implements Connection {

         return this.prepareStatement(sql);
     }
+
+    public Boolean getBatchFetch() {
+        return this.batchFetch;
+    }
+
+    public void setBatchFetch(Boolean batchFetch) {
+        this.batchFetch = batchFetch;
+    }

     public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
         throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
@@ -14,7 +14,6 @@
 *****************************************************************************/
 package com.taosdata.jdbc;

-import java.io.*;
 import java.sql.*;
 import java.util.*;
 import java.util.logging.Logger;
@@ -38,7 +37,7 @@ import java.util.logging.Logger;
 * register it with the DriverManager. This means that a user can load and
 * register a driver by doing Class.forName("foo.bah.Driver")
 */
-public class TSDBDriver implements java.sql.Driver {
+public class TSDBDriver extends AbstractTaosDriver {

     @Deprecated
     private static final String URL_PREFIX1 = "jdbc:TSDB://";
@@ -87,6 +86,11 @@ public class TSDBDriver extends AbstractTaosDriver {
      */
     public static final String PROPERTY_KEY_CHARSET = "charset";

+    /**
+     * fetch data from native function in a batch model
+     */
+    public static final String PROPERTY_KEY_BATCH_LOAD = "batch";
+
     private TSDBDatabaseMetaData dbMetaData = null;

     static {
@@ -97,50 +101,6 @@ public class TSDBDriver extends AbstractTaosDriver {
         }
     }

-    private List<String> loadConfigEndpoints(File cfgFile) {
-        List<String> endpoints = new ArrayList<>();
-        try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
-            String line = null;
-            while ((line = reader.readLine()) != null) {
-                if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
-                    endpoints.add(line.substring(line.indexOf('p') + 1).trim());
-                }
-                if (endpoints.size() > 1)
-                    break;
-            }
-        } catch (FileNotFoundException e) {
-            e.printStackTrace();
-        } catch (IOException e) {
-            e.printStackTrace();
-        }
-        return endpoints;
-    }
-
-    /**
-     * @param cfgDirPath
-     * @return return the config dir
-     **/
-    private File loadConfigDir(String cfgDirPath) {
-        if (cfgDirPath == null)
-            return loadDefaultConfigDir();
-        File cfgDir = new File(cfgDirPath);
-        if (!cfgDir.exists())
-            return loadDefaultConfigDir();
-        return cfgDir;
-    }
-
-    /**
-     * @return search the default config dir, if the config dir is not exist will return null
-     */
-    private File loadDefaultConfigDir() {
-        File cfgDir;
-        File cfgDir_linux = new File("/etc/taos");
-        cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
-        File cfgDir_windows = new File("C:\\TDengine\\cfg");
-        cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
-        return cfgDir;
-    }
-
     public Connection connect(String url, Properties info) throws SQLException {
         if (url == null)
             throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!"));
@@ -152,26 +112,12 @@ public class TSDBDriver extends AbstractTaosDriver {
         if ((props = parseURL(url, info)) == null) {
             return null;
         }

         //load taos.cfg start
-        if ((info.getProperty(TSDBDriver.PROPERTY_KEY_HOST) == null ||
-                info.getProperty(TSDBDriver.PROPERTY_KEY_HOST).isEmpty()) && (
-                info.getProperty(TSDBDriver.PROPERTY_KEY_PORT) == null ||
-                info.getProperty(TSDBDriver.PROPERTY_KEY_PORT).isEmpty())) {
-            File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
-            File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
-            List<String> endpoints = loadConfigEndpoints(cfgFile);
-            if (!endpoints.isEmpty()) {
-                info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
-                info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
-            }
-        }
+        loadTaosConfig(info);

         try {
-            TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR),
-                    (String) props.get(PROPERTY_KEY_LOCALE),
-                    (String) props.get(PROPERTY_KEY_CHARSET),
-                    (String) props.get(PROPERTY_KEY_TIME_ZONE));
+            TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE),
+                    (String) props.get(PROPERTY_KEY_CHARSET), (String) props.get(PROPERTY_KEY_TIME_ZONE));
             Connection newConn = new TSDBConnection(props, this.dbMetaData);
             return newConn;
         } catch (SQLWarning sqlWarning) {
@@ -208,39 +154,13 @@ public class TSDBDriver extends AbstractTaosDriver {
             info = parseURL(url, info);
         }

-        DriverPropertyInfo hostProp = new DriverPropertyInfo(PROPERTY_KEY_HOST, info.getProperty(PROPERTY_KEY_HOST));
-        hostProp.required = false;
-        hostProp.description = "Hostname";
-
-        DriverPropertyInfo portProp = new DriverPropertyInfo(PROPERTY_KEY_PORT, info.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
-        portProp.required = false;
-        portProp.description = "Port";
-
-        DriverPropertyInfo dbProp = new DriverPropertyInfo(PROPERTY_KEY_DBNAME, info.getProperty(PROPERTY_KEY_DBNAME));
-        dbProp.required = false;
-        dbProp.description = "Database name";
-
-        DriverPropertyInfo userProp = new DriverPropertyInfo(PROPERTY_KEY_USER, info.getProperty(PROPERTY_KEY_USER));
-        userProp.required = true;
-        userProp.description = "User";
-
-        DriverPropertyInfo passwordProp = new DriverPropertyInfo(PROPERTY_KEY_PASSWORD, info.getProperty(PROPERTY_KEY_PASSWORD));
-        passwordProp.required = true;
-        passwordProp.description = "Password";
-
-        DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5];
-        propertyInfo[0] = hostProp;
-        propertyInfo[1] = portProp;
-        propertyInfo[2] = dbProp;
-        propertyInfo[3] = userProp;
-        propertyInfo[4] = passwordProp;
-
-        return propertyInfo;
+        return getPropertyInfo(info);
     }

     /**
     * example: jdbc:TAOS://127.0.0.1:0/db?user=root&password=your_password
     */
+    @Override
     public Properties parseURL(String url, Properties defaults) {
         Properties urlProps = (defaults != null) ? defaults : new Properties();
         if (url == null || url.length() <= 0 || url.trim().length() <= 0)
@@ -257,26 +177,21 @@ public class TSDBDriver extends AbstractTaosDriver {
             url = url.substring(0, index);
             StringTokenizer queryParams = new StringTokenizer(paramString, "&");
             while (queryParams.hasMoreElements()) {
-                String parameterValuePair = queryParams.nextToken();
-                int indexOfEqual = parameterValuePair.indexOf("=");
-                String parameter = null;
-                String value = null;
-                if (indexOfEqual != -1) {
-                    parameter = parameterValuePair.substring(0, indexOfEqual);
-                    if (indexOfEqual + 1 < parameterValuePair.length()) {
-                        value = parameterValuePair.substring(indexOfEqual + 1);
-                    }
-                }
-                if ((value != null && value.length() > 0) && (parameter != null && parameter.length() > 0)) {
-                    urlProps.setProperty(parameter, value);
+                String oneToken = queryParams.nextToken();
+                String[] pair = oneToken.split("=");
+                if ((pair[0] != null && pair[0].trim().length() > 0) && (pair[1] != null && pair[1].trim().length() > 0)) {
+                    urlProps.setProperty(pair[0].trim(), pair[1].trim());
                 }
             }
         }

         // parse Product Name
         String dbProductName = url.substring(0, beginningOfSlashes);
         dbProductName = dbProductName.substring(dbProductName.indexOf(":") + 1);
         dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
-        // parse dbname
+        // parse database name
         url = url.substring(beginningOfSlashes + 2);
         int indexOfSlash = url.indexOf("/");
         if (indexOfSlash != -1) {
@@ -285,6 +200,7 @@ public class TSDBDriver extends AbstractTaosDriver {
             }
             url = url.substring(0, indexOfSlash);
         }
+
         // parse port
         int indexOfColon = url.indexOf(":");
         if (indexOfColon != -1) {
@@ -293,89 +209,15 @@ public class TSDBDriver extends AbstractTaosDriver {
             }
             url = url.substring(0, indexOfColon);
         }

         if (url != null && url.length() > 0 && url.trim().length() > 0) {
             urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url);
         }

         this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty(TSDBDriver.PROPERTY_KEY_USER));
-
-        /*
-        String urlForMeta = url;
-        String dbProductName = url.substring(url.indexOf(":") + 1);
-        dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
-        int beginningOfSlashes = url.indexOf("//");
-        url = url.substring(beginningOfSlashes + 2);
-
-        String host = url.substring(0, url.indexOf(":"));
-        url = url.substring(url.indexOf(":") + 1);
-        urlProps.setProperty(PROPERTY_KEY_HOST, host);
-
-        String port = url.substring(0, url.indexOf("/"));
-        urlProps.setProperty(PROPERTY_KEY_PORT, port);
-        url = url.substring(url.indexOf("/") + 1);
-
-        if (url.indexOf("?") != -1) {
-            String dbName = url.substring(0, url.indexOf("?"));
-            urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
-            url = url.trim().substring(url.indexOf("?") + 1);
-        } else {
-            // without user & password so return
-            if (!url.trim().isEmpty()) {
-                String dbName = url.trim();
-                urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
-            }
-            this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty("user"));
-            return urlProps;
-        }
-
-        String user = "";
-
-        if (url.indexOf("&") == -1) {
-            String[] kvPair = url.trim().split("=");
-            if (kvPair.length == 2) {
-                setPropertyValue(urlProps, kvPair);
-                return urlProps;
-            }
-        }
-
-        String[] queryStrings = url.trim().split("&");
-        for (String queryStr : queryStrings) {
-            String[] kvPair = queryStr.trim().split("=");
-            if (kvPair.length < 2) {
-                continue;
-            }
-            setPropertyValue(urlProps, kvPair);
-        }
-
-        user = urlProps.getProperty(PROPERTY_KEY_USER).toString();
-        this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, user);
-        */
         return urlProps;
     }

-    private void setPropertyValue(Properties property, String[] keyValuePair) {
-        switch (keyValuePair[0].toLowerCase()) {
-            case PROPERTY_KEY_USER:
-                property.setProperty(PROPERTY_KEY_USER, keyValuePair[1]);
-                break;
-            case PROPERTY_KEY_PASSWORD:
-                property.setProperty(PROPERTY_KEY_PASSWORD, keyValuePair[1]);
-                break;
-            case PROPERTY_KEY_TIME_ZONE:
-                property.setProperty(PROPERTY_KEY_TIME_ZONE, keyValuePair[1]);
-                break;
-            case PROPERTY_KEY_LOCALE:
-                property.setProperty(PROPERTY_KEY_LOCALE, keyValuePair[1]);
-                break;
-            case PROPERTY_KEY_CHARSET:
-                property.setProperty(PROPERTY_KEY_CHARSET, keyValuePair[1]);
-                break;
-            case PROPERTY_KEY_CONFIG_DIR:
-                property.setProperty(PROPERTY_KEY_CONFIG_DIR, keyValuePair[1]);
-                break;
-        }
-    }
-
     public int getMajorVersion() {
         return 2;
     }
@@ -49,7 +49,7 @@ public class TSDBResultSet implements ResultSet {
     private TSDBResultSetRowData rowData;
     private TSDBResultSetBlockData blockData;

-    private boolean blockwiseFetch = false;
+    private boolean batchFetch = false;
     private boolean lastWasNull = false;
     private final int COLUMN_INDEX_START_VALUE = 1;

@@ -71,8 +71,12 @@ public class TSDBResultSet implements ResultSet {
         this.resultSetPointer = resultSetPointer;
     }

-    public void setBlockWiseFetch(boolean fetchBlock) {
-        this.blockwiseFetch = fetchBlock;
+    public void setBatchFetch(boolean batchFetch) {
+        this.batchFetch = batchFetch;
+    }
+
+    public Boolean getBatchFetch() {
+        return this.batchFetch;
     }

     public List<ColumnMetaData> getColumnMetaDataList() {
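Renaming blockwiseFetch to batchFetch and exposing a setBatchFetch/getBatchFetch pair keeps the fetch mode a per-result-set property. From application code the standard JDBC flow is unchanged; a hedged usage sketch (driver class name, URL form, table and column choices are placeholders, not taken from this diff):

// Hedged usage sketch of the ordinary JDBC flow against the TDengine connector.
// The driver class name and URL shape are assumptions; adjust to the installed connector.
import java.sql.*;

public class QuerySketch {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");   // assumed driver class name
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/log?user=root&password=taosdata");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select ts, val from some_table limit 10")) {
            while (rs.next()) {                          // next() walks rows; with batch fetch it walks a cached block first
                System.out.println(rs.getTimestamp(1) + " " + rs.getString(2));
            }
        }
    }
}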
@@ -102,8 +106,8 @@ public class TSDBResultSet implements ResultSet {
     public TSDBResultSet() {
     }

-    public TSDBResultSet(TSDBJNIConnector connecter, long resultSetPointer) throws SQLException {
-        this.jniConnector = connecter;
+    public TSDBResultSet(TSDBJNIConnector connector, long resultSetPointer) throws SQLException {
+        this.jniConnector = connector;
         this.resultSetPointer = resultSetPointer;
         int code = this.jniConnector.getSchemaMetaData(this.resultSetPointer, this.columnMetaDataList);
         if (code == TSDBConstants.JNI_CONNECTION_NULL) {
@@ -127,13 +131,13 @@ public class TSDBResultSet implements ResultSet {
     }

     public boolean next() throws SQLException {
-        if (this.blockwiseFetch) {
+        if (this.getBatchFetch()) {
             if (this.blockData.forward()) {
                 return true;
             }

             int code = this.jniConnector.fetchBlock(this.resultSetPointer, this.blockData);
-            this.blockData.resetCursor();
+            this.blockData.reset();

             if (code == TSDBConstants.JNI_CONNECTION_NULL) {
                 throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
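With batch fetch enabled, next() now walks a locally cached block first and only calls fetchBlock/reset() when that block is exhausted. A self-contained sketch of the same two-level iteration, with a hypothetical BlockSource iterator standing in for the JNI fetchBlock call:

// Self-contained sketch of the block-buffered cursor pattern used by next().
// The Iterator<List<T>> is a hypothetical stand-in for jniConnector.fetchBlock(...).
import java.util.Iterator;
import java.util.List;

class BlockCursor<T> {
    private final Iterator<List<T>> blockSource;   // stands in for the JNI block fetch
    private List<T> block = List.of();
    private int rowIndex = -1;

    BlockCursor(Iterator<List<T>> blockSource) { this.blockSource = blockSource; }

    boolean next() {
        if (++rowIndex < block.size()) return true;   // forward() inside the cached block
        if (!blockSource.hasNext()) return false;     // no more blocks to fetch
        block = blockSource.next();                   // fetchBlock(...)
        rowIndex = 0;                                 // reset(), then report whether the block has rows
        return rowIndex < block.size();
    }

    T current() { return block.get(rowIndex); }
}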
@@ -185,7 +189,7 @@ public class TSDBResultSet implements ResultSet {
         String res = null;
         int colIndex = getTrueColumnIndex(columnIndex);

-        if (!this.blockwiseFetch) {
+        if (!this.getBatchFetch()) {
             this.lastWasNull = this.rowData.wasNull(colIndex);
             if (!lastWasNull) {
                 res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType());
@@ -200,7 +204,7 @@ public class TSDBResultSet implements ResultSet {
         boolean res = false;
         int colIndex = getTrueColumnIndex(columnIndex);

-        if (!this.blockwiseFetch) {
+        if (!this.getBatchFetch()) {
             this.lastWasNull = this.rowData.wasNull(colIndex);
             if (!lastWasNull) {
                 res = this.rowData.getBoolean(colIndex, this.columnMetaDataList.get(colIndex).getColType());
@@ -216,7 +220,7 @@ public class TSDBResultSet implements ResultSet {
         byte res = 0;
         int colIndex = getTrueColumnIndex(columnIndex);

-        if (!this.blockwiseFetch) {
+        if (!this.getBatchFetch()) {
             this.lastWasNull = this.rowData.wasNull(colIndex);
             if (!lastWasNull) {
                 res = (byte) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
@@ -231,7 +235,7 @@ public class TSDBResultSet implements ResultSet {
         short res = 0;
         int colIndex = getTrueColumnIndex(columnIndex);

-        if (!this.blockwiseFetch) {
+        if (!this.getBatchFetch()) {
             this.lastWasNull = this.rowData.wasNull(colIndex);
             if (!lastWasNull) {
                 res = (short) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
@@ -246,7 +250,7 @@ public class TSDBResultSet implements ResultSet {
         int res = 0;
         int colIndex = getTrueColumnIndex(columnIndex);

-        if (!this.blockwiseFetch) {
+        if (!this.getBatchFetch()) {
             this.lastWasNull = this.rowData.wasNull(colIndex);
             if (!lastWasNull) {
                 res = this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
@@ -262,7 +266,7 @@ public class TSDBResultSet implements ResultSet {
         long res = 0l;
         int colIndex = getTrueColumnIndex(columnIndex);

-        if (!this.blockwiseFetch) {
+        if (!this.getBatchFetch()) {
             this.lastWasNull = this.rowData.wasNull(colIndex);
             if (!lastWasNull) {
                 res = this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType());
@@ -277,7 +281,7 @@ public class TSDBResultSet implements ResultSet {
         float res = 0;
         int colIndex = getTrueColumnIndex(columnIndex);

-        if (!this.blockwiseFetch) {
+        if (!this.getBatchFetch()) {
             this.lastWasNull = this.rowData.wasNull(colIndex);
             if (!lastWasNull) {
                 res = this.rowData.getFloat(colIndex, this.columnMetaDataList.get(colIndex).getColType());
@@ -292,7 +296,7 @@ public class TSDBResultSet implements ResultSet {
         double res = 0;
         int colIndex = getTrueColumnIndex(columnIndex);

-        if (!this.blockwiseFetch) {
+        if (!this.getBatchFetch()) {
             this.lastWasNull = this.rowData.wasNull(colIndex);
             if (!lastWasNull) {
                 res = this.rowData.getDouble(colIndex, this.columnMetaDataList.get(colIndex).getColType());
@@ -334,7 +338,7 @@ public class TSDBResultSet implements ResultSet {
         Timestamp res = null;
         int colIndex = getTrueColumnIndex(columnIndex);

-        if (!this.blockwiseFetch) {
+        if (!this.getBatchFetch()) {
             this.lastWasNull = this.rowData.wasNull(colIndex);
             if (!lastWasNull) {
                 res = this.rowData.getTimestamp(colIndex);
@@ -454,7 +458,7 @@ public class TSDBResultSet implements ResultSet {
     public Object getObject(int columnIndex) throws SQLException {
         int colIndex = getTrueColumnIndex(columnIndex);

-        if (!this.blockwiseFetch) {
+        if (!this.getBatchFetch()) {
             this.lastWasNull = this.rowData.wasNull(colIndex);
             return this.rowData.get(colIndex);
         } else {
@@ -491,7 +495,7 @@ public class TSDBResultSet implements ResultSet {
     public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
         int colIndex = getTrueColumnIndex(columnIndex);

-        if (!this.blockwiseFetch) {
+        if (!this.getBatchFetch()) {
             this.lastWasNull = this.rowData.wasNull(colIndex);
             return new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()));
         } else {
@@ -56,13 +56,6 @@ public class TSDBResultSetBlockData {
         if (this.numOfCols == 0) {
             return;
         }
-
-        this.colData = new ArrayList<Object>(numOfCols);
-        this.colData.addAll(Collections.nCopies(this.numOfCols, null));
-    }
-
-    public boolean wasNull(int col) {
-        return colData.get(col) == null;
     }

     public int getNumOfRows() {
@@ -82,20 +75,19 @@ public class TSDBResultSetBlockData {
         this.clear();
     }

-    public void setColumnData(int col, byte val) {
-        this.colData.set(col, val);
-    }
-
     public boolean hasMore() {
         return this.rowIndex < this.numOfRows;
     }

     public boolean forward() {
-        this.rowIndex++;
-        return (this.rowIndex < this.numOfRows);
+        if (this.rowIndex > this.numOfRows) {
+            return false;
+        }
+
+        return ((++this.rowIndex) < this.numOfRows);
     }

-    public void resetCursor() {
+    public void reset() {
         this.rowIndex = 0;
     }

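The reworked forward() refuses to advance once the cursor has already run past the block and switches to a pre-increment, while reset() (formerly resetCursor()) rewinds to row 0. A tiny runnable illustration of that contract:

// Tiny illustration of the forward()/reset() cursor contract after the change above.
class CursorSketch {
    private int rowIndex = 0;
    private int numOfRows = 3;

    boolean forward() {
        if (rowIndex > numOfRows) return false;     // already exhausted, stay exhausted
        return (++rowIndex) < numOfRows;            // pre-increment, then bounds check
    }

    void reset() { rowIndex = 0; }

    public static void main(String[] args) {
        CursorSketch c = new CursorSketch();
        while (c.forward()) {
            System.out.println("row " + c.rowIndex);   // prints row 1 and row 2, then stops
        }
    }
}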
@@ -172,10 +164,58 @@ public class TSDBResultSetBlockData {
         }
     }

-    class NullType {
+    private static class NullType {
+        private static final byte NULL_BOOL_VAL = 0x2;
+        private static final String NULL_STR = "null";
+
         public String toString() {
-            return new String("null");
+            return NullType.NULL_STR;
         }

+        public static boolean isBooleanNull(byte val) {
+            return val == NullType.NULL_BOOL_VAL;
+        }
+
+        private static boolean isTinyIntNull(byte val) {
+            return val == Byte.MIN_VALUE;
+        }
+
+        private static boolean isSmallIntNull(short val) {
+            return val == Short.MIN_VALUE;
+        }
+
+        private static boolean isIntNull(int val) {
+            return val == Integer.MIN_VALUE;
+        }
+
+        private static boolean isBigIntNull(long val) {
+            return val == Long.MIN_VALUE;
+        }
+
+        private static boolean isFloatNull(float val) {
+            return Float.isNaN(val);
+        }
+
+        private static boolean isDoubleNull(double val) {
+            return Double.isNaN(val);
+        }
+
+        private static boolean isBinaryNull(byte[] val, int length) {
+            if (length != Byte.BYTES) {
+                return false;
+            }
+
+            return val[0] == 0xFF;
+        }
+
+        private static boolean isNcharNull(byte[] val, int length) {
+            if (length != Integer.BYTES) {
+                return false;
+            }
+
+            return (val[0] & val[1] & val[2] & val[3]) == 0xFF;
+        }
+
     }

     /**
@@ -195,50 +235,6 @@ public class TSDBResultSetBlockData {
         return obj.toString();
     }

-    private boolean isBooleanNull(byte val) {
-        return val == 0x2;
-    }
-
-    private boolean isTinyIntNull(byte val) {
-        return val == 0x80;
-    }
-
-    private boolean isSmallIntNull(short val) {
-        return val == 0x8000;
-    }
-
-    private boolean isIntNull(int val) {
-        return val == 0x80000000L;
-    }
-
-    private boolean isBigIntNull(long val) {
-        return val == 0x8000000000000000L;
-    }
-
-    private boolean isFloatNull(float val) {
-        return Float.isNaN(val);
-    }
-
-    private boolean isDoubleNull(double val) {
-        return Double.isNaN(val);
-    }
-
-    private boolean isBinaryNull(byte[] val, int length) {
-        if (length != 1) {
-            return false;
-        }
-
-        return val[0] == 0xFF;
-    }
-
-    private boolean isNcharNull(byte[] val, int length) {
-        if (length != 4) {
-            return false;
-        }
-
-        return (val[0] & val[1] & val[2] & val[3]) == 0xFF ;
-    }
-
     public int getInt(int col) {
         Object obj = get(col);
         if (obj == null) {
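The new static NullType helpers centralize the in-band null sentinels visible in this hunk: MIN_VALUE for the integer widths, NaN for FLOAT/DOUBLE, and 0xFF marker bytes for BINARY/NCHAR. A self-contained sketch of sentinel-based null decoding using the same constants:

// Self-contained sketch of sentinel-based null decoding, mirroring the NullType helpers above.
// The sentinel choices are the ones visible in the diff (MIN_VALUE / NaN).
class SentinelNulls {
    static boolean isIntNull(int v)       { return v == Integer.MIN_VALUE; }
    static boolean isBigIntNull(long v)   { return v == Long.MIN_VALUE; }
    static boolean isDoubleNull(double v) { return Double.isNaN(v); }

    static Integer decodeInt(int raw) {
        return isIntNull(raw) ? null : raw;   // map the sentinel back to a Java null
    }

    public static void main(String[] args) {
        System.out.println(decodeInt(42));                  // 42
        System.out.println(decodeInt(Integer.MIN_VALUE));   // null
    }
}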
@@ -284,16 +280,16 @@ public class TSDBResultSetBlockData {
             case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
             case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
             case TSDBConstants.TSDB_DATA_TYPE_INT: {
-                return ((int) obj == 0L)? Boolean.FALSE:Boolean.TRUE;
+                return ((int) obj == 0L) ? Boolean.FALSE : Boolean.TRUE;
             }
             case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
             case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: {
-                return (((Long) obj) == 0L)? Boolean.FALSE:Boolean.TRUE;
+                return (((Long) obj) == 0L) ? Boolean.FALSE : Boolean.TRUE;
             }

             case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
             case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
-                return (((Double) obj) == 0)? Boolean.FALSE:Boolean.TRUE;
+                return (((Double) obj) == 0) ? Boolean.FALSE : Boolean.TRUE;
             }

             case TSDBConstants.TSDB_DATA_TYPE_NCHAR:
@@ -395,7 +391,7 @@ public class TSDBResultSetBlockData {
             ByteBuffer bb = (ByteBuffer) this.colData.get(col);

             byte val = bb.get(this.rowIndex);
-            if (isBooleanNull(val)) {
+            if (NullType.isBooleanNull(val)) {
                 return null;
             }

@@ -406,7 +402,7 @@ public class TSDBResultSetBlockData {
             ByteBuffer bb = (ByteBuffer) this.colData.get(col);

             byte val = bb.get(this.rowIndex);
-            if (isTinyIntNull(val)) {
+            if (NullType.isTinyIntNull(val)) {
                 return null;
             }

@@ -416,7 +412,7 @@ public class TSDBResultSetBlockData {
             case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: {
                 ShortBuffer sb = (ShortBuffer) this.colData.get(col);
                 short val = sb.get(this.rowIndex);
-                if (isSmallIntNull(val)) {
+                if (NullType.isSmallIntNull(val)) {
                     return null;
                 }

@@ -426,7 +422,7 @@ public class TSDBResultSetBlockData {
             case TSDBConstants.TSDB_DATA_TYPE_INT: {
                 IntBuffer ib = (IntBuffer) this.colData.get(col);
                 int val = ib.get(this.rowIndex);
-                if (isIntNull(val)) {
+                if (NullType.isIntNull(val)) {
                     return null;
                 }

@@ -437,7 +433,7 @@ public class TSDBResultSetBlockData {
             case TSDBConstants.TSDB_DATA_TYPE_BIGINT: {
                 LongBuffer lb = (LongBuffer) this.colData.get(col);
                 long val = lb.get(this.rowIndex);
-                if (isBigIntNull(val)) {
+                if (NullType.isBigIntNull(val)) {
                     return null;
                 }

@@ -447,7 +443,7 @@ public class TSDBResultSetBlockData {
             case TSDBConstants.TSDB_DATA_TYPE_FLOAT: {
                 FloatBuffer fb = (FloatBuffer) this.colData.get(col);
                 float val = fb.get(this.rowIndex);
-                if (isFloatNull(val)) {
+                if (NullType.isFloatNull(val)) {
                     return null;
                 }

@@ -457,7 +453,7 @@ public class TSDBResultSetBlockData {
             case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: {
                 DoubleBuffer lb = (DoubleBuffer) this.colData.get(col);
                 double val = lb.get(this.rowIndex);
-                if (isDoubleNull(val)) {
+                if (NullType.isDoubleNull(val)) {
                     return null;
                 }

@@ -472,7 +468,7 @@ public class TSDBResultSetBlockData {

                 byte[] dest = new byte[length];
                 bb.get(dest, 0, length);
-                if (isBinaryNull(dest, length)) {
+                if (NullType.isBinaryNull(dest, length)) {
                     return null;
                 }

@@ -487,7 +483,7 @@ public class TSDBResultSetBlockData {

                 byte[] dest = new byte[length];
                 bb.get(dest, 0, length);
-                if (isNcharNull(dest, length)) {
+                if (NullType.isNcharNull(dest, length)) {
                     return null;
                 }

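Each fixed-width column lives in a typed NIO buffer and is read by row index, then checked against its sentinel before being handed to the caller. A small runnable sketch of that read path for an INT column; the buffer here is built locally for illustration rather than coming from a fetched block:

// Sketch of reading a fixed-width column out of its buffer with a sentinel null check,
// mirroring the IntBuffer branch above.
import java.nio.IntBuffer;

class ColumnReadSketch {
    public static void main(String[] args) {
        IntBuffer col = IntBuffer.wrap(new int[]{7, Integer.MIN_VALUE, 9});
        for (int row = 0; row < col.limit(); row++) {
            int raw = col.get(row);
            Integer value = (raw == Integer.MIN_VALUE) ? null : raw;   // NullType.isIntNull equivalent
            System.out.println("row " + row + " -> " + value);
        }
    }
}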
@@ -19,7 +19,7 @@ import java.util.ArrayList;
 import java.util.List;

 public class TSDBStatement implements Statement {
-    private TSDBJNIConnector connecter = null;
+    private TSDBJNIConnector connector = null;

     /**
      * To store batched commands
@@ -45,9 +45,9 @@ public class TSDBStatement implements Statement {
         this.connection = connection;
     }

-    TSDBStatement(TSDBConnection connection, TSDBJNIConnector connecter) {
+    TSDBStatement(TSDBConnection connection, TSDBJNIConnector connector) {
         this.connection = connection;
-        this.connecter = connecter;
+        this.connector = connector;
         this.isClosed = false;
     }

@@ -65,27 +65,27 @@ public class TSDBStatement implements Statement {
         }

         // TODO make sure it is not a update query
-        pSql = this.connecter.executeQuery(sql);
+        pSql = this.connector.executeQuery(sql);

-        long resultSetPointer = this.connecter.getResultSet();
+        long resultSetPointer = this.connector.getResultSet();

         if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
-            this.connecter.freeResultSet(pSql);
+            this.connector.freeResultSet(pSql);
             throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
         }

         // create/insert/update/delete/alter
         if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
-            this.connecter.freeResultSet(pSql);
+            this.connector.freeResultSet(pSql);
             return null;
         }

-        if (!this.connecter.isUpdateQuery(pSql)) {
-            TSDBResultSet res = new TSDBResultSet(this.connecter, resultSetPointer);
-            res.setBlockWiseFetch(true);
+        if (!this.connector.isUpdateQuery(pSql)) {
+            TSDBResultSet res = new TSDBResultSet(this.connector, resultSetPointer);
+            res.setBatchFetch(this.connection.getBatchFetch());
             return res;
         } else {
-            this.connecter.freeResultSet(pSql);
+            this.connector.freeResultSet(pSql);
             return null;
         }

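executeQuery no longer hard-codes block-wise fetch; it copies the connection-level flag into every result set via res.setBatchFetch(this.connection.getBatchFetch()). How that flag gets switched on is not shown in this hunk, so the property key in the sketch below is hypothetical; consult the connector documentation for the real knob.

// Hedged sketch: batch fetch is now decided per connection and handed to each result set.
import java.sql.*;
import java.util.Properties;

public class BatchFetchSketch {
    public static void main(String[] args) throws Exception {
        Class.forName("com.taosdata.jdbc.TSDBDriver");      // assumed driver class name
        Properties props = new Properties();
        props.setProperty("batchfetch", "true");            // hypothetical key, for illustration only
        try (Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/db", props);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select ts, val from meters limit 100")) {
            while (rs.next()) {
                // rows are served from locally cached blocks when batch fetch is enabled
            }
        }
    }
}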
@@ -97,28 +97,28 @@ public class TSDBStatement implements Statement {
         }

         // TODO check if current query is update query
-        pSql = this.connecter.executeQuery(sql);
-        long resultSetPointer = this.connecter.getResultSet();
+        pSql = this.connector.executeQuery(sql);
+        long resultSetPointer = this.connector.getResultSet();

         if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
-            this.connecter.freeResultSet(pSql);
+            this.connector.freeResultSet(pSql);
             throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
         }

-        this.affectedRows = this.connecter.getAffectedRows(pSql);
-        this.connecter.freeResultSet(pSql);
+        this.affectedRows = this.connector.getAffectedRows(pSql);
+        this.connector.freeResultSet(pSql);

         return this.affectedRows;
     }

     public String getErrorMsg(long pSql) {
-        return this.connecter.getErrMsg(pSql);
+        return this.connector.getErrMsg(pSql);
     }

     public void close() throws SQLException {
         if (!isClosed) {
-            if (!this.connecter.isResultsetClosed()) {
-                this.connecter.freeResultSet();
+            if (!this.connector.isResultsetClosed()) {
+                this.connector.freeResultSet();
             }
             isClosed = true;
         }
@@ -174,15 +174,15 @@ public class TSDBStatement implements Statement {
             throw new SQLException("Invalid method call on a closed statement.");
         }
         boolean res = true;
-        pSql = this.connecter.executeQuery(sql);
-        long resultSetPointer = this.connecter.getResultSet();
+        pSql = this.connector.executeQuery(sql);
+        long resultSetPointer = this.connector.getResultSet();

         if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
-            this.connecter.freeResultSet(pSql);
+            this.connector.freeResultSet(pSql);
             throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
         } else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
             // no result set is retrieved
-            this.connecter.freeResultSet(pSql);
+            this.connector.freeResultSet(pSql);
             res = false;
         }

@@ -193,10 +193,10 @@ public class TSDBStatement implements Statement {
         if (isClosed) {
             throw new SQLException("Invalid method call on a closed statement.");
         }
-        long resultSetPointer = connecter.getResultSet();
+        long resultSetPointer = connector.getResultSet();
         TSDBResultSet resSet = null;
         if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
-            resSet = new TSDBResultSet(connecter, resultSetPointer);
+            resSet = new TSDBResultSet(connector, resultSetPointer);
         }
         return resSet;
     }
@@ -269,7 +269,7 @@ public class TSDBStatement implements Statement {
     }

     public Connection getConnection() throws SQLException {
-        if (this.connecter != null)
+        if (this.connector != null)
             return this.connection;
         throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
     }

@@ -0,0 +1,319 @@
+package com.taosdata.jdbc.rs;
+
+import com.taosdata.jdbc.TSDBConstants;
+
+import java.sql.*;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.Executor;
+
+public class RestfulConnection implements Connection {
+
+    private final String host;
+    private final int port;
+    private final Properties props;
+    private final String database;
+    private final String url;
+
+    public RestfulConnection(String host, String port, Properties props, String database, String url) {
+        this.host = host;
+        this.port = Integer.parseInt(port);
+        this.props = props;
+        this.database = database;
+        this.url = url;
+    }
+
+    @Override
+    public Statement createStatement() throws SQLException {
+        if (isClosed())
+            throw new SQLException(TSDBConstants.WrapErrMsg("restful TDengine connection is closed."));
+        return new RestfulStatement(this, this.database);
+    }
+
+    @Override public PreparedStatement prepareStatement(String sql) throws SQLException { return null; }
+    @Override public CallableStatement prepareCall(String sql) throws SQLException { return null; }
+    @Override public String nativeSQL(String sql) throws SQLException { return null; }
+    @Override public void setAutoCommit(boolean autoCommit) throws SQLException { }
+    @Override public boolean getAutoCommit() throws SQLException { return false; }
+    @Override public void commit() throws SQLException { }
+    @Override public void rollback() throws SQLException { }
+    @Override public void close() throws SQLException { }
+    @Override public boolean isClosed() throws SQLException { return false; }
+
+    @Override
+    public DatabaseMetaData getMetaData() throws SQLException {
+        //TODO: RestfulDatabaseMetaData is not implemented
+        return new RestfulDatabaseMetaData();
+    }
+
+    @Override public void setReadOnly(boolean readOnly) throws SQLException { }
+    @Override public boolean isReadOnly() throws SQLException { return false; }
+    @Override public void setCatalog(String catalog) throws SQLException { }
+    @Override public String getCatalog() throws SQLException { return null; }
+    @Override public void setTransactionIsolation(int level) throws SQLException { }
+    @Override public int getTransactionIsolation() throws SQLException { return 0; }
+    @Override public SQLWarning getWarnings() throws SQLException { return null; }
+    @Override public void clearWarnings() throws SQLException { }
+    @Override public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { return null; }
+    @Override public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { return null; }
+    @Override public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { return null; }
+    @Override public Map<String, Class<?>> getTypeMap() throws SQLException { return null; }
+    @Override public void setTypeMap(Map<String, Class<?>> map) throws SQLException { }
+    @Override public void setHoldability(int holdability) throws SQLException { }
+    @Override public int getHoldability() throws SQLException { return 0; }
+    @Override public Savepoint setSavepoint() throws SQLException { return null; }
+    @Override public Savepoint setSavepoint(String name) throws SQLException { return null; }
+    @Override public void rollback(Savepoint savepoint) throws SQLException { }
+    @Override public void releaseSavepoint(Savepoint savepoint) throws SQLException { }
+    @Override public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { return null; }
+    @Override public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { return null; }
+    @Override public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { return null; }
+    @Override public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { return null; }
+    @Override public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { return null; }
+    @Override public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { return null; }
+    @Override public Clob createClob() throws SQLException { return null; }
+    @Override public Blob createBlob() throws SQLException { return null; }
+    @Override public NClob createNClob() throws SQLException { return null; }
+    @Override public SQLXML createSQLXML() throws SQLException { return null; }
+    @Override public boolean isValid(int timeout) throws SQLException { return false; }
+    @Override public void setClientInfo(String name, String value) throws SQLClientInfoException { }
+    @Override public void setClientInfo(Properties properties) throws SQLClientInfoException { }
+    @Override public String getClientInfo(String name) throws SQLException { return null; }
+    @Override public Properties getClientInfo() throws SQLException { return null; }
+    @Override public Array createArrayOf(String typeName, Object[] elements) throws SQLException { return null; }
+    @Override public Struct createStruct(String typeName, Object[] attributes) throws SQLException { return null; }
+    @Override public void setSchema(String schema) throws SQLException { }
+    @Override public String getSchema() throws SQLException { return null; }
+    @Override public void abort(Executor executor) throws SQLException { }
+    @Override public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { }
+    @Override public int getNetworkTimeout() throws SQLException { return 0; }
+    @Override public <T> T unwrap(Class<T> iface) throws SQLException { return null; }
+    @Override public boolean isWrapperFor(Class<?> iface) throws SQLException { return false; }
+
+    public String getHost() { return host; }
+    public int getPort() { return port; }
+    public Properties getProps() { return props; }
+    public String getDatabase() { return database; }
+    public String getUrl() { return url; }
+}
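The new com.taosdata.jdbc.rs package introduces a REST-backed connection whose createStatement() returns a RestfulStatement bound to the configured database, while most of the Connection surface is still stubbed. A hedged sketch that constructs it directly with the constructor shown above; host, port, database and URL are placeholders, and in practice the connection would come from the driver's connect method rather than direct construction.

// Hedged sketch of using the new REST-backed connection; values are placeholders.
import java.sql.Statement;
import java.util.Properties;
import com.taosdata.jdbc.rs.RestfulConnection;

public class RestfulSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        RestfulConnection conn = new RestfulConnection("localhost", "6041", props, "log",
                "jdbc:TAOS-RS://localhost:6041/log");   // URL shape is an assumption
        Statement stmt = conn.createStatement();        // returns a RestfulStatement bound to "log"
        // queries issued through stmt go over the REST API rather than the JNI connector
    }
}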
@@ -0,0 +1,886 @@
+package com.taosdata.jdbc.rs;
+
+import java.sql.*;
+
+public class RestfulDatabaseMetaData implements DatabaseMetaData {
+
+    @Override public boolean allProceduresAreCallable() throws SQLException { return false; }
+    @Override public boolean allTablesAreSelectable() throws SQLException { return false; }
+    @Override public String getURL() throws SQLException { return null; }
+    @Override public String getUserName() throws SQLException { return null; }
+    @Override public boolean isReadOnly() throws SQLException { return false; }
+    @Override public String getDatabaseProductName() throws SQLException { return null; }
+    @Override public String getDatabaseProductVersion() throws SQLException { return null; }
+    @Override public String getDriverName() throws SQLException { return null; }
+    @Override public String getDriverVersion() throws SQLException { return null; }
+    @Override public int getDriverMajorVersion() { return 0; }
+    @Override public int getDriverMinorVersion() { return 0; }
+    // Every remaining DatabaseMetaData override in this new file is an identical stub:
+    // the capability checks (nullsAreSorted*, usesLocalFiles*, supports*, stores*, nullPlusNonNullIsNull,
+    // isCatalogAtStart, doesMaxRowSizeIncludeBlobs, dataDefinition*, own*/others*AreVisible, *AreDetected,
+    // supportsBatchUpdates, supportsSavepoints, supportsNamedParameters, supportsMultipleOpenResults,
+    // supportsGetGeneratedKeys, supportsResultSetType/Concurrency/Holdability) return false;
+    // the numeric limits (getMax*, getDefaultTransactionIsolation) return 0; and the String getters
+    // (getIdentifierQuoteString, getSQLKeywords, the function lists, the schema/procedure/catalog terms)
+    // together with the catalog ResultSet getters (getProcedures, getProcedureColumns, getTables,
+    // getSchemas, getCatalogs, getTableTypes, getColumns, getColumnPrivileges, getTablePrivileges,
+    // getBestRowIdentifier, getVersionColumns, getPrimaryKeys, getImportedKeys, getExportedKeys,
+    // getCrossReference, getTypeInfo, getIndexInfo, getUDTs, getSuperTypes, getSuperTables,
+    // getAttributes) and getConnection() return null.
+    // (The remainder of this 886-line file is omitted here; the extracted diff breaks off mid-file.)
|
public int getResultSetHoldability() throws SQLException {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getDatabaseMajorVersion() throws SQLException {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getDatabaseMinorVersion() throws SQLException {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getJDBCMajorVersion() throws SQLException {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getJDBCMinorVersion() throws SQLException {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getSQLStateType() throws SQLException {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean locatorsUpdateCopy() throws SQLException {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean supportsStatementPooling() throws SQLException {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public RowIdLifetime getRowIdLifetime() throws SQLException {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ResultSet getClientInfoProperties() throws SQLException {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) throws SQLException {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean generatedKeyAlwaysReturned() throws SQLException {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public <T> T unwrap(Class<T> iface) throws SQLException {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean isWrapperFor(Class<?> iface) throws SQLException {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
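All of the metadata methods in this hunk are placeholders that return null, false, or 0, so callers cannot assume a usable ResultSet comes back. The sketch below is not part of the commit; it is a caller-side guard, and it assumes the object above is the DatabaseMetaData handed out by a TDengine JDBC connection (the enclosing class and how RestfulConnection wires it up are not visible in this hunk).

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class MetaDataGuardExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical URL; any TDengine JDBC connection would do here.
        Connection conn = DriverManager.getConnection("jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata");
        DatabaseMetaData meta = conn.getMetaData();
        // The stubs above return null instead of an empty ResultSet, so check before iterating.
        ResultSet catalogs = meta.getCatalogs();
        if (catalogs == null) {
            System.out.println("getCatalogs() is not implemented by this driver yet");
        } else {
            while (catalogs.next()) {
                System.out.println(catalogs.getString(1));
            }
        }
        conn.close();
    }
}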
@@ -0,0 +1,91 @@
package com.taosdata.jdbc.rs;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.AbstractTaosDriver;
import com.taosdata.jdbc.TSDBConstants;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.rs.util.HttpClientPoolUtil;

import java.sql.*;
import java.util.Properties;
import java.util.logging.Logger;

public class RestfulDriver extends AbstractTaosDriver {

    private static final String URL_PREFIX = "jdbc:TAOS-RS://";

    static {
        try {
            DriverManager.registerDriver(new RestfulDriver());
        } catch (SQLException e) {
            throw new RuntimeException(TSDBConstants.WrapErrMsg("can not register Restful JDBC driver"), e);
        }
    }

    @Override
    public Connection connect(String url, Properties info) throws SQLException {
        // throw SQLException if url is null
        if (url == null)
            throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!"));
        // return null if the url is not accepted
        if (!acceptsURL(url))
            return null;

        Properties props = parseURL(url, info);
        String host = props.getProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
        String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041");
        String database = props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME);

        String loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":"
                + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/"
                + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/"
                + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + "";
        String result = HttpClientPoolUtil.execute(loginUrl);
        JSONObject jsonResult = JSON.parseObject(result);
        String status = jsonResult.getString("status");
        if (!status.equals("succ")) {
            throw new SQLException(jsonResult.getString("desc"));
        }

        return new RestfulConnection(host, port, props, database, url);
    }

    @Override
    public boolean acceptsURL(String url) throws SQLException {
        if (url == null)
            throw new SQLException(TSDBConstants.WrapErrMsg("url is null"));
        return (url != null && url.length() > 0 && url.trim().length() > 0) && url.startsWith(URL_PREFIX);
    }

    @Override
    public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
        if (info == null) {
            info = new Properties();
        }
        if (acceptsURL(url)) {
            info = parseURL(url, info);
        }
        return getPropertyInfo(info);
    }

    @Override
    public int getMajorVersion() {
        return 2;
    }

    @Override
    public int getMinorVersion() {
        return 0;
    }

    @Override
    public boolean jdbcCompliant() {
        return false;
    }

    @Override
    public Logger getParentLogger() throws SQLFeatureNotSupportedException {
        return null;
    }
}
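A short usage sketch for the new RESTful driver, assembled from the URL prefix above and the URL used in RestfulDriverTest later in this commit. It is illustrative only: the host, the default port 6041, and the existence of the log.log table (which mirrors the test) are assumptions about the target environment.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class RestfulConnectExample {
    public static void main(String[] args) throws Exception {
        // Loading the class triggers the static registration block in RestfulDriver.
        Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
        // jdbc:TAOS-RS:// is the prefix accepted by acceptsURL(); user/password are passed as URL properties.
        String url = "jdbc:TAOS-RS://localhost:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select * from log.log")) {  // same query as the unit test
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}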
File diff suppressed because it is too large
@@ -0,0 +1,129 @@
package com.taosdata.jdbc.rs;

import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.List;

public class RestfulResultSetMetaData implements ResultSetMetaData {

    private List<String> fields;

    public RestfulResultSetMetaData(List<String> fields) {
        this.fields = fields;
    }

    @Override
    public int getColumnCount() throws SQLException {
        return fields.size();
    }

    @Override
    public boolean isAutoIncrement(int column) throws SQLException { return false; }
    @Override
    public boolean isCaseSensitive(int column) throws SQLException { return false; }
    @Override
    public boolean isSearchable(int column) throws SQLException { return false; }
    @Override
    public boolean isCurrency(int column) throws SQLException { return false; }
    @Override
    public int isNullable(int column) throws SQLException { return 0; }
    @Override
    public boolean isSigned(int column) throws SQLException { return false; }
    @Override
    public int getColumnDisplaySize(int column) throws SQLException { return 0; }

    @Override
    public String getColumnLabel(int column) throws SQLException {
        return fields.get(column - 1);
    }

    @Override
    public String getColumnName(int column) throws SQLException { return null; }
    @Override
    public String getSchemaName(int column) throws SQLException { return null; }
    @Override
    public int getPrecision(int column) throws SQLException { return 0; }
    @Override
    public int getScale(int column) throws SQLException { return 0; }
    @Override
    public String getTableName(int column) throws SQLException { return null; }
    @Override
    public String getCatalogName(int column) throws SQLException { return null; }
    @Override
    public int getColumnType(int column) throws SQLException { return 0; }
    @Override
    public String getColumnTypeName(int column) throws SQLException { return null; }
    @Override
    public boolean isReadOnly(int column) throws SQLException { return false; }
    @Override
    public boolean isWritable(int column) throws SQLException { return false; }
    @Override
    public boolean isDefinitelyWritable(int column) throws SQLException { return false; }
    @Override
    public String getColumnClassName(int column) throws SQLException { return null; }
    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException { return null; }
    @Override
    public boolean isWrapperFor(Class<?> iface) throws SQLException { return false; }
}
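In this metadata class only getColumnCount() and getColumnLabel() are backed by the fields list (1-based, per JDBC convention); the other accessors still return null, false, or 0. A small sketch of printing a header row using only the implemented parts; it assumes the metadata object comes from a query issued through the RESTful driver.

import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;

public class ColumnLabelExample {
    // Prints a tab-separated header line for a ResultSet produced by the RESTful driver.
    static void printHeader(ResultSet rs) throws SQLException {
        ResultSetMetaData meta = rs.getMetaData();
        StringBuilder header = new StringBuilder();
        for (int i = 1; i <= meta.getColumnCount(); i++) {      // JDBC columns are 1-based
            header.append(meta.getColumnLabel(i)).append('\t'); // getColumnName() would return null here
        }
        System.out.println(header.toString().trim());
    }
}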
@@ -0,0 +1,280 @@
package com.taosdata.jdbc.rs;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.taosdata.jdbc.TSDBConstants;
import com.taosdata.jdbc.rs.util.HttpClientPoolUtil;

import java.sql.*;
import java.util.Arrays;
import java.util.List;

public class RestfulStatement implements Statement {

    private final String catalog;
    private final RestfulConnection conn;

    public RestfulStatement(RestfulConnection c, String catalog) {
        this.conn = c;
        this.catalog = catalog;
    }

    @Override
    public ResultSet executeQuery(String sql) throws SQLException {

        final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql";

        String result = HttpClientPoolUtil.execute(url, sql);
        String fields = "";
        List<String> words = Arrays.asList(sql.split(" "));
        if (words.get(0).equalsIgnoreCase("select")) {
            int index = 0;
            if (words.contains("from")) {
                index = words.indexOf("from");
            }
            if (words.contains("FROM")) {
                index = words.indexOf("FROM");
            }
            fields = HttpClientPoolUtil.execute(url, "DESCRIBE " + words.get(index + 1));
        }

        JSONObject jsonObject = JSON.parseObject(result);
        if (jsonObject.getString("status").equals("error")) {
            throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
                    jsonObject.getString("desc") + "\n" +
                    "error code: " + jsonObject.getString("code")));
        }
        String dataStr = jsonObject.getString("data");
        if ("use".equalsIgnoreCase(fields.split(" ")[0])) {
            return new RestfulResultSet(dataStr, "");
        }

        JSONObject jsonField = JSON.parseObject(fields);
        if (jsonField == null) {
            return new RestfulResultSet(dataStr, "");
        }
        if (jsonField.getString("status").equals("error")) {
            throw new SQLException(TSDBConstants.WrapErrMsg("SQL execution error: " +
                    jsonField.getString("desc") + "\n" +
                    "error code: " + jsonField.getString("code")));
        }
        String fieldData = jsonField.getString("data");

        return new RestfulResultSet(dataStr, fieldData);
    }

    @Override
    public int executeUpdate(String sql) throws SQLException { return 0; }
    @Override
    public void close() throws SQLException { }
    @Override
    public int getMaxFieldSize() throws SQLException { return 0; }
    @Override
    public void setMaxFieldSize(int max) throws SQLException { }
    @Override
    public int getMaxRows() throws SQLException { return 0; }
    @Override
    public void setMaxRows(int max) throws SQLException { }
    @Override
    public void setEscapeProcessing(boolean enable) throws SQLException { }
    @Override
    public int getQueryTimeout() throws SQLException { return 0; }
    @Override
    public void setQueryTimeout(int seconds) throws SQLException { }
    @Override
    public void cancel() throws SQLException { }
    @Override
    public SQLWarning getWarnings() throws SQLException { return null; }
    @Override
    public void clearWarnings() throws SQLException { }
    @Override
    public void setCursorName(String name) throws SQLException { }
    @Override
    public boolean execute(String sql) throws SQLException { return false; }
    @Override
    public ResultSet getResultSet() throws SQLException { return null; }
    @Override
    public int getUpdateCount() throws SQLException { return 0; }
    @Override
    public boolean getMoreResults() throws SQLException { return false; }
    @Override
    public void setFetchDirection(int direction) throws SQLException { }
    @Override
    public int getFetchDirection() throws SQLException { return 0; }
    @Override
    public void setFetchSize(int rows) throws SQLException { }
    @Override
    public int getFetchSize() throws SQLException { return 0; }
    @Override
    public int getResultSetConcurrency() throws SQLException { return 0; }
    @Override
    public int getResultSetType() throws SQLException { return 0; }
    @Override
    public void addBatch(String sql) throws SQLException { }
    @Override
    public void clearBatch() throws SQLException { }
    @Override
    public int[] executeBatch() throws SQLException { return new int[0]; }
    @Override
    public Connection getConnection() throws SQLException { return null; }
    @Override
    public boolean getMoreResults(int current) throws SQLException { return false; }
    @Override
    public ResultSet getGeneratedKeys() throws SQLException { return null; }
    @Override
    public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { return 0; }
    @Override
    public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { return 0; }
    @Override
    public int executeUpdate(String sql, String[] columnNames) throws SQLException { return 0; }
    @Override
    public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { return false; }
    @Override
    public boolean execute(String sql, int[] columnIndexes) throws SQLException { return false; }
    @Override
    public boolean execute(String sql, String[] columnNames) throws SQLException { return false; }
    @Override
    public int getResultSetHoldability() throws SQLException { return 0; }
    @Override
    public boolean isClosed() throws SQLException { return false; }
    @Override
    public void setPoolable(boolean poolable) throws SQLException { }
    @Override
    public boolean isPoolable() throws SQLException { return false; }
    @Override
    public void closeOnCompletion() throws SQLException { }
    @Override
    public boolean isCloseOnCompletion() throws SQLException { return false; }
    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException { return null; }
    @Override
    public boolean isWrapperFor(Class<?> iface) throws SQLException { return false; }
}
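executeQuery() above posts the SQL text to http://host:port/rest/sql and decides success from the status field of the returned JSON, using desc and code for the error path and data for the rows. The standalone sketch below shows that same check with fastjson; the sample payload is shaped after the fields the code reads, not captured from a real server.

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

public class RestResponseCheckExample {
    public static void main(String[] args) {
        // Illustrative payload with the fields executeQuery() inspects: status, code, desc, data.
        String body = "{\"status\":\"error\",\"code\":534,\"desc\":\"table does not exist\"}";
        JSONObject json = JSON.parseObject(body);
        if ("error".equals(json.getString("status"))) {
            System.out.println("SQL failed: " + json.getString("desc") + " (code " + json.getString("code") + ")");
        } else {
            System.out.println("rows: " + json.getString("data"));
        }
    }
}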
@@ -0,0 +1,222 @@
package com.taosdata.jdbc.rs.util;

import org.apache.commons.lang3.StringUtils;
import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpEntity;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;


public class HttpClientPoolUtil {
    public static PoolingHttpClientConnectionManager cm = null;
    public static CloseableHttpClient httpClient = null;
    /**
     * default content type
     */
    private static final String DEFAULT_CONTENT_TYPE = "application/json";
    /**
     * default request timeout, in milliseconds
     */
    private static final int DEFAULT_TIME_OUT = 15000;
    private static final int count = 32;
    private static final int totalCount = 1000;
    private static final int Http_Default_Keep_Time = 15000;

    /**
     * initialize the connection pool
     */
    public static synchronized void initPools() {
        if (httpClient == null) {
            cm = new PoolingHttpClientConnectionManager();
            cm.setDefaultMaxPerRoute(count);
            cm.setMaxTotal(totalCount);
            httpClient = HttpClients.custom().setKeepAliveStrategy(defaultStrategy).setConnectionManager(cm).build();
        }
    }

    /**
     * HTTP connection keep-alive strategy
     */
    public static ConnectionKeepAliveStrategy defaultStrategy = (response, context) -> {
        HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
        int keepTime = Http_Default_Keep_Time * 1000;
        while (it.hasNext()) {
            HeaderElement headerElement = it.nextElement();
            String param = headerElement.getName();
            String value = headerElement.getValue();
            if (value != null && param.equalsIgnoreCase("timeout")) {
                try {
                    return Long.parseLong(value) * 1000;
                } catch (Exception e) {
                    new Exception(
                            "format KeepAlive timeout exception, exception:" + e.toString())
                            .printStackTrace();
                }
            }
        }
        return keepTime;
    };

    public static CloseableHttpClient getHttpClient() {
        return httpClient;
    }

    public static PoolingHttpClientConnectionManager getHttpConnectionManager() {
        return cm;
    }

    /**
     * Execute an HTTP POST request.
     * Uses Content-Type: application/json and Accept: application/json by default.
     *
     * @param uri  request URL
     * @param data request body
     * @return responseBody
     */
    public static String execute(String uri, String data) {
        long startTime = System.currentTimeMillis();
        HttpEntity httpEntity = null;
        HttpEntityEnclosingRequestBase method = null;
        String responseBody = "";
        try {
            if (httpClient == null) {
                initPools();
            }
            method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
            method.setEntity(new StringEntity(data));
            HttpContext context = HttpClientContext.create();
            CloseableHttpResponse httpResponse = httpClient.execute(method, context);
            httpEntity = httpResponse.getEntity();
            if (httpEntity != null) {
                responseBody = EntityUtils.toString(httpEntity, "UTF-8");
            }
        } catch (Exception e) {
            if (method != null) {
                method.abort();
            }
            // e.printStackTrace();
            // logger.error("execute post request exception, url:" + uri + ", exception:" + e.toString()
            //         + ", cost time(ms):" + (System.currentTimeMillis() - startTime));
            new Exception("execute post request exception, url:"
                    + uri + ", exception:" + e.toString() +
                    ", cost time(ms):" + (System.currentTimeMillis() - startTime))
                    .printStackTrace();
        } finally {
            if (httpEntity != null) {
                try {
                    EntityUtils.consumeQuietly(httpEntity);
                } catch (Exception e) {
                    // e.printStackTrace();
                    // logger.error("close response exception, url:" + uri + ", exception:" + e.toString()
                    //         + ", cost time(ms):" + (System.currentTimeMillis() - startTime));
                    new Exception(
                            "close response exception, url:" + uri +
                                    ", exception:" + e.toString()
                                    + ", cost time(ms):" + (System.currentTimeMillis() - startTime))
                            .printStackTrace();
                }
            }
        }
        return responseBody;
    }

    /**
     * Create a request.
     *
     * @param uri         request URL
     * @param methodName  HTTP method name
     * @param contentType content type
     * @param timeout     timeout
     * @return HttpRequestBase
     * @author lisc
     */
    public static HttpRequestBase getRequest(String uri, String methodName, String contentType, int timeout) {
        if (httpClient == null) {
            initPools();
        }
        HttpRequestBase method;
        if (timeout <= 0) {
            timeout = DEFAULT_TIME_OUT;
        }
        RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout * 1000)
                .setConnectTimeout(timeout * 1000).setConnectionRequestTimeout(timeout * 1000)
                .setExpectContinueEnabled(false).build();
        if (HttpPut.METHOD_NAME.equalsIgnoreCase(methodName)) {
            method = new HttpPut(uri);
        } else if (HttpPost.METHOD_NAME.equalsIgnoreCase(methodName)) {
            method = new HttpPost(uri);
        } else if (HttpGet.METHOD_NAME.equalsIgnoreCase(methodName)) {
            method = new HttpGet(uri);
        } else {
            method = new HttpPost(uri);
        }

        if (StringUtils.isBlank(contentType)) {
            contentType = DEFAULT_CONTENT_TYPE;
        }
        method.addHeader("Content-Type", contentType);
        method.addHeader("Accept", contentType);
        method.setConfig(requestConfig);
        return method;
    }

    /**
     * Execute an HTTP GET request.
     *
     * @param uri request URL
     * @return responseBody
     */
    public static String execute(String uri) {
        long startTime = System.currentTimeMillis();
        HttpEntity httpEntity = null;
        HttpRequestBase method = null;
        String responseBody = "";
        try {
            if (httpClient == null) {
                initPools();
            }
            method = getRequest(uri, HttpGet.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
            HttpContext context = HttpClientContext.create();
            CloseableHttpResponse httpResponse = httpClient.execute(method, context);
            httpEntity = httpResponse.getEntity();
            if (httpEntity != null) {
                responseBody = EntityUtils.toString(httpEntity, "UTF-8");
                // logger.info("request URL: " + uri + ", response status: " + httpResponse.getStatusLine().getStatusCode());
            }
        } catch (Exception e) {
            if (method != null) {
                method.abort();
            }
            e.printStackTrace();
            // logger.error("execute get request exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):"
            //         + (System.currentTimeMillis() - startTime));
            System.out.println("log: HttpClientPoolUtil execute get request exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):"
                    + (System.currentTimeMillis() - startTime));
        } finally {
            if (httpEntity != null) {
                try {
                    EntityUtils.consumeQuietly(httpEntity);
                } catch (Exception e) {
                    // e.printStackTrace();
                    // logger.error("close response exception, url:" + uri + ", exception:" + e.toString()
                    //         + ",cost time(ms):" + (System.currentTimeMillis() - startTime));
                    new Exception("close response exception, url:" + uri + ", exception:" + e.toString()
                            + ",cost time(ms):" + (System.currentTimeMillis() - startTime))
                            .printStackTrace();
                }
            }
        }
        return responseBody;
    }
}
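HttpClientPoolUtil exposes two static entry points: execute(uri) issues a GET (used by RestfulDriver for /rest/login/...) and execute(uri, data) issues a POST with a JSON Content-Type (used by RestfulStatement for /rest/sql); the pooled client is created lazily on first use. A minimal usage sketch; the host, port, and credentials are placeholders, not values from this commit's test environment.

import com.taosdata.jdbc.rs.util.HttpClientPoolUtil;

public class HttpPoolExample {
    public static void main(String[] args) {
        // GET: the driver uses this form for /rest/login/<user>/<password>.
        String login = HttpClientPoolUtil.execute("http://localhost:6041/rest/login/root/taosdata");
        System.out.println("login response: " + login);

        // POST: the statement uses this form to send SQL text to /rest/sql.
        String result = HttpClientPoolUtil.execute("http://localhost:6041/rest/sql", "show databases");
        System.out.println("query response: " + result);
    }
}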
@@ -0,0 +1,40 @@
package com.taosdata.jdbc.rs;

import org.junit.Assert;
import org.junit.Test;

import java.sql.*;

public class RestfulDriverTest {

    @Test
    public void testCase001() {
        try {
            Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
            Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://master:6041/?user=root&password=taosdata");
            Statement statement = connection.createStatement();
            ResultSet resultSet = statement.executeQuery("select * from log.log");
            ResultSetMetaData metaData = resultSet.getMetaData();
            while (resultSet.next()) {
                for (int i = 1; i <= metaData.getColumnCount(); i++) {
                    String column = metaData.getColumnLabel(i);
                    String value = resultSet.getString(i);
                    System.out.print(column + ":" + value + "\t");
                }
                System.out.println();
            }
            statement.close();
            connection.close();
        } catch (SQLException | ClassNotFoundException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testAcceptUrl() throws SQLException {
        Driver driver = new RestfulDriver();
        boolean isAccept = driver.acceptsURL("jdbc:TAOS-RS://master:6041");
        Assert.assertTrue(isAccept);
    }

}
@ -24,7 +24,7 @@ int32_t dnodeInitVRead();
|
||||||
void dnodeCleanupVRead();
|
void dnodeCleanupVRead();
|
||||||
void dnodeDispatchToVReadQueue(SRpcMsg *pMsg);
|
void dnodeDispatchToVReadQueue(SRpcMsg *pMsg);
|
||||||
void * dnodeAllocVReadQueue(void *pVnode);
|
void * dnodeAllocVReadQueue(void *pVnode);
|
||||||
void dnodeFreeVReadQueue(void *rqueue);
|
void dnodeFreeVReadQueue(void *pRqueue);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
|
|
@ -24,8 +24,8 @@ int32_t dnodeInitVWrite();
|
||||||
void dnodeCleanupVWrite();
|
void dnodeCleanupVWrite();
|
||||||
void dnodeDispatchToVWriteQueue(SRpcMsg *pMsg);
|
void dnodeDispatchToVWriteQueue(SRpcMsg *pMsg);
|
||||||
void * dnodeAllocVWriteQueue(void *pVnode);
|
void * dnodeAllocVWriteQueue(void *pVnode);
|
||||||
void dnodeFreeVWriteQueue(void *wqueue);
|
void dnodeFreeVWriteQueue(void *pWqueue);
|
||||||
void dnodeSendRpcVWriteRsp(void *pVnode, void *param, int32_t code);
|
void dnodeSendRpcVWriteRsp(void *pVnode, void *pWrite, int32_t code);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
|
|
@ -151,6 +151,13 @@ void dnodeCleanupClient() {
|
||||||
}
|
}
|
||||||
|
|
||||||
static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
|
static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
|
||||||
|
if (dnodeGetRunStatus() != TSDB_RUN_STATUS_RUNING) {
|
||||||
|
if (pMsg == NULL || pMsg->pCont == NULL) return;
|
||||||
|
dDebug("msg:%p is ignored since dnode not running", pMsg);
|
||||||
|
rpcFreeCont(pMsg->pCont);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
if (pMsg->msgType == TSDB_MSG_TYPE_DM_STATUS_RSP && pEpSet) {
|
if (pMsg->msgType == TSDB_MSG_TYPE_DM_STATUS_RSP && pEpSet) {
|
||||||
dnodeUpdateEpSetForPeer(pEpSet);
|
dnodeUpdateEpSetForPeer(pEpSet);
|
||||||
}
|
}
|
||||||
|
|
|
@ -35,7 +35,7 @@ typedef struct {
|
||||||
pthread_mutex_t mutex;
|
pthread_mutex_t mutex;
|
||||||
} SVReadWorkerPool;
|
} SVReadWorkerPool;
|
||||||
|
|
||||||
static void *dnodeProcessReadQueue(void *param);
|
static void *dnodeProcessReadQueue(void *pWorker);
|
||||||
|
|
||||||
// module global variable
|
// module global variable
|
||||||
static SVReadWorkerPool tsVReadWP;
|
static SVReadWorkerPool tsVReadWP;
|
||||||
|
@ -47,7 +47,7 @@ int32_t dnodeInitVRead() {
|
||||||
tsVReadWP.min = tsNumOfCores;
|
tsVReadWP.min = tsNumOfCores;
|
||||||
tsVReadWP.max = tsNumOfCores * tsNumOfThreadsPerCore;
|
tsVReadWP.max = tsNumOfCores * tsNumOfThreadsPerCore;
|
||||||
if (tsVReadWP.max <= tsVReadWP.min * 2) tsVReadWP.max = 2 * tsVReadWP.min;
|
if (tsVReadWP.max <= tsVReadWP.min * 2) tsVReadWP.max = 2 * tsVReadWP.min;
|
||||||
tsVReadWP.worker = (SVReadWorker *)calloc(sizeof(SVReadWorker), tsVReadWP.max);
|
tsVReadWP.worker = calloc(sizeof(SVReadWorker), tsVReadWP.max);
|
||||||
pthread_mutex_init(&tsVReadWP.mutex, NULL);
|
pthread_mutex_init(&tsVReadWP.mutex, NULL);
|
||||||
|
|
||||||
if (tsVReadWP.worker == NULL) return -1;
|
if (tsVReadWP.worker == NULL) return -1;
|
||||||
|
@ -85,7 +85,7 @@ void dnodeCleanupVRead() {
|
||||||
void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
|
void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
|
||||||
int32_t queuedMsgNum = 0;
|
int32_t queuedMsgNum = 0;
|
||||||
int32_t leftLen = pMsg->contLen;
|
int32_t leftLen = pMsg->contLen;
|
||||||
char * pCont = (char *)pMsg->pCont;
|
char * pCont = pMsg->pCont;
|
||||||
|
|
||||||
while (leftLen > 0) {
|
while (leftLen > 0) {
|
||||||
SMsgHead *pHead = (SMsgHead *)pCont;
|
SMsgHead *pHead = (SMsgHead *)pCont;
|
||||||
|
@ -146,8 +146,8 @@ void *dnodeAllocVReadQueue(void *pVnode) {
|
||||||
return queue;
|
return queue;
|
||||||
}
|
}
|
||||||
|
|
||||||
void dnodeFreeVReadQueue(void *rqueue) {
|
void dnodeFreeVReadQueue(void *pRqueue) {
|
||||||
taosCloseQueue(rqueue);
|
taosCloseQueue(pRqueue);
|
||||||
}
|
}
|
||||||
|
|
||||||
void dnodeSendRpcVReadRsp(void *pVnode, SVReadMsg *pRead, int32_t code) {
|
void dnodeSendRpcVReadRsp(void *pVnode, SVReadMsg *pRead, int32_t code) {
|
||||||
|
@ -159,14 +159,12 @@ void dnodeSendRpcVReadRsp(void *pVnode, SVReadMsg *pRead, int32_t code) {
|
||||||
};
|
};
|
||||||
|
|
||||||
rpcSendResponse(&rpcRsp);
|
rpcSendResponse(&rpcRsp);
|
||||||
vnodeRelease(pVnode);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void dnodeDispatchNonRspMsg(void *pVnode, SVReadMsg *pRead, int32_t code) {
|
void dnodeDispatchNonRspMsg(void *pVnode, SVReadMsg *pRead, int32_t code) {
|
||||||
vnodeRelease(pVnode);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *dnodeProcessReadQueue(void *param) {
|
static void *dnodeProcessReadQueue(void *pWorker) {
|
||||||
SVReadMsg *pRead;
|
SVReadMsg *pRead;
|
||||||
int32_t qtype;
|
int32_t qtype;
|
||||||
void * pVnode;
|
void * pVnode;
|
||||||
|
@ -193,7 +191,7 @@ static void *dnodeProcessReadQueue(void *param) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
taosFreeQitem(pRead);
|
vnodeFreeFromRQueue(pVnode, pRead);
|
||||||
}
|
}
|
||||||
|
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
|
@ -38,11 +38,11 @@ typedef struct {
|
||||||
} SVWriteWorkerPool;
|
} SVWriteWorkerPool;
|
||||||
|
|
||||||
static SVWriteWorkerPool tsVWriteWP;
|
static SVWriteWorkerPool tsVWriteWP;
|
||||||
static void *dnodeProcessVWriteQueue(void *param);
|
static void *dnodeProcessVWriteQueue(void *pWorker);
|
||||||
|
|
||||||
int32_t dnodeInitVWrite() {
|
int32_t dnodeInitVWrite() {
|
||||||
tsVWriteWP.max = tsNumOfCores;
|
tsVWriteWP.max = tsNumOfCores;
|
||||||
tsVWriteWP.worker = (SVWriteWorker *)tcalloc(sizeof(SVWriteWorker), tsVWriteWP.max);
|
tsVWriteWP.worker = tcalloc(sizeof(SVWriteWorker), tsVWriteWP.max);
|
||||||
if (tsVWriteWP.worker == NULL) return -1;
|
if (tsVWriteWP.worker == NULL) return -1;
|
||||||
pthread_mutex_init(&tsVWriteWP.mutex, NULL);
|
pthread_mutex_init(&tsVWriteWP.mutex, NULL);
|
||||||
|
|
||||||
|
@ -162,13 +162,13 @@ void *dnodeAllocVWriteQueue(void *pVnode) {
|
||||||
return queue;
|
return queue;
|
||||||
}
|
}
|
||||||
|
|
||||||
void dnodeFreeVWriteQueue(void *wqueue) {
|
void dnodeFreeVWriteQueue(void *pWqueue) {
|
||||||
taosCloseQueue(wqueue);
|
taosCloseQueue(pWqueue);
|
||||||
}
|
}
|
||||||
|
|
||||||
void dnodeSendRpcVWriteRsp(void *pVnode, void *param, int32_t code) {
|
void dnodeSendRpcVWriteRsp(void *pVnode, void *wparam, int32_t code) {
|
||||||
if (param == NULL) return;
|
if (wparam == NULL) return;
|
||||||
SVWriteMsg *pWrite = param;
|
SVWriteMsg *pWrite = wparam;
|
||||||
|
|
||||||
if (code < 0) pWrite->code = code;
|
if (code < 0) pWrite->code = code;
|
||||||
int32_t count = atomic_add_fetch_32(&pWrite->processedCount, 1);
|
int32_t count = atomic_add_fetch_32(&pWrite->processedCount, 1);
|
||||||
|
@ -183,13 +183,11 @@ void dnodeSendRpcVWriteRsp(void *pVnode, void *param, int32_t code) {
|
||||||
};
|
};
|
||||||
|
|
||||||
rpcSendResponse(&rpcRsp);
|
rpcSendResponse(&rpcRsp);
|
||||||
taosFreeQitem(pWrite);
|
vnodeFreeFromWQueue(pVnode, pWrite);
|
||||||
|
|
||||||
vnodeRelease(pVnode);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *dnodeProcessVWriteQueue(void *param) {
|
static void *dnodeProcessVWriteQueue(void *wparam) {
|
||||||
SVWriteWorker *pWorker = param;
|
SVWriteWorker *pWorker = wparam;
|
||||||
SVWriteMsg * pWrite;
|
SVWriteMsg * pWrite;
|
||||||
void * pVnode;
|
void * pVnode;
|
||||||
int32_t numOfMsgs;
|
int32_t numOfMsgs;
|
||||||
|
@ -232,8 +230,7 @@ static void *dnodeProcessVWriteQueue(void *param) {
|
||||||
if (pWrite->rspRet.rsp) {
|
if (pWrite->rspRet.rsp) {
|
||||||
rpcFreeCont(pWrite->rspRet.rsp);
|
rpcFreeCont(pWrite->rspRet.rsp);
|
||||||
}
|
}
|
||||||
taosFreeQitem(pWrite);
|
vnodeFreeFromWQueue(pVnode, pWrite);
|
||||||
vnodeRelease(pVnode);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -54,10 +54,10 @@ void dnodeSendMsgToDnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp, SRpcEpSet *epSet
|
||||||
void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid);
|
void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid);
|
||||||
|
|
||||||
void *dnodeAllocVWriteQueue(void *pVnode);
|
void *dnodeAllocVWriteQueue(void *pVnode);
|
||||||
void dnodeFreeVWriteQueue(void *wqueue);
|
void dnodeFreeVWriteQueue(void *pWqueue);
|
||||||
void dnodeSendRpcVWriteRsp(void *pVnode, void *param, int32_t code);
|
void dnodeSendRpcVWriteRsp(void *pVnode, void *pWrite, int32_t code);
|
||||||
void *dnodeAllocVReadQueue(void *pVnode);
|
void *dnodeAllocVReadQueue(void *pVnode);
|
||||||
void dnodeFreeVReadQueue(void *rqueue);
|
void dnodeFreeVReadQueue(void *pRqueue);
|
||||||
|
|
||||||
int32_t dnodeAllocateMPeerQueue();
|
int32_t dnodeAllocateMPeerQueue();
|
||||||
void dnodeFreeMPeerQueue();
|
void dnodeFreeMPeerQueue();
|
||||||
|
|
|
@ -201,6 +201,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "Missing da
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "Out of memory")
|
TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "Out of memory")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "Unexpected generic error in vnode")
|
TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "Unexpected generic error in vnode")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, 0, 0x050A, "Invalid version file")
|
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, 0, 0x050A, "Invalid version file")
|
||||||
|
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, 0, 0x050B, "Vnode memory is full because commit failed")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "Database suspended")
|
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "Database suspended")
|
||||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Write operation denied")
|
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Write operation denied")
|
||||||
|
|
||||||
|
|
|
@ -46,7 +46,7 @@ extern "C" {
|
||||||
typedef struct {
|
typedef struct {
|
||||||
void *appH;
|
void *appH;
|
||||||
void *cqH;
|
void *cqH;
|
||||||
int (*notifyStatus)(void *, int status);
|
int (*notifyStatus)(void *, int status, int eno);
|
||||||
int (*eventCallBack)(void *);
|
int (*eventCallBack)(void *);
|
||||||
void *(*cqCreateFunc)(void *handle, uint64_t uid, int sid, char *sqlStr, STSchema *pSchema);
|
void *(*cqCreateFunc)(void *handle, uint64_t uid, int sid, char *sqlStr, STSchema *pSchema);
|
||||||
void (*cqDropFunc)(void *handle);
|
void (*cqDropFunc)(void *handle);
|
||||||
|
@ -83,7 +83,7 @@ STsdbCfg *tsdbGetCfg(const TSDB_REPO_T *repo);
|
||||||
int tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg);
|
int tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg);
|
||||||
int32_t tsdbDropRepo(char *rootDir);
|
int32_t tsdbDropRepo(char *rootDir);
|
||||||
TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH);
|
TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH);
|
||||||
void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit);
|
int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit);
|
||||||
int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg);
|
int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg);
|
||||||
int tsdbGetState(TSDB_REPO_T *repo);
|
int tsdbGetState(TSDB_REPO_T *repo);
|
||||||
|
|
||||||
|
|
|
@ -23,12 +23,12 @@ extern "C" {
|
||||||
#include "twal.h"
|
#include "twal.h"
|
||||||
|
|
||||||
typedef enum _VN_STATUS {
|
typedef enum _VN_STATUS {
|
||||||
TAOS_VN_STATUS_INIT,
|
TAOS_VN_STATUS_INIT = 0,
|
||||||
TAOS_VN_STATUS_READY,
|
TAOS_VN_STATUS_READY = 1,
|
||||||
TAOS_VN_STATUS_CLOSING,
|
TAOS_VN_STATUS_CLOSING = 2,
|
||||||
TAOS_VN_STATUS_UPDATING,
|
TAOS_VN_STATUS_UPDATING = 3,
|
||||||
TAOS_VN_STATUS_RESET,
|
TAOS_VN_STATUS_RESET = 4,
|
||||||
} EVnStatus;
|
} EVnodeStatus;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
int32_t len;
|
int32_t len;
|
||||||
|
@ -70,17 +70,19 @@ void* vnodeAcquire(int32_t vgId); // add refcount
|
||||||
void vnodeRelease(void *pVnode); // dec refCount
|
void vnodeRelease(void *pVnode); // dec refCount
|
||||||
void* vnodeGetWal(void *pVnode);
|
void* vnodeGetWal(void *pVnode);
|
||||||
|
|
||||||
int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam);
|
int32_t vnodeWriteToWQueue(void *pVnode, void *pHead, int32_t qtype, void *pRpcMsg);
|
||||||
int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rparam);
|
void vnodeFreeFromWQueue(void *pVnode, SVWriteMsg *pWrite);
|
||||||
|
int32_t vnodeProcessWrite(void *pVnode, void *pHead, int32_t qtype, void *pRspRet);
|
||||||
int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes);
|
int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes);
|
||||||
void vnodeBuildStatusMsg(void *param);
|
void vnodeBuildStatusMsg(void *pStatus);
|
||||||
void vnodeConfirmForward(void *param, uint64_t version, int32_t code);
|
void vnodeConfirmForward(void *pVnode, uint64_t version, int32_t code);
|
||||||
void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes);
|
void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes);
|
||||||
|
|
||||||
int32_t vnodeInitResources();
|
int32_t vnodeInitResources();
|
||||||
void vnodeCleanupResources();
|
void vnodeCleanupResources();
|
||||||
|
|
||||||
int32_t vnodeWriteToRQueue(void *vparam, void *pCont, int32_t contLen, int8_t qtype, void *rparam);
|
int32_t vnodeWriteToRQueue(void *pVnode, void *pCont, int32_t contLen, int8_t qtype, void *rparam);
|
||||||
|
void vnodeFreeFromRQueue(void *pVnode, SVReadMsg *pRead);
|
||||||
int32_t vnodeProcessRead(void *pVnode, SVReadMsg *pRead);
|
int32_t vnodeProcessRead(void *pVnode, SVReadMsg *pRead);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
|
|
|
@ -40,19 +40,22 @@ typedef struct {
|
||||||
enum _show_db_index {
|
enum _show_db_index {
|
||||||
TSDB_SHOW_DB_NAME_INDEX,
|
TSDB_SHOW_DB_NAME_INDEX,
|
||||||
TSDB_SHOW_DB_CREATED_TIME_INDEX,
|
TSDB_SHOW_DB_CREATED_TIME_INDEX,
|
||||||
TSDB_SHOW_DB_VGROUPS_INDEX,
|
|
||||||
TSDB_SHOW_DB_NTABLES_INDEX,
|
TSDB_SHOW_DB_NTABLES_INDEX,
|
||||||
|
TSDB_SHOW_DB_VGROUPS_INDEX,
|
||||||
TSDB_SHOW_DB_REPLICA_INDEX,
|
TSDB_SHOW_DB_REPLICA_INDEX,
|
||||||
|
TSDB_SHOW_DB_QUORUM_INDEX,
|
||||||
TSDB_SHOW_DB_DAYS_INDEX,
|
TSDB_SHOW_DB_DAYS_INDEX,
|
||||||
TSDB_SHOW_DB_KEEP_INDEX,
|
TSDB_SHOW_DB_KEEP_INDEX,
|
||||||
TSDB_SHOW_DB_TABLES_INDEX,
|
|
||||||
TSDB_SHOW_DB_ROWS_INDEX,
|
|
||||||
TSDB_SHOW_DB_CACHE_INDEX,
|
TSDB_SHOW_DB_CACHE_INDEX,
|
||||||
TSDB_SHOW_DB_ABLOCKS_INDEX,
|
TSDB_SHOW_DB_BLOCKS_INDEX,
|
||||||
TSDB_SHOW_DB_TBLOCKS_INDEX,
|
TSDB_SHOW_DB_MINROWS_INDEX,
|
||||||
TSDB_SHOW_DB_CTIME_INDEX,
|
TSDB_SHOW_DB_MAXROWS_INDEX,
|
||||||
TSDB_SHOW_DB_CLOG_INDEX,
|
TSDB_SHOW_DB_WALLEVEL_INDEX,
|
||||||
|
TSDB_SHOW_DB_FSYNC_INDEX,
|
||||||
TSDB_SHOW_DB_COMP_INDEX,
|
TSDB_SHOW_DB_COMP_INDEX,
|
||||||
|
TSDB_SHOW_DB_PRECISION_INDEX,
|
||||||
|
TSDB_SHOW_DB_UPDATE_INDEX,
|
||||||
|
TSDB_SHOW_DB_STATUS_INDEX,
|
||||||
TSDB_MAX_SHOW_DB
|
TSDB_MAX_SHOW_DB
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -90,17 +93,23 @@ extern char version[];
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
char name[TSDB_DB_NAME_LEN + 1];
|
char name[TSDB_DB_NAME_LEN + 1];
|
||||||
int32_t replica;
|
int32_t tables;
|
||||||
int32_t days;
|
int32_t vgroups;
|
||||||
int32_t keep;
|
int16_t replications;
|
||||||
int32_t tables;
|
int16_t quorum;
|
||||||
int32_t rows;
|
int16_t daysPerFile;
|
||||||
int32_t cache;
|
int16_t daysToKeep;
|
||||||
int32_t ablocks;
|
int16_t daysToKeep1;
|
||||||
int32_t tblocks;
|
int16_t daysToKeep2;
|
||||||
int32_t ctime;
|
int32_t cacheBlockSize; //MB
|
||||||
int32_t clog;
|
int32_t totalBlocks;
|
||||||
int32_t comp;
|
int32_t minRowsPerFileBlock;
|
||||||
|
int32_t maxRowsPerFileBlock;
|
||||||
|
int8_t walLevel;
|
||||||
|
int32_t fsyncPeriod;
|
||||||
|
int8_t compression;
|
||||||
|
int8_t precision; // time resolution
|
||||||
|
int8_t update;
|
||||||
} SDbInfo;
|
} SDbInfo;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
@ -173,6 +182,7 @@ static struct argp_option options[] = {
|
||||||
{"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3},
|
{"start-time", 'S', "START_TIME", 0, "Start time to dump.", 3},
|
||||||
{"end-time", 'E', "END_TIME", 0, "End time to dump.", 3},
|
{"end-time", 'E', "END_TIME", 0, "End time to dump.", 3},
|
||||||
{"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
|
{"data-batch", 'N', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3},
|
||||||
|
{"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
|
||||||
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
|
{"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
|
||||||
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
|
{"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
|
||||||
{"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3},
|
{"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3},
|
||||||
|
@ -200,6 +210,7 @@ struct arguments {
|
||||||
int64_t start_time;
|
int64_t start_time;
|
||||||
int64_t end_time;
|
int64_t end_time;
|
||||||
int32_t data_batch;
|
int32_t data_batch;
|
||||||
|
int32_t max_sql_len;
|
||||||
int32_t table_batch; // num of table which will be dump into one output file.
|
int32_t table_batch; // num of table which will be dump into one output file.
|
||||||
bool allow_sys;
|
bool allow_sys;
|
||||||
// other options
|
// other options
|
||||||
|
@ -298,6 +309,17 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
||||||
case 'N':
|
case 'N':
|
||||||
arguments->data_batch = atoi(arg);
|
arguments->data_batch = atoi(arg);
|
||||||
break;
|
break;
|
||||||
|
case 'L':
|
||||||
|
{
|
||||||
|
int32_t len = atoi(arg);
|
||||||
|
if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
|
||||||
|
len = TSDB_MAX_ALLOWED_SQL_LEN;
|
||||||
|
} else if (len < TSDB_MAX_SQL_LEN) {
|
||||||
|
len = TSDB_MAX_SQL_LEN;
|
||||||
|
}
|
||||||
|
arguments->max_sql_len = len;
|
||||||
|
break;
|
||||||
|
}
|
||||||
case 't':
|
case 't':
|
||||||
arguments->table_batch = atoi(arg);
|
arguments->table_batch = atoi(arg);
|
||||||
break;
|
break;
|
||||||
|
@ -360,6 +382,7 @@ struct arguments tsArguments = {
|
||||||
0,
|
0,
|
||||||
INT64_MAX,
|
INT64_MAX,
|
||||||
1,
|
1,
|
||||||
|
TSDB_MAX_SQL_LEN,
|
||||||
1,
|
1,
|
||||||
false,
|
false,
|
||||||
// other options
|
// other options
|
||||||
|
@ -415,7 +438,9 @@ int main(int argc, char *argv[]) {
|
||||||
printf("start_time: %" PRId64 "\n", tsArguments.start_time);
|
printf("start_time: %" PRId64 "\n", tsArguments.start_time);
|
||||||
printf("end_time: %" PRId64 "\n", tsArguments.end_time);
|
printf("end_time: %" PRId64 "\n", tsArguments.end_time);
|
||||||
printf("data_batch: %d\n", tsArguments.data_batch);
|
printf("data_batch: %d\n", tsArguments.data_batch);
|
||||||
|
printf("max_sql_len: %d\n", tsArguments.max_sql_len);
|
||||||
printf("table_batch: %d\n", tsArguments.table_batch);
|
printf("table_batch: %d\n", tsArguments.table_batch);
|
||||||
|
printf("thread_num: %d\n", tsArguments.thread_num);
|
||||||
printf("allow_sys: %d\n", tsArguments.allow_sys);
|
printf("allow_sys: %d\n", tsArguments.allow_sys);
|
||||||
printf("abort: %d\n", tsArguments.abort);
|
printf("abort: %d\n", tsArguments.abort);
|
||||||
printf("isDumpIn: %d\n", tsArguments.isDumpIn);
|
printf("isDumpIn: %d\n", tsArguments.isDumpIn);
|
||||||
|
@ -682,8 +707,8 @@ int taosDumpOut(struct arguments *arguments) {
|
||||||
TAOS_FIELD *fields = taos_fetch_fields(result);
|
TAOS_FIELD *fields = taos_fetch_fields(result);
|
||||||
|
|
||||||
while ((row = taos_fetch_row(result)) != NULL) {
|
while ((row = taos_fetch_row(result)) != NULL) {
|
||||||
// sys database name : 'monitor', but subsequent version changed to 'log'
|
// sys database name : 'log', but subsequent version changed to 'log'
|
||||||
if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "monitor", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 &&
|
if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 &&
|
||||||
(!arguments->allow_sys))
|
(!arguments->allow_sys))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
@ -711,20 +736,27 @@ int taosDumpOut(struct arguments *arguments) {
|
||||||
}
|
}
|
||||||
|
|
||||||
strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
|
strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes);
|
||||||
#if 0
|
#if 0
|
||||||
dbInfos[count]->replica = (int)(*((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]));
|
if (arguments->with_property) {
|
||||||
dbInfos[count]->days = (int)(*((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]));
|
dbInfos[count]->tables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
|
||||||
dbInfos[count]->keep = *((int *)row[TSDB_SHOW_DB_KEEP_INDEX]);
|
dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
|
||||||
dbInfos[count]->tables = *((int *)row[TSDB_SHOW_DB_TABLES_INDEX]);
|
dbInfos[count]->replications = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
|
||||||
dbInfos[count]->rows = *((int *)row[TSDB_SHOW_DB_ROWS_INDEX]);
|
dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
|
||||||
dbInfos[count]->cache = *((int *)row[TSDB_SHOW_DB_CACHE_INDEX]);
|
dbInfos[count]->daysPerFile = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
|
||||||
dbInfos[count]->ablocks = *((int *)row[TSDB_SHOW_DB_ABLOCKS_INDEX]);
|
dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
|
||||||
dbInfos[count]->tblocks = (int)(*((int16_t *)row[TSDB_SHOW_DB_TBLOCKS_INDEX]));
|
dbInfos[count]->daysToKeep1;
|
||||||
dbInfos[count]->ctime = *((int *)row[TSDB_SHOW_DB_CTIME_INDEX]);
|
dbInfos[count]->daysToKeep2;
|
||||||
dbInfos[count]->clog = (int)(*((int8_t *)row[TSDB_SHOW_DB_CLOG_INDEX]));
|
dbInfos[count]->cacheBlockSize = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
|
||||||
dbInfos[count]->comp = (int)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
|
dbInfos[count]->totalBlocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
|
||||||
|
dbInfos[count]->minRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
|
||||||
|
dbInfos[count]->maxRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
|
||||||
|
dbInfos[count]->walLevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
|
||||||
|
dbInfos[count]->fsyncPeriod = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
|
||||||
|
dbInfos[count]->compression = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
|
||||||
|
dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]);
|
||||||
|
dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
|
||||||
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
count++;
|
count++;
|
||||||
|
|
||||||
if (arguments->databases) {
|
if (arguments->databases) {
|
||||||
|
@@ -1037,10 +1069,13 @@ void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {

 pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s", dbInfo->name);
 if (isDumpProperty) {
+#if 0
 pstr += sprintf(pstr,
-" REPLICA %d DAYS %d KEEP %d TABLES %d ROWS %d CACHE %d ABLOCKS %d TBLOCKS %d CTIME %d CLOG %d COMP %d",
-dbInfo->replica, dbInfo->days, dbInfo->keep, dbInfo->tables, dbInfo->rows, dbInfo->cache,
-dbInfo->ablocks, dbInfo->tblocks, dbInfo->ctime, dbInfo->clog, dbInfo->comp);
+"TABLES %d vgroups %d REPLICA %d quorum %d DAYS %d KEEP %d CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION %s UPDATE %d",
+dbInfo->tables, dbInfo->vgroups, dbInfo->replications, dbInfo->quorum, dbInfo->daysPerFile, dbInfo->daysToKeep, dbInfo->cacheBlockSize,
+dbInfo->totalBlocks, dbInfo->minRowsPerFileBlock, dbInfo->maxRowsPerFileBlock, dbInfo->walLevel, dbInfo->fsyncPeriod, dbInfo->compression,
+dbInfo->precision, dbInfo->update);
+#endif
 }

 pstr += sprintf(pstr, ";");
@@ -1459,7 +1494,8 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
 return -1;
 }

-char* tmpBuffer = (char *)calloc(1, COMMAND_SIZE);
+int32_t sql_buf_len = arguments->max_sql_len;
+char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
 if (tmpBuffer == NULL) {
 fprintf(stderr, "failed to allocate memory\n");
 free(tmpCommand);
@@ -1502,85 +1538,83 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
 return -1;
 }

-char sqlStr[8] = "\0";
-if (arguments->mysqlFlag) {
-sprintf(sqlStr, "INSERT");
-} else {
-sprintf(sqlStr, "IMPORT");
-}

 int rowFlag = 0;
+int32_t curr_sqlstr_len = 0;
+int32_t total_sqlstr_len = 0;
 count = 0;
 while ((row = taos_fetch_row(tmpResult)) != NULL) {
 pstr = tmpBuffer;
+curr_sqlstr_len = 0;

 int32_t* length = taos_fetch_lengths(tmpResult); // act len

 if (count == 0) {
-pstr += sprintf(pstr, "%s INTO %s VALUES (", sqlStr, tbname);
-} else {
+total_sqlstr_len = 0;
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "INSERT INTO %s VALUES (", tbname);
+} else {
 if (arguments->mysqlFlag) {
 if (0 == rowFlag) {
-pstr += sprintf(pstr, "(");
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
 rowFlag++;
 } else {
-pstr += sprintf(pstr, ", (");
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
 }
 } else {
-pstr += sprintf(pstr, "(");
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
 }
 }

 for (int col = 0; col < numFields; col++) {
-if (col != 0) pstr += sprintf(pstr, ", ");
+if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");

 if (row[col] == NULL) {
-pstr += sprintf(pstr, "NULL");
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
 continue;
 }

 switch (fields[col].type) {
 case TSDB_DATA_TYPE_BOOL:
-pstr += sprintf(pstr, "%d", ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0));
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0));
 break;
 case TSDB_DATA_TYPE_TINYINT:
-pstr += sprintf(pstr, "%d", *((int8_t *)row[col]));
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col]));
 break;
 case TSDB_DATA_TYPE_SMALLINT:
-pstr += sprintf(pstr, "%d", *((int16_t *)row[col]));
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col]));
 break;
 case TSDB_DATA_TYPE_INT:
-pstr += sprintf(pstr, "%d", *((int32_t *)row[col]));
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col]));
 break;
 case TSDB_DATA_TYPE_BIGINT:
-pstr += sprintf(pstr, "%" PRId64 "", *((int64_t *)row[col]));
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *((int64_t *)row[col]));
 break;
 case TSDB_DATA_TYPE_FLOAT:
-pstr += sprintf(pstr, "%f", GET_FLOAT_VAL(row[col]));
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col]));
 break;
 case TSDB_DATA_TYPE_DOUBLE:
-pstr += sprintf(pstr, "%f", GET_DOUBLE_VAL(row[col]));
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col]));
 break;
 case TSDB_DATA_TYPE_BINARY:
-*(pstr++) = '\'';
+//*(pstr++) = '\'';
 converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
-pstr = stpcpy(pstr, tbuf);
-*(pstr++) = '\'';
+//pstr = stpcpy(pstr, tbuf);
+//*(pstr++) = '\'';
+pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
 break;
 case TSDB_DATA_TYPE_NCHAR:
 convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
-pstr += sprintf(pstr, "\'%s\'", tbuf);
+pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
 break;
 case TSDB_DATA_TYPE_TIMESTAMP:
 if (!arguments->mysqlFlag) {
-pstr += sprintf(pstr, "%" PRId64 "", *(int64_t *)row[col]);
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *(int64_t *)row[col]);
 } else {
 char buf[64] = "\0";
 int64_t ts = *((int64_t *)row[col]);
 time_t tt = (time_t)(ts / 1000);
 struct tm *ptm = localtime(&tt);
 strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
-pstr += sprintf(pstr, "\'%s.%03d\'", buf, (int)(ts % 1000));
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'", buf, (int)(ts % 1000));
 }
 break;
 default:
@@ -1588,13 +1622,15 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS*
 }
 }

-pstr += sprintf(pstr, ") ");
+curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") ");

 totalRows++;
 count++;
 fprintf(fp, "%s", tmpBuffer);

-if (count >= arguments->data_batch) {
+total_sqlstr_len += curr_sqlstr_len;
+
+if ((count >= arguments->data_batch) || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
 fprintf(fp, ";\n");
 count = 0;
 } //else {
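The batching change above flushes an INSERT statement either when `data_batch` rows have accumulated or when the remaining SQL buffer can no longer fit a worst-case row. A minimal stand-alone sketch of that flush rule follows; the buffer sizes, `MAX_BYTES_PER_ROW`, and the helper name are illustrative, not taken from taosdump:

#include <stdio.h>

#define MAX_BYTES_PER_ROW 1024   /* assumed worst-case size of one serialized row */

/* Append one already-serialized row to the batch; flush when either limit is hit. */
static void append_row(FILE *fp, char *buf, int buf_len, int *used, int *rows,
                       int max_rows, const char *row_sql) {
  *used += snprintf(buf + *used, (size_t)(buf_len - *used), "%s", row_sql);
  (*rows)++;
  if (*rows >= max_rows || buf_len - *used < MAX_BYTES_PER_ROW) {
    fprintf(fp, "%s;\n", buf);   /* close the INSERT statement and start a new batch */
    *used = 0;
    *rows = 0;
    buf[0] = '\0';
  }
}

The second condition is what keeps a single oversized batch from overflowing the fixed `sql_buf_len` buffer even when the row count is still below `data_batch`.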
@@ -37,7 +37,7 @@ extern "C" {
 #endif

 #ifndef TAOS_OS_DEF_EPOLL
-#define TAOS_EPOLL_WAIT_TIME -1
+#define TAOS_EPOLL_WAIT_TIME 500
 #endif

 #ifdef TAOS_RANDOM_NETWORK_FAIL
@@ -111,6 +111,9 @@ void taosUninitTimer() {
 pthread_sigmask(SIG_BLOCK, &set, NULL);
 */
 void taosMsleep(int mseconds) {
+#if 1
+usleep(mseconds * 1000);
+#else
 struct timeval timeout;
 int seconds, useconds;

@@ -126,7 +129,8 @@ void taosMsleep(int mseconds) {

 select(0, NULL, NULL, NULL, &timeout);

 /* pthread_sigmask(SIG_UNBLOCK, &set, NULL); */
+#endif
 }

 #endif
@@ -85,7 +85,7 @@ static void httpProcessHttpData(void *param) {
 while (1) {
 struct epoll_event events[HTTP_MAX_EVENTS];
 //-1 means uncertainty, 0-nowait, 1-wait 1 ms, set it from -1 to 1
-fdNum = epoll_wait(pThread->pollFd, events, HTTP_MAX_EVENTS, 1);
+fdNum = epoll_wait(pThread->pollFd, events, HTTP_MAX_EVENTS, TAOS_EPOLL_WAIT_TIME);
 if (pThread->stop) {
 httpDebug("%p, http thread get stop event, exiting...", pThread);
 break;
@@ -148,10 +148,12 @@ static void *monitorThreadFunc(void *param) {
 }

 if (tsMonitor.state == MON_STATE_NOT_INIT) {
+int code = 0;

 for (; tsMonitor.cmdIndex < MON_CMD_MAX; ++tsMonitor.cmdIndex) {
 monitorBuildMonitorSql(tsMonitor.sql, tsMonitor.cmdIndex);
 void *res = taos_query(tsMonitor.conn, tsMonitor.sql);
-int code = taos_errno(res);
+code = taos_errno(res);
 taos_free_result(res);

 if (code != 0) {

@@ -162,7 +164,7 @@ static void *monitorThreadFunc(void *param) {
 }
 }

-if (tsMonitor.start) {
+if (tsMonitor.start && code == 0) {
 tsMonitor.state = MON_STATE_INITED;
 }
 }
@@ -43,7 +43,8 @@ typedef struct SHistogramInfo {
 int32_t numOfElems;
 int32_t numOfEntries;
 int32_t maxEntries;
+double min;
+double max;
 #if defined(USE_ARRAYLIST)
 SHistBin* elems;
 #else

@@ -52,9 +53,6 @@ typedef struct SHistogramInfo {
 int32_t maxIndex;
 bool ordered;
 #endif

-double min;
-double max;
 } SHistogramInfo;

 SHistogramInfo* tHistogramCreate(int32_t numOfBins);
@@ -171,40 +171,17 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
 }

 static void taosStopTcpThread(SThreadObj* pThreadObj) {
-pThreadObj->stop = true;
-eventfd_t fd = -1;
-
-// save thread into local variable since pThreadObj is freed when thread exits
+// save thread into local variable and signal thread to stop
 pthread_t thread = pThreadObj->thread;
-if (taosComparePthread(pThreadObj->thread, pthread_self())) {
+if (!taosCheckPthreadValid(thread)) {
+return;
+}
+pThreadObj->stop = true;
+if (taosComparePthread(thread, pthread_self())) {
 pthread_detach(pthread_self());
 return;
 }
+pthread_join(thread, NULL);
-if (taosCheckPthreadValid(pThreadObj->thread)) {
-// signal the thread to stop, try graceful method first,
-// and use pthread_cancel when failed
-struct epoll_event event = { .events = EPOLLIN };
-fd = eventfd(1, 0);
-if (fd == -1) {
-// failed to create eventfd, call pthread_cancel instead, which may result in data corruption:
-tError("%s, failed to create eventfd(%s)", pThreadObj->label, strerror(errno));
-pThreadObj->stop = true;
-pthread_cancel(pThreadObj->thread);
-} else if (epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) {
-// failed to call epoll_ctl, call pthread_cancel instead, which may result in data corruption:
-tError("%s, failed to call epoll_ctl(%s)", pThreadObj->label, strerror(errno));
-pthread_cancel(pThreadObj->thread);
-}
-}
-
-// at this step, pThreadObj has already been released
-if (taosCheckPthreadValid(thread)) {
-pthread_join(thread, NULL);
-}
-
-if (fd != -1) taosCloseSocket(fd);
 }

 void taosStopTcpServer(void *handle) {
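With `TAOS_EPOLL_WAIT_TIME` now finite (see the epoll_wait change above), the polling thread wakes up periodically and can notice the stop flag on its own, which is why the eventfd/`pthread_cancel` machinery could be dropped from the stop routine. A minimal sketch of the resulting pattern, with an illustrative structure rather than the real `SThreadObj`:

#include <pthread.h>
#include <stdbool.h>
#include <sys/epoll.h>
#include <unistd.h>

#define EPOLL_WAIT_MS 500

typedef struct {
  pthread_t     thread;
  int           pollFd;
  volatile bool stop;
} PollThread;

static void *poll_loop(void *arg) {
  PollThread *t = arg;
  struct epoll_event events[16];
  while (!t->stop) {
    /* A bounded timeout guarantees the loop re-checks the stop flag regularly. */
    int n = epoll_wait(t->pollFd, events, 16, EPOLL_WAIT_MS);
    if (n < 0) break;
    /* ... handle the n ready descriptors ... */
  }
  return NULL;
}

static void stop_poll_thread(PollThread *t) {
  t->stop = true;                           /* signal the loop */
  if (pthread_equal(t->thread, pthread_self())) {
    pthread_detach(pthread_self());         /* stopping from inside the thread itself */
    return;
  }
  pthread_join(t->thread, NULL);            /* otherwise wait for the loop to exit */
  close(t->pollFd);
}

The trade-off is a wake-up every 500 ms instead of an immediate wake-up through an eventfd, in exchange for a much simpler shutdown path.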
@@ -36,6 +36,7 @@ extern "C" {
 #define TAOS_SMSG_STATUS 7

 #define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16)
+#define SYNC_RECV_BUFFER_SIZE (5*1024*1024)

 #define nodeRole pNode->peerInfo[pNode->selfIndex]->role
 #define nodeVersion pNode->peerInfo[pNode->selfIndex]->version

@@ -105,7 +106,7 @@ typedef struct {
 int8_t nacks;
 int8_t confirmed;
 int32_t code;
-uint64_t time;
+int64_t time;
 } SFwdInfo;

 typedef struct {
@@ -179,6 +179,13 @@ int64_t syncStart(const SSyncInfo *pInfo) {
 for (int32_t i = 0; i < pCfg->replica; ++i) {
 const SNodeInfo *pNodeInfo = pCfg->nodeInfo + i;
 pNode->peerInfo[i] = syncAddPeer(pNode, pNodeInfo);
+if (pNode->peerInfo[i] == NULL) {
+sError("vgId:%d, node:%d fqdn:%s port:%u is not configured, stop taosd", pNode->vgId, pNodeInfo->nodeId, pNodeInfo->nodeFqdn,
+pNodeInfo->nodePort);
+syncStop(pNode->rid);
+exit(1);
+}

 if ((strcmp(pNodeInfo->nodeFqdn, tsNodeFqdn) == 0) && (pNodeInfo->nodePort == tsSyncPort)) {
 pNode->selfIndex = i;
 }

@@ -476,7 +483,11 @@ static void syncRemovePeer(SSyncPeer *pPeer) {

 static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
 uint32_t ip = taosGetIpFromFqdn(pInfo->nodeFqdn);
-if (ip == -1) return NULL;
+if (ip == 0xFFFFFFFF) {
+sError("failed to add peer, can resolve fqdn:%s since %s", pInfo->nodeFqdn, strerror(errno));
+terrno = TSDB_CODE_RPC_FQDN_ERROR;
+return NULL;
+}

 SSyncPeer *pPeer = calloc(1, sizeof(SSyncPeer));
 if (pPeer == NULL) return NULL;
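The peer is now rejected explicitly when its FQDN cannot be resolved, and the failure sentinel is compared as the unsigned value `0xFFFFFFFF` instead of `-1` (the `uint32_t` return value never equals a signed -1 reliably across compilers). The `taosGetIpFromFqdn()` hunk later in this commit shows the resolver side; a hedged, self-contained sketch of such a resolver built on `getaddrinfo()` could look like this (the function name is illustrative):

#include <netdb.h>
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

/* Resolve an FQDN to an IPv4 address in network byte order; 0xFFFFFFFF means failure. */
static uint32_t ip_from_fqdn(const char *fqdn) {
  struct addrinfo hints;
  struct addrinfo *result = NULL;
  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_INET;
  hints.ai_socktype = SOCK_STREAM;

  if (getaddrinfo(fqdn, NULL, &hints, &result) != 0 || result == NULL) {
    return 0xFFFFFFFF;   /* resolution failed; caller sets its own error code */
  }

  uint32_t ip = ((struct sockaddr_in *)result->ai_addr)->sin_addr.s_addr;
  freeaddrinfo(result);
  return ip;
}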
@@ -1193,14 +1204,17 @@ static void syncMonitorFwdInfos(void *param, void *tmrId) {

 SSyncFwds *pSyncFwds = pNode->pSyncFwds;

-if (pSyncFwds) {;
-uint64_t time = taosGetTimestampMs();
+if (pSyncFwds) {
+int64_t time = taosGetTimestampMs();

 if (pSyncFwds->fwds > 0) {
 pthread_mutex_lock(&(pNode->mutex));
 for (int32_t i = 0; i < pSyncFwds->fwds; ++i) {
 SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % tsMaxFwdInfo;
-if (time - pFwdInfo->time < 2000) break;
+if (ABS(time - pFwdInfo->time) < 2000) break;

+sDebug("vgId:%d, forward info expired, hver:%" PRIu64 " curtime:%" PRIu64 " savetime:%" PRIu64, pNode->vgId,
+pFwdInfo->version, time, pFwdInfo->time);
 syncProcessFwdAck(pNode, pFwdInfo, TSDB_CODE_RPC_NETWORK_UNAVAIL);
 }
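Switching `SFwdInfo.time` to `int64_t` and comparing with `ABS()` matters because the millisecond clock can occasionally step backwards; with `uint64_t` the subtraction would wrap to a huge positive value and every pending forward would immediately look expired. A small illustration of the difference, with `ABS` assumed to be the usual macro:

#include <stdint.h>
#include <stdio.h>

#define ABS(x) ((x) < 0 ? -(x) : (x))

int main(void) {
  int64_t saved = 1000;            /* time recorded when the forward was queued */
  int64_t now   = 900;             /* clock stepped back by 100 ms */

  uint64_t unsigned_delta = (uint64_t)now - (uint64_t)saved;   /* wraps to ~1.8e19 */
  int64_t  signed_delta   = now - saved;                       /* -100 */

  printf("unsigned delta: %llu\n", (unsigned long long)unsigned_delta);
  printf("signed |delta|: %lld\n", (long long)ABS(signed_delta));  /* 100, still < 2000 */
  return 0;
}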
@@ -136,7 +136,7 @@ static int32_t syncRestoreWal(SSyncPeer *pPeer) {
 SSyncNode *pNode = pPeer->pSyncNode;
 int32_t ret, code = -1;

-void *buffer = calloc(1024000, 1); // size for one record
+void *buffer = calloc(SYNC_MAX_SIZE, 1); // size for one record
 if (buffer == NULL) return -1;

 SWalHead *pHead = (SWalHead *)buffer;

@@ -237,7 +237,7 @@ static int32_t syncOpenRecvBuffer(SSyncNode *pNode) {
 SRecvBuffer *pRecv = calloc(sizeof(SRecvBuffer), 1);
 if (pRecv == NULL) return -1;

-pRecv->bufferSize = 5000000;
+pRecv->bufferSize = SYNC_RECV_BUFFER_SIZE;
 pRecv->buffer = malloc(pRecv->bufferSize);
 if (pRecv->buffer == NULL) {
 free(pRecv);
@@ -301,31 +301,14 @@ static SThreadObj *taosGetTcpThread(SPoolObj *pPool) {
 }

 static void taosStopPoolThread(SThreadObj *pThread) {
+pthread_t thread = pThread->thread;
+if (!taosCheckPthreadValid(thread)) {
+return;
+}
 pThread->stop = true;
-if (pThread->thread == pthread_self()) {
+if (taosComparePthread(thread, pthread_self())) {
 pthread_detach(pthread_self());
 return;
 }

-// save thread ID into a local variable, since pThread is freed when the thread exits
-pthread_t thread = pThread->thread;
-
-// signal the thread to stop, try graceful method first,
-// and use pthread_cancel when failed
-struct epoll_event event = {.events = EPOLLIN};
-eventfd_t fd = eventfd(1, 0);
-if (fd == -1) {
-// failed to create eventfd, call pthread_cancel instead, which may result in data corruption
-sError("failed to create eventfd since %s", strerror(errno));
-pthread_cancel(pThread->thread);
-pThread->stop = true;
-} else if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) {
-// failed to call epoll_ctl, call pthread_cancel instead, which may result in data corruption
-sError("failed to call epoll_ctl since %s", strerror(errno));
-pthread_cancel(pThread->thread);
-}
-
 pthread_join(thread, NULL);
-if (fd >= 0) taosClose(fd);
 }

@@ -235,6 +235,7 @@ typedef struct {
 sem_t readyToCommit;
 pthread_mutex_t mutex;
 bool repoLocked;
+int32_t code; // Commit code
 } STsdbRepo;

 // ------------------ tsdbRWHelper.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include "tsdbMain.h"
+
+static int tsdbCommitTSData(STsdbRepo *pRepo);
+static int tsdbCommitMeta(STsdbRepo *pRepo);
+static void tsdbEndCommit(STsdbRepo *pRepo, int eno);
+static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey);
+static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols);
+static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo);
+static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables);
+
+void *tsdbCommitData(STsdbRepo *pRepo) {
+SMemTable * pMem = pRepo->imem;
+
+tsdbInfo("vgId:%d start to commit! keyFirst %" PRId64 " keyLast %" PRId64 " numOfRows %" PRId64 " meta rows: %d",
+REPO_ID(pRepo), pMem->keyFirst, pMem->keyLast, pMem->numOfRows, listNEles(pMem->actList));
+
+pRepo->code = TSDB_CODE_SUCCESS;
+
+// Commit to update meta file
+if (tsdbCommitMeta(pRepo) < 0) {
+tsdbError("vgId:%d error occurs while committing META data since %s", REPO_ID(pRepo), tstrerror(terrno));
+goto _err;
+}
+
+// Create the iterator to read from cache
+if (tsdbCommitTSData(pRepo) < 0) {
+tsdbError("vgId:%d error occurs while committing TS data since %s", REPO_ID(pRepo), tstrerror(terrno));
+goto _err;
+}
+
+tsdbFitRetention(pRepo);
+
+tsdbInfo("vgId:%d commit over, succeed", REPO_ID(pRepo));
+tsdbEndCommit(pRepo, TSDB_CODE_SUCCESS);
+
+return NULL;
+
+_err:
+ASSERT(terrno != TSDB_CODE_SUCCESS);
+pRepo->code = terrno;
+tsdbInfo("vgId:%d commit over, failed", REPO_ID(pRepo));
+tsdbEndCommit(pRepo, terrno);
+
+return NULL;
+}
+
+static int tsdbCommitTSData(STsdbRepo *pRepo) {
+SMemTable * pMem = pRepo->imem;
+SDataCols * pDataCols = NULL;
+STsdbMeta * pMeta = pRepo->tsdbMeta;
+SCommitIter *iters = NULL;
+SRWHelper whelper = {0};
+STsdbCfg * pCfg = &(pRepo->config);
+
+if (pMem->numOfRows <= 0) return 0;
+
+iters = tsdbCreateCommitIters(pRepo);
+if (iters == NULL) {
+tsdbError("vgId:%d failed to create commit iterator since %s", REPO_ID(pRepo), tstrerror(terrno));
+goto _err;
+}
+
+if (tsdbInitWriteHelper(&whelper, pRepo) < 0) {
+tsdbError("vgId:%d failed to init write helper since %s", REPO_ID(pRepo), tstrerror(terrno));
+goto _err;
+}
+
+if ((pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pCfg->maxRowsPerFileBlock)) == NULL) {
+terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+tsdbError("vgId:%d failed to init data cols with maxRowBytes %d maxCols %d maxRowsPerFileBlock %d since %s",
+REPO_ID(pRepo), pMeta->maxCols, pMeta->maxRowBytes, pCfg->maxRowsPerFileBlock, tstrerror(terrno));
+goto _err;
+}
+
+int sfid = (int)(TSDB_KEY_FILEID(pMem->keyFirst, pCfg->daysPerFile, pCfg->precision));
+int efid = (int)(TSDB_KEY_FILEID(pMem->keyLast, pCfg->daysPerFile, pCfg->precision));
+
+// Loop to commit to each file
+for (int fid = sfid; fid <= efid; fid++) {
+if (tsdbCommitToFile(pRepo, fid, iters, &whelper, pDataCols) < 0) {
+tsdbError("vgId:%d failed to commit to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
+goto _err;
+}
+}
+
+tdFreeDataCols(pDataCols);
+tsdbDestroyCommitIters(iters, pMem->maxTables);
+tsdbDestroyHelper(&whelper);
+
+return 0;
+
+_err:
+tdFreeDataCols(pDataCols);
+tsdbDestroyCommitIters(iters, pMem->maxTables);
+tsdbDestroyHelper(&whelper);
+
+return -1;
+}
+
+static int tsdbCommitMeta(STsdbRepo *pRepo) {
+SMemTable *pMem = pRepo->imem;
+STsdbMeta *pMeta = pRepo->tsdbMeta;
+SActObj * pAct = NULL;
+SActCont * pCont = NULL;
+
+if (listNEles(pMem->actList) <= 0) return 0;
+
+if (tdKVStoreStartCommit(pMeta->pStore) < 0) {
+tsdbError("vgId:%d failed to commit data while start commit meta since %s", REPO_ID(pRepo), tstrerror(terrno));
+goto _err;
+}
+
+SListNode *pNode = NULL;
+
+while ((pNode = tdListPopHead(pMem->actList)) != NULL) {
+pAct = (SActObj *)pNode->data;
+if (pAct->act == TSDB_UPDATE_META) {
+pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj));
+if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) {
+tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
+tstrerror(terrno));
+tdKVStoreEndCommit(pMeta->pStore);
+goto _err;
+}
+} else if (pAct->act == TSDB_DROP_META) {
+if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) {
+tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
+tstrerror(terrno));
+tdKVStoreEndCommit(pMeta->pStore);
+goto _err;
+}
+} else {
+ASSERT(false);
+}
+}
+
+if (tdKVStoreEndCommit(pMeta->pStore) < 0) {
+tsdbError("vgId:%d failed to commit data while end commit meta since %s", REPO_ID(pRepo), tstrerror(terrno));
+goto _err;
+}
+
+return 0;
+
+_err:
+return -1;
+}
+
+static void tsdbEndCommit(STsdbRepo *pRepo, int eno) {
+if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER, eno);
+sem_post(&(pRepo->readyToCommit));
+}
+
+static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey) {
+for (int i = 0; i < nIters; i++) {
+TSKEY nextKey = tsdbNextIterKey((iters + i)->pIter);
+if (nextKey != TSDB_DATA_TIMESTAMP_NULL && (nextKey >= minKey && nextKey <= maxKey)) return 1;
+}
+return 0;
+}
+
+static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols) {
+char * dataDir = NULL;
+STsdbCfg * pCfg = &pRepo->config;
+STsdbFileH *pFileH = pRepo->tsdbFileH;
+SFileGroup *pGroup = NULL;
+SMemTable * pMem = pRepo->imem;
+bool newLast = false;
+
+TSKEY minKey = 0, maxKey = 0;
+tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey);
+
+// Check if there are data to commit to this file
+int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey);
+if (!hasDataToCommit) {
+tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid);
+return 0;
+}
+
+// Create and open files for commit
+dataDir = tsdbGetDataDirName(pRepo->rootDir);
+if (dataDir == NULL) {
+terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+return -1;
+}
+
+if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) {
+tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
+goto _err;
+}
+
+// Open files for write/read
+if (tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) {
+tsdbError("vgId:%d failed to set helper file since %s", REPO_ID(pRepo), tstrerror(terrno));
+goto _err;
+}
+
+newLast = TSDB_NLAST_FILE_OPENED(pHelper);
+
+if (tsdbLoadCompIdx(pHelper, NULL) < 0) {
+tsdbError("vgId:%d failed to load SCompIdx part since %s", REPO_ID(pRepo), tstrerror(terrno));
+goto _err;
+}
+
+// Loop to commit data in each table
+for (int tid = 1; tid < pMem->maxTables; tid++) {
+SCommitIter *pIter = iters + tid;
+if (pIter->pTable == NULL) continue;
+
+taosRLockLatch(&(pIter->pTable->latch));
+
+if (tsdbSetHelperTable(pHelper, pIter->pTable, pRepo) < 0) goto _err;
+
+if (pIter->pIter != NULL) {
+if (tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1)) < 0) {
+terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+goto _err;
+}
+
+if (tsdbCommitTableData(pHelper, pIter, pDataCols, maxKey) < 0) {
+taosRUnLockLatch(&(pIter->pTable->latch));
+tsdbError("vgId:%d failed to write data of table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo),
+TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable),
+tstrerror(terrno));
+goto _err;
+}
+}
+
+taosRUnLockLatch(&(pIter->pTable->latch));
+
+// Move the last block to the new .l file if neccessary
+if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) {
+tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno));
+goto _err;
+}
+
+// Write the SCompBlock part
+if (tsdbWriteCompInfo(pHelper) < 0) {
+tsdbError("vgId:%d, failed to write compInfo part since %s", REPO_ID(pRepo), tstrerror(terrno));
+goto _err;
+}
+}
+
+if (tsdbWriteCompIdx(pHelper) < 0) {
+tsdbError("vgId:%d failed to write compIdx part to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
+goto _err;
+}
+
+tfree(dataDir);
+tsdbCloseHelperFile(pHelper, 0, pGroup);
+
+pthread_rwlock_wrlock(&(pFileH->fhlock));
+
+(void)rename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname);
+pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info;
+
+if (newLast) {
+(void)rename(helperNewLastF(pHelper)->fname, helperLastF(pHelper)->fname);
+pGroup->files[TSDB_FILE_TYPE_LAST].info = helperNewLastF(pHelper)->info;
+} else {
+pGroup->files[TSDB_FILE_TYPE_LAST].info = helperLastF(pHelper)->info;
+}
+
+pGroup->files[TSDB_FILE_TYPE_DATA].info = helperDataF(pHelper)->info;
+
+pthread_rwlock_unlock(&(pFileH->fhlock));
+
+return 0;
+
+_err:
+tfree(dataDir);
+tsdbCloseHelperFile(pHelper, 1, pGroup);
+return -1;
+}
+
+static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
+SMemTable *pMem = pRepo->imem;
+STsdbMeta *pMeta = pRepo->tsdbMeta;
+
+SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter));
+if (iters == NULL) {
+terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+return NULL;
+}
+
+if (tsdbRLockRepoMeta(pRepo) < 0) goto _err;
+
+// reference all tables
+for (int i = 0; i < pMem->maxTables; i++) {
+if (pMeta->tables[i] != NULL) {
+tsdbRefTable(pMeta->tables[i]);
+iters[i].pTable = pMeta->tables[i];
+}
+}
+
+if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err;
+
+for (int i = 0; i < pMem->maxTables; i++) {
+if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) {
+if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) {
+terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
+goto _err;
+}
+
+tSkipListIterNext(iters[i].pIter);
+}
+}
+
+return iters;
+
+_err:
+tsdbDestroyCommitIters(iters, pMem->maxTables);
+return NULL;
+}
+
+static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) {
+if (iters == NULL) return;
+
+for (int i = 1; i < maxTables; i++) {
+if (iters[i].pTable != NULL) {
+tsdbUnRefTable(iters[i].pTable);
+tSkipListDestroyIter(iters[i].pIter);
+}
+}
+
+free(iters);
+}
@@ -256,7 +256,8 @@ SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) {
 pFileH->pFGroup[pFileH->nFGroups++] = fGroup;
 qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup);
 pthread_rwlock_unlock(&pFileH->fhlock);
-return tsdbSearchFGroup(pFileH, fid, TD_EQ);
+pGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ);
+ASSERT(pGroup != NULL);
 }

 return pGroup;

@@ -134,17 +134,20 @@ _err:
 }

 // Note: all working thread and query thread must stopped when calling this function
-void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) {
-if (repo == NULL) return;
+int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) {
+if (repo == NULL) return 0;

 STsdbRepo *pRepo = (STsdbRepo *)repo;
 int vgId = REPO_ID(pRepo);

+terrno = TSDB_CODE_SUCCESS;

 tsdbStopStream(pRepo);

 if (toCommit) {
 tsdbAsyncCommit(pRepo);
 sem_wait(&(pRepo->readyToCommit));
+terrno = pRepo->code;
 }
 tsdbUnRefMemTable(pRepo, pRepo->mem);
 tsdbUnRefMemTable(pRepo, pRepo->imem);

@@ -156,6 +159,12 @@ void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) {
 tsdbCloseMeta(pRepo);
 tsdbFreeRepo(pRepo);
 tsdbDebug("vgId:%d repository is closed", vgId);

+if (terrno != TSDB_CODE_SUCCESS) {
+return -1;
+} else {
+return 0;
+}
 }

 uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size) {

@@ -619,6 +628,7 @@ static STsdbRepo *tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg) {
 }

 pRepo->state = TSDB_STATE_OK;
+pRepo->code = TSDB_CODE_SUCCESS;

 int code = pthread_mutex_init(&pRepo->mutex, NULL);
 if (code != 0) {
@@ -23,12 +23,6 @@ static void tsdbFreeMemTable(SMemTable *pMemTable);
 static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable);
 static void tsdbFreeTableData(STableData *pTableData);
 static char * tsdbGetTsTupleKey(const void *data);
-static int tsdbCommitMeta(STsdbRepo *pRepo);
-static void tsdbEndCommit(STsdbRepo *pRepo);
-static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey);
-static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols);
-static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo);
-static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables);
 static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables);
 static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SDataRow row);
 static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);

@@ -215,7 +209,11 @@ int tsdbAsyncCommit(STsdbRepo *pRepo) {

 sem_wait(&(pRepo->readyToCommit));

-if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START);
+if (pRepo->code != TSDB_CODE_SUCCESS) {
+tsdbWarn("vgId:%d try to commit when TSDB not in good state: %s", REPO_ID(pRepo), tstrerror(terrno));
+}

+if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START, TSDB_CODE_SUCCESS);
 if (tsdbLockRepo(pRepo) < 0) return -1;
 pRepo->imem = pRepo->mem;
 pRepo->mem = NULL;

@@ -229,10 +227,18 @@ int tsdbAsyncCommit(STsdbRepo *pRepo) {

 int tsdbSyncCommit(TSDB_REPO_T *repo) {
 STsdbRepo *pRepo = (STsdbRepo *)repo;

 tsdbAsyncCommit(pRepo);
 sem_wait(&(pRepo->readyToCommit));
 sem_post(&(pRepo->readyToCommit));
-return 0;
+
+if (pRepo->code != TSDB_CODE_SUCCESS) {
+terrno = pRepo->code;
+return -1;
+} else {
+terrno = TSDB_CODE_SUCCESS;
+return 0;
+}
 }

 /**
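The `tsdbAsyncCommit`/`tsdbSyncCommit` hunks above hand the in-memory table to a background commit, use the `readyToCommit` semaphore to wait for the previous commit, and then re-check the stored commit code so a failed background commit surfaces to the caller. A reduced sketch of that handshake follows; the struct and function names are illustrative stand-ins, not the actual TSDB structures:

#include <semaphore.h>
#include <stddef.h>

typedef struct {
  sem_t ready;   /* initialized to 1; posted by the committer when a commit finishes */
  int   code;    /* result of the last commit, 0 on success */
  void *mem;     /* buffer currently collecting writes */
  void *imem;    /* buffer handed to the committer */
} Repo;

/* Background side: record the result of a finished commit and release the waiter. */
static void end_commit(Repo *r, int code) {
  r->code = code;
  sem_post(&r->ready);
}

/* Foreground side: wait for the previous commit, then hand over the new buffer. */
static int async_commit(Repo *r) {
  sem_wait(&r->ready);          /* previous commit (if any) has finished */
  if (r->code != 0) {
    /* previous commit failed; a real implementation would log a warning here */
  }
  r->imem = r->mem;
  r->mem = NULL;
  /* ... wake the committer thread to work on imem; it calls end_commit() when done ... */
  return 0;
}

/* Synchronous variant: additionally wait for this commit and report its result. */
static int sync_commit(Repo *r) {
  async_commit(r);
  sem_wait(&r->ready);          /* wait for the commit just scheduled */
  sem_post(&r->ready);          /* keep the semaphore available for the next caller */
  return (r->code == 0) ? 0 : -1;
}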
@@ -355,68 +361,6 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey
 return 0;
 }

-void *tsdbCommitData(STsdbRepo *pRepo) {
-SMemTable * pMem = pRepo->imem;
-STsdbCfg * pCfg = &pRepo->config;
-SDataCols * pDataCols = NULL;
-STsdbMeta * pMeta = pRepo->tsdbMeta;
-SCommitIter *iters = NULL;
-SRWHelper whelper = {0};
-ASSERT(pMem != NULL);
-
-tsdbInfo("vgId:%d start to commit! keyFirst %" PRId64 " keyLast %" PRId64 " numOfRows %" PRId64, REPO_ID(pRepo),
-pMem->keyFirst, pMem->keyLast, pMem->numOfRows);
-
-// Create the iterator to read from cache
-if (pMem->numOfRows > 0) {
-iters = tsdbCreateCommitIters(pRepo);
-if (iters == NULL) {
-tsdbError("vgId:%d failed to create commit iterator since %s", REPO_ID(pRepo), tstrerror(terrno));
-goto _exit;
-}
-
-if (tsdbInitWriteHelper(&whelper, pRepo) < 0) {
-tsdbError("vgId:%d failed to init write helper since %s", REPO_ID(pRepo), tstrerror(terrno));
-goto _exit;
-}
-
-if ((pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pCfg->maxRowsPerFileBlock)) == NULL) {
-terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
-tsdbError("vgId:%d failed to init data cols with maxRowBytes %d maxCols %d maxRowsPerFileBlock %d since %s",
-REPO_ID(pRepo), pMeta->maxCols, pMeta->maxRowBytes, pCfg->maxRowsPerFileBlock, tstrerror(terrno));
-goto _exit;
-}
-
-int sfid = (int)(TSDB_KEY_FILEID(pMem->keyFirst, pCfg->daysPerFile, pCfg->precision));
-int efid = (int)(TSDB_KEY_FILEID(pMem->keyLast, pCfg->daysPerFile, pCfg->precision));
-
-// Loop to commit to each file
-for (int fid = sfid; fid <= efid; fid++) {
-if (tsdbCommitToFile(pRepo, fid, iters, &whelper, pDataCols) < 0) {
-tsdbError("vgId:%d failed to commit to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
-goto _exit;
-}
-}
-}
-
-// Commit to update meta file
-if (tsdbCommitMeta(pRepo) < 0) {
-tsdbError("vgId:%d failed to commit data while committing meta data since %s", REPO_ID(pRepo), tstrerror(terrno));
-goto _exit;
-}
-
-tsdbFitRetention(pRepo);
-
-_exit:
-tdFreeDataCols(pDataCols);
-tsdbDestroyCommitIters(iters, pMem->maxTables);
-tsdbDestroyHelper(&whelper);
-tsdbInfo("vgId:%d commit over", pRepo->config.tsdbId);
-tsdbEndCommit(pRepo);
-
-return NULL;
-}
-
 // ---------------- LOCAL FUNCTIONS ----------------
 static SMemTable* tsdbNewMemTable(STsdbRepo *pRepo) {
 STsdbMeta *pMeta = pRepo->tsdbMeta;
@@ -508,240 +452,11 @@ static void tsdbFreeTableData(STableData *pTableData) {

 static char *tsdbGetTsTupleKey(const void *data) { return dataRowTuple((SDataRow)data); }

-static int tsdbCommitMeta(STsdbRepo *pRepo) {
-SMemTable *pMem = pRepo->imem;
-STsdbMeta *pMeta = pRepo->tsdbMeta;
-SActObj * pAct = NULL;
-SActCont * pCont = NULL;
-
-if (listNEles(pMem->actList) > 0) {
-if (tdKVStoreStartCommit(pMeta->pStore) < 0) {
-tsdbError("vgId:%d failed to commit data while start commit meta since %s", REPO_ID(pRepo), tstrerror(terrno));
-goto _err;
-}
-
-SListNode *pNode = NULL;
-
-while ((pNode = tdListPopHead(pMem->actList)) != NULL) {
-pAct = (SActObj *)pNode->data;
-if (pAct->act == TSDB_UPDATE_META) {
-pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj));
-if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) {
-tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
-tstrerror(terrno));
-tdKVStoreEndCommit(pMeta->pStore);
-goto _err;
-}
-} else if (pAct->act == TSDB_DROP_META) {
-if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) {
-tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
-tstrerror(terrno));
-tdKVStoreEndCommit(pMeta->pStore);
-goto _err;
-}
-} else {
-ASSERT(false);
-}
-}
-
-if (tdKVStoreEndCommit(pMeta->pStore) < 0) {
-tsdbError("vgId:%d failed to commit data while end commit meta since %s", REPO_ID(pRepo), tstrerror(terrno));
-goto _err;
-}
-}
-
-return 0;
-
-_err:
-return -1;
-}
-
-static void tsdbEndCommit(STsdbRepo *pRepo) {
-if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER);
-sem_post(&(pRepo->readyToCommit));
-}
-
-static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey) {
-for (int i = 0; i < nIters; i++) {
-TSKEY nextKey = tsdbNextIterKey((iters + i)->pIter);
-if (nextKey != TSDB_DATA_TIMESTAMP_NULL && (nextKey >= minKey && nextKey <= maxKey)) return 1;
-}
-return 0;
-}
-
 void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey) {
 *minKey = fileId * daysPerFile * tsMsPerDay[precision];
 *maxKey = *minKey + daysPerFile * tsMsPerDay[precision] - 1;
 }

-static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols) {
-char * dataDir = NULL;
-STsdbCfg * pCfg = &pRepo->config;
-STsdbFileH *pFileH = pRepo->tsdbFileH;
-SFileGroup *pGroup = NULL;
-SMemTable * pMem = pRepo->imem;
-bool newLast = false;
-
-TSKEY minKey = 0, maxKey = 0;
-tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey);
-
-// Check if there are data to commit to this file
-int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey);
-if (!hasDataToCommit) {
-tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid);
-return 0;
-}
-
-// Create and open files for commit
-dataDir = tsdbGetDataDirName(pRepo->rootDir);
-if (dataDir == NULL) {
-terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
-return -1;
-}
-
-if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) {
-tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
-goto _err;
-}
-
-// Open files for write/read
-if (tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) {
-tsdbError("vgId:%d failed to set helper file since %s", REPO_ID(pRepo), tstrerror(terrno));
-goto _err;
-}
-
-newLast = TSDB_NLAST_FILE_OPENED(pHelper);
-
-if (tsdbLoadCompIdx(pHelper, NULL) < 0) {
-tsdbError("vgId:%d failed to load SCompIdx part since %s", REPO_ID(pRepo), tstrerror(terrno));
-goto _err;
-}
-
-// Loop to commit data in each table
-for (int tid = 1; tid < pMem->maxTables; tid++) {
-SCommitIter *pIter = iters + tid;
-if (pIter->pTable == NULL) continue;
-
-taosRLockLatch(&(pIter->pTable->latch));
-
-if (tsdbSetHelperTable(pHelper, pIter->pTable, pRepo) < 0) goto _err;
-
-if (pIter->pIter != NULL) {
-if (tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1)) < 0) {
-terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
-goto _err;
-}
-
-if (tsdbCommitTableData(pHelper, pIter, pDataCols, maxKey) < 0) {
-taosRUnLockLatch(&(pIter->pTable->latch));
-tsdbError("vgId:%d failed to write data of table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo),
-TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable),
-tstrerror(terrno));
-goto _err;
-}
-}
-
-taosRUnLockLatch(&(pIter->pTable->latch));
-
-// Move the last block to the new .l file if neccessary
-if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) {
-tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno));
-goto _err;
-}
-
-// Write the SCompBlock part
-if (tsdbWriteCompInfo(pHelper) < 0) {
-tsdbError("vgId:%d, failed to write compInfo part since %s", REPO_ID(pRepo), tstrerror(terrno));
-goto _err;
-}
-}
-
-if (tsdbWriteCompIdx(pHelper) < 0) {
-tsdbError("vgId:%d failed to write compIdx part to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
-goto _err;
-}
-
-tfree(dataDir);
-tsdbCloseHelperFile(pHelper, 0, pGroup);
-
-pthread_rwlock_wrlock(&(pFileH->fhlock));
-
-(void)rename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname);
-pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info;
-
-if (newLast) {
-(void)rename(helperNewLastF(pHelper)->fname, helperLastF(pHelper)->fname);
-pGroup->files[TSDB_FILE_TYPE_LAST].info = helperNewLastF(pHelper)->info;
-} else {
-pGroup->files[TSDB_FILE_TYPE_LAST].info = helperLastF(pHelper)->info;
-}
-
-pGroup->files[TSDB_FILE_TYPE_DATA].info = helperDataF(pHelper)->info;
-
-pthread_rwlock_unlock(&(pFileH->fhlock));
-
-return 0;
-
-_err:
-tfree(dataDir);
-tsdbCloseHelperFile(pHelper, 1, NULL);
-return -1;
-}
-
-static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
-SMemTable *pMem = pRepo->imem;
-STsdbMeta *pMeta = pRepo->tsdbMeta;
-
-SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter));
-if (iters == NULL) {
-terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
-return NULL;
-}
-
-if (tsdbRLockRepoMeta(pRepo) < 0) goto _err;
-
-// reference all tables
-for (int i = 0; i < pMem->maxTables; i++) {
-if (pMeta->tables[i] != NULL) {
-tsdbRefTable(pMeta->tables[i]);
-iters[i].pTable = pMeta->tables[i];
-}
-}
-
-if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err;
-
-for (int i = 0; i < pMem->maxTables; i++) {
-if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) {
-if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) {
-terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
-goto _err;
-}
-
-tSkipListIterNext(iters[i].pIter);
-}
-}
-
-return iters;
-
-_err:
-tsdbDestroyCommitIters(iters, pMem->maxTables);
-return NULL;
-}
-
-static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) {
-if (iters == NULL) return;
-
-for (int i = 1; i < maxTables; i++) {
-if (iters[i].pTable != NULL) {
-tsdbUnRefTable(iters[i].pTable);
-tSkipListDestroyIter(iters[i].pIter);
-}
-}
-
-free(iters);
-}
-
 static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) {
 ASSERT(pMemTable->maxTables < maxTables);
@@ -236,6 +236,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe
 rInfo.offset = lseek(pStore->fd, 0, SEEK_CUR);
 if (rInfo.offset < 0) {
 uError("failed to lseek file %s since %s", pStore->fname, strerror(errno));
+terrno = TAOS_SYSTEM_ERROR(errno);
 return -1;
 }

@@ -254,6 +255,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe

 if (taosWrite(pStore->fd, cont, contLen) < contLen) {
 uError("failed to write %d bytes to file %s since %s", contLen, pStore->fname, strerror(errno));
+terrno = TAOS_SYSTEM_ERROR(errno);
 return -1;
 }

@@ -145,6 +145,7 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
 // forward to put the rest of data
 for (int idata = 1; idata < ndata; idata++) {
 pDataKey = pSkipList->keyFn(ppData[idata]);
+hasDup = false;

 // Compare max key
 pKey = SL_GET_MAX_KEY(pSkipList);

@@ -153,8 +154,6 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
 for (int i = 0; i < pSkipList->maxLevel; i++) {
 forward[i] = SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i);
 }
-
-hasDup = false;
 } else {
 SSkipListNode *px = pSkipList->pHead;
 for (int i = pSkipList->maxLevel - 1; i >= 0; --i) {

@@ -173,7 +172,7 @@ void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {

 compare = pSkipList->comparFn(pKey, pDataKey);
 if (compare >= 0) {
-if (compare == 0) hasDup = true;
+if (compare == 0 && !hasDup) hasDup = true;
 break;
 } else {
 px = p;
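The `hasDup` changes above make the duplicate flag a per-datum property: it is reset at the top of every iteration instead of only on one branch, so a duplicate found for one key can no longer leak into the decision for the next key in the batch. A toy illustration of the same pitfall, using a plain array instead of the skip list:

#include <stdio.h>

/* Insert each key of batch[] into a small set unless it is already present. */
int main(void) {
  int set[16] = {1, 5, 9};
  int set_len = 3;
  int batch[] = {5, 7, 9, 11};
  int hasDup;                         /* must be re-evaluated for every key */

  for (int i = 0; i < 4; i++) {
    hasDup = 0;                       /* the fix: reset per key, not once per batch */
    for (int j = 0; j < set_len; j++) {
      if (set[j] == batch[i]) { hasDup = 1; break; }
    }
    if (!hasDup) set[set_len++] = batch[i];   /* appended unsorted; fine for the demo */
  }

  for (int j = 0; j < set_len; j++) printf("%d ", set[j]);
  printf("\n");                       /* prints: 1 5 9 7 11 */
  return 0;
}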
@@ -58,13 +58,13 @@ uint32_t taosGetIpFromFqdn(const char *fqdn) {
 } else {
 #ifdef EAI_SYSTEM
 if (ret == EAI_SYSTEM) {
-uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, strerror(errno));
+uError("failed to get the ip address, fqdn:%s, since:%s", fqdn, strerror(errno));
 terrno = TAOS_SYSTEM_ERROR(errno);
 } else {
-uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
+uError("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
 }
 #else
-uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
+uError("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
 #endif
 return 0xFFFFFFFF;
 }
@@ -37,10 +37,13 @@ extern int32_t vDebugFlag;
 typedef struct {
   int32_t  vgId;      // global vnode group ID
   int32_t  refCount;  // reference count
-  int32_t  delay;
+  int32_t  queuedWMsg;
+  int32_t  queuedRMsg;
+  int32_t  delayMs;
   int8_t   status;
   int8_t   role;
   int8_t   accessState;
+  int8_t   isFull;
   uint64_t version;   // current version
   uint64_t fversion;  // version on saved data file
   void    *wqueue;

@@ -58,7 +61,7 @@ typedef struct {
   char    *rootDir;
   tsem_t   sem;
   int8_t   dropped;
-  char     db[TSDB_DB_NAME_LEN];
+  char     db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
 } SVnodeObj;

 void vnodeInitWriteFp(void);
@@ -25,7 +25,7 @@
 #include "vnodeCfg.h"

 static void vnodeLoadCfg(SVnodeObj *pVnode, SCreateVnodeMsg* vnodeMsg) {
-  strcpy(pVnode->db, vnodeMsg->db);
+  tstrncpy(pVnode->db, vnodeMsg->db, sizeof(pVnode->db));
   pVnode->cfgVersion = vnodeMsg->cfg.cfgVersion;
   pVnode->tsdbCfg.cacheBlockSize = vnodeMsg->cfg.cacheBlockSize;
   pVnode->tsdbCfg.totalBlocks = vnodeMsg->cfg.totalBlocks;

@@ -97,7 +97,7 @@ int32_t vnodeReadCfg(SVnodeObj *pVnode) {
     vError("vgId:%d, failed to read %s, db not found", pVnode->vgId, file);
     goto PARSE_VCFG_ERROR;
   }
-  strcpy(vnodeMsg.db, db->valuestring);
+  tstrncpy(vnodeMsg.db, db->valuestring, sizeof(vnodeMsg.db));

   cJSON *cfgVersion = cJSON_GetObjectItem(root, "cfgVersion");
   if (!cfgVersion || cfgVersion->type != cJSON_Number) {
@@ -30,7 +30,7 @@
 static SHashObj*tsVnodesHash;
 static void     vnodeCleanUp(SVnodeObj *pVnode);
-static int      vnodeProcessTsdbStatus(void *arg, int status);
+static int      vnodeProcessTsdbStatus(void *arg, int status, int eno);
 static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion);
 static int      vnodeGetWalInfo(void *ahandle, char *fileName, int64_t *fileId);
 static void     vnodeNotifyRole(void *ahandle, int8_t role);

@@ -378,9 +378,10 @@ int32_t vnodeClose(int32_t vgId) {
   return 0;
 }

-void vnodeRelease(void *pVnodeRaw) {
-  if (pVnodeRaw == NULL) return;
-  SVnodeObj *pVnode = pVnodeRaw;
+void vnodeRelease(void *vparam) {
+  if (vparam == NULL) return;
+  SVnodeObj *pVnode = vparam;
+  int32_t    code = 0;
   int32_t    vgId = pVnode->vgId;

   int32_t refCount = atomic_sub_fetch_32(&pVnode->refCount, 1);

@@ -406,7 +407,7 @@ void vnodeRelease(void *pVnodeRaw) {
   }

   if (pVnode->tsdb) {
-    tsdbCloseRepo(pVnode->tsdb, 1);
+    code = tsdbCloseRepo(pVnode->tsdb, 1);
     pVnode->tsdb = NULL;
   }

@@ -418,7 +419,11 @@ void vnodeRelease(void *pVnodeRaw) {
   }

   if (pVnode->wal) {
-    walRemoveAllOldFiles(pVnode->wal);
+    if (code != 0) {
+      vError("vgId:%d, failed to commit while close tsdb repo, keep wal", pVnode->vgId);
+    } else {
+      walRemoveAllOldFiles(pVnode->wal);
+    }
     walClose(pVnode->wal);
     pVnode->wal = NULL;
   }

@@ -590,9 +595,16 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
 }

 // TODO: this is a simple implement
-static int vnodeProcessTsdbStatus(void *arg, int status) {
+static int vnodeProcessTsdbStatus(void *arg, int status, int eno) {
   SVnodeObj *pVnode = arg;

+  if (eno != TSDB_CODE_SUCCESS) {
+    vError("vgId:%d, failed to commit since %s, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, tstrerror(eno),
+           pVnode->fversion, pVnode->version);
+    pVnode->isFull = 1;
+    return 0;
+  }
+
   if (status == TSDB_STATUS_COMMIT_START) {
     pVnode->fversion = pVnode->version;
     vDebug("vgId:%d, start commit, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version);

@@ -605,6 +617,7 @@ static int vnodeProcessTsdbStatus(void *arg, int status) {
   if (status == TSDB_STATUS_COMMIT_OVER) {
     vDebug("vgId:%d, commit over, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version);
+    pVnode->isFull = 0;
     walRemoveOneOldFile(pVnode->wal);
     return vnodeSaveVersion(pVnode);
   }

@@ -630,18 +643,19 @@ static void vnodeNotifyRole(void *ahandle, int8_t role) {
   pVnode->role = role;
   dnodeSendStatusMsgToMnode();

-  if (pVnode->role == TAOS_SYNC_ROLE_MASTER)
+  if (pVnode->role == TAOS_SYNC_ROLE_MASTER) {
     cqStart(pVnode->cq);
-  else
+  } else {
     cqStop(pVnode->cq);
+  }
 }

 static void vnodeCtrlFlow(void *ahandle, int32_t mseconds) {
   SVnodeObj *pVnode = ahandle;
-  if (pVnode->delay != mseconds) {
-    vInfo("vgId:%d, sync flow control, mseconds:%d", pVnode->vgId, mseconds);
+  if (pVnode->delayMs != mseconds) {
+    pVnode->delayMs = mseconds;
+    vDebug("vgId:%d, sync flow control, mseconds:%d", pVnode->vgId, mseconds);
   }
-  pVnode->delay = mseconds;
 }

 static int vnodeResetTsdb(SVnodeObj *pVnode) {
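The changes above give the commit-status callback an error argument and a simple recovery rule: a failed commit marks the vnode as full and keeps its WAL, a successful commit clears the flag and lets old WAL files go. A minimal, self-contained sketch of that contract is shown below; the struct, enum values, and function are simplified stand-ins, not the actual vnode implementation.

```c
/* Sketch of the commit-status callback contract, assuming simplified types. */
#include <stdint.h>
#include <stdio.h>

enum { STATUS_COMMIT_START = 1, STATUS_COMMIT_OVER = 2 };

typedef struct {
  int32_t  vgId;
  int8_t   isFull;    /* set when a commit fails; the write path rejects new data */
  uint64_t version;   /* latest in-memory version  */
  uint64_t fversion;  /* version persisted on disk */
} DemoVnode;

static int on_tsdb_status(DemoVnode *v, int status, int eno) {
  if (eno != 0) {  /* commit failed: stop accepting writes, keep the WAL */
    fprintf(stderr, "vgId:%d commit failed, code:%d\n", v->vgId, eno);
    v->isFull = 1;
    return 0;
  }
  if (status == STATUS_COMMIT_START) v->fversion = v->version;
  if (status == STATUS_COMMIT_OVER)  v->isFull = 0;  /* space reclaimed, accept writes again */
  return 0;
}
```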
@@ -41,8 +41,8 @@ void vnodeInitReadFp(void) {
 // still required, or there will be a deadlock, so we don’t do any check here, but put the check codes before the
 // request enters the queue
 //
-int32_t vnodeProcessRead(void *param, SVReadMsg *pRead) {
-  SVnodeObj *pVnode = (SVnodeObj *)param;
+int32_t vnodeProcessRead(void *vparam, SVReadMsg *pRead) {
+  SVnodeObj *pVnode = vparam;
   int32_t    msgType = pRead->msgType;

   if (vnodeProcessReadMsgFp[msgType] == NULL) {

@@ -53,8 +53,8 @@ int32_t vnodeProcessRead(void *param, SVReadMsg *pRead) {
   return (*vnodeProcessReadMsgFp[msgType])(pVnode, pRead);
 }

-static int32_t vnodeCheckRead(void *param) {
-  SVnodeObj *pVnode = param;
+static int32_t vnodeCheckRead(void *vparam) {
+  SVnodeObj *pVnode = vparam;
   if (pVnode->status != TAOS_VN_STATUS_READY) {
     vDebug("vgId:%d, vnode status is %s, refCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status],
            pVnode->refCount, pVnode);

@@ -76,6 +76,16 @@ static int32_t vnodeCheckRead(void *param) {
   return TSDB_CODE_SUCCESS;
 }

+void vnodeFreeFromRQueue(void *vparam, SVReadMsg *pRead) {
+  SVnodeObj *pVnode = vparam;
+
+  atomic_sub_fetch_32(&pVnode->queuedRMsg, 1);
+  vTrace("vgId:%d, free from vrqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg);
+
+  taosFreeQitem(pRead);
+  vnodeRelease(pVnode);
+}
+
 int32_t vnodeWriteToRQueue(void *vparam, void *pCont, int32_t contLen, int8_t qtype, void *rparam) {
   SVnodeObj *pVnode = vparam;

@@ -108,7 +118,8 @@ int32_t vnodeWriteToRQueue(void *vparam, void *pCont, int32_t contLen, int8_t qtype, void *rparam) {
   pRead->qtype = qtype;

   atomic_add_fetch_32(&pVnode->refCount, 1);
-  vTrace("vgId:%d, get vnode rqueue, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
+  atomic_add_fetch_32(&pVnode->queuedRMsg, 1);
+  vTrace("vgId:%d, write into vrqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg);

   taosWriteQitem(pVnode->rqueue, qtype, pRead);
   return TSDB_CODE_SUCCESS;
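The read-queue changes above pair every enqueue with a matching free: the counter is incremented when a message enters the vnode read queue and decremented in the new `vnodeFreeFromRQueue`, so the current queue depth can be logged and, later, used for flow control. The following sketch shows the same pairing with C11 atomics; the struct and function names are illustrative stand-ins, not the project's API.

```c
/* Sketch only: paired enqueue/free accounting for a per-vnode read queue. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
  atomic_int refCount;
  atomic_int queuedRMsg;
} DemoVnodeQueue;

static void enqueue_read(DemoVnodeQueue *q) {
  atomic_fetch_add(&q->refCount, 1);                    /* hold a reference while queued */
  int queued = atomic_fetch_add(&q->queuedRMsg, 1) + 1;
  printf("enqueued, depth now %d\n", queued);
}

static void free_read(DemoVnodeQueue *q) {
  atomic_fetch_sub(&q->queuedRMsg, 1);
  atomic_fetch_sub(&q->refCount, 1);                    /* release the reference taken at enqueue */
}
```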
@@ -28,13 +28,15 @@
 #include "syncInt.h"
 #include "tcq.h"

-static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *, SRspRet *);
-static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
-static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
-static int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
-static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
-static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
-static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet);
+#define MAX_QUEUED_MSG_NUM 10000
+
+static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, SRspRet *);
+static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);
+static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);
+static int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);
+static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);
+static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);
+static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);

 void vnodeInitWriteFp(void) {
   vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessSubmitMsg;

@@ -75,7 +77,7 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rparam) {
   // assign version
   pHead->version = pVnode->version + 1;
-  if (pVnode->delay) usleep(pVnode->delay * 1000);
+  if (pVnode->delayMs) taosMsleep(pVnode->delayMs);

 } else { // from wal or forward
   // for data from WAL or forward, version may be smaller

@@ -100,8 +102,8 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rparam) {
   return syncCode;
 }

-static int32_t vnodeCheckWrite(void *param) {
-  SVnodeObj *pVnode = param;
+static int32_t vnodeCheckWrite(void *vparam) {
+  SVnodeObj *pVnode = vparam;
   if (!(pVnode->accessState & TSDB_VN_WRITE_ACCCESS)) {
     vDebug("vgId:%d, no write auth, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
     return TSDB_CODE_VND_NO_WRITE_AUTH;

@@ -119,11 +121,16 @@ static int32_t vnodeCheckWrite(void *param) {
     return TSDB_CODE_APP_NOT_READY;
   }

+  if (pVnode->isFull) {
+    vDebug("vgId:%d, vnode is full, refCount:%d", pVnode->vgId, pVnode->refCount);
+    return TSDB_CODE_VND_IS_FULL;
+  }
+
   return TSDB_CODE_SUCCESS;
 }

-void vnodeConfirmForward(void *param, uint64_t version, int32_t code) {
-  SVnodeObj *pVnode = (SVnodeObj *)param;
+void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code) {
+  SVnodeObj *pVnode = vparam;
   syncConfirmForward(pVnode->sync, version, code);
 }

@@ -237,8 +244,25 @@ int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam) {
   memcpy(pWrite->pHead, pHead, sizeof(SWalHead) + pHead->len);

   atomic_add_fetch_32(&pVnode->refCount, 1);
-  vTrace("vgId:%d, get vnode wqueue, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
+
+  int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1);
+  if (queued > MAX_QUEUED_MSG_NUM) {
+    vDebug("vgId:%d, too many msg:%d in vwqueue, flow control", pVnode->vgId, queued);
+    taosMsleep(1);
+  }
+
+  vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedWMsg);

   taosWriteQitem(pVnode->wqueue, qtype, pWrite);
   return TSDB_CODE_SUCCESS;
 }

+void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) {
+  SVnodeObj *pVnode = vparam;
+
+  atomic_sub_fetch_32(&pVnode->queuedWMsg, 1);
+  vTrace("vgId:%d, free from vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedWMsg);
+
+  taosFreeQitem(pWrite);
+  vnodeRelease(pVnode);
+}
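The write path above adds a simple back-pressure rule: once more than `MAX_QUEUED_MSG_NUM` write requests are waiting in the vnode write queue, the producer pauses for about a millisecond before queuing the next one, which caps how far clients can outrun the apply thread. The sketch below reproduces just that rule; the threshold and the 1 ms sleep mirror the diff, while the function and counter are illustrative stand-ins rather than the project's actual queue code.

```c
/* Sketch only: queue-depth based flow control before enqueuing a write. */
#include <stdatomic.h>
#include <unistd.h>

#define MAX_QUEUED_MSG_NUM 10000

static atomic_int queuedWMsg;

static void write_to_wqueue(void) {
  int queued = atomic_fetch_add(&queuedWMsg, 1) + 1;
  if (queued > MAX_QUEUED_MSG_NUM) {
    usleep(1000);  /* ~1 ms pause, the equivalent of taosMsleep(1) */
  }
  /* ... actually enqueue the message here ... */
}
```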
@@ -37,7 +37,7 @@ extern int32_t wDebugFlag;
 #define WAL_MAX_SIZE  (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + 16)
 #define WAL_SIGNATURE ((uint32_t)(0xFAFBFDFE))
 #define WAL_PATH_LEN  (TSDB_FILENAME_LEN + 12)
-#define WAL_FILE_LEN  (TSDB_FILENAME_LEN + 32)
+#define WAL_FILE_LEN  (WAL_PATH_LEN + 32)
 #define WAL_FILE_NUM  3

 typedef struct {
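The macro change above matters because a WAL file name is built from the WAL directory path plus a suffix, so the file-name buffer must be sized from `WAL_PATH_LEN` rather than the bare `TSDB_FILENAME_LEN`, or a maximal path could be truncated. A small illustrative sketch follows; the numeric constants and the helper are stand-ins with the same size relationships, not the real WAL code.

```c
/* Sketch only: size the file-name buffer from the path buffer plus suffix room. */
#include <stdio.h>

#define FILENAME_LEN 128
#define WAL_PATH_LEN (FILENAME_LEN + 12)
#define WAL_FILE_LEN (WAL_PATH_LEN + 32)  /* path buffer + room for "/wal<index>" */

static void build_wal_name(const char *path, int index, char name[WAL_FILE_LEN]) {
  snprintf(name, WAL_FILE_LEN, "%s/wal%d", path, index);
}
```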
@@ -132,7 +132,7 @@ https://www.taosdata.com/cn/all-downloads/
 After completing the configuration, use the taos shell from the command line to connect to the server:

 ```shell
-C:\TDengine>taos
+C:\TDengine>taos -h td01
 Welcome to the TDengine shell from Linux, Client Version:2.0.1.1
 Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
@@ -0,0 +1,33 @@
README.md
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/

### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache

### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr

### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/

### VS Code ###
.vscode/
tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/MavenWrapperDownloader.java (new file, 118 lines, vendored)
|
@ -0,0 +1,118 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2007-present the original author or authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import java.net.*;
|
||||||
|
import java.io.*;
|
||||||
|
import java.nio.channels.*;
|
||||||
|
import java.util.Properties;
|
||||||
|
|
||||||
|
public class MavenWrapperDownloader {
|
||||||
|
|
||||||
|
private static final String WRAPPER_VERSION = "0.5.6";
|
||||||
|
/**
|
||||||
|
* Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
|
||||||
|
*/
|
||||||
|
private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
|
||||||
|
+ WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
|
||||||
|
* use instead of the default one.
|
||||||
|
*/
|
||||||
|
private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
|
||||||
|
".mvn/wrapper/maven-wrapper.properties";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Path where the maven-wrapper.jar will be saved to.
|
||||||
|
*/
|
||||||
|
private static final String MAVEN_WRAPPER_JAR_PATH =
|
||||||
|
".mvn/wrapper/maven-wrapper.jar";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Name of the property which should be used to override the default download url for the wrapper.
|
||||||
|
*/
|
||||||
|
private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
|
||||||
|
|
||||||
|
public static void main(String args[]) {
|
||||||
|
System.out.println("- Downloader started");
|
||||||
|
File baseDirectory = new File(args[0]);
|
||||||
|
System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
|
||||||
|
|
||||||
|
// If the maven-wrapper.properties exists, read it and check if it contains a custom
|
||||||
|
// wrapperUrl parameter.
|
||||||
|
File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
|
||||||
|
String url = DEFAULT_DOWNLOAD_URL;
|
||||||
|
if (mavenWrapperPropertyFile.exists()) {
|
||||||
|
FileInputStream mavenWrapperPropertyFileInputStream = null;
|
||||||
|
try {
|
||||||
|
mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
|
||||||
|
Properties mavenWrapperProperties = new Properties();
|
||||||
|
mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
|
||||||
|
url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
|
||||||
|
} catch (IOException e) {
|
||||||
|
System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
|
||||||
|
} finally {
|
||||||
|
try {
|
||||||
|
if (mavenWrapperPropertyFileInputStream != null) {
|
||||||
|
mavenWrapperPropertyFileInputStream.close();
|
||||||
|
}
|
||||||
|
} catch (IOException e) {
|
||||||
|
// Ignore ...
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
System.out.println("- Downloading from: " + url);
|
||||||
|
|
||||||
|
File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
|
||||||
|
if (!outputFile.getParentFile().exists()) {
|
||||||
|
if (!outputFile.getParentFile().mkdirs()) {
|
||||||
|
System.out.println(
|
||||||
|
"- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
|
||||||
|
try {
|
||||||
|
downloadFileFromURL(url, outputFile);
|
||||||
|
System.out.println("Done");
|
||||||
|
System.exit(0);
|
||||||
|
} catch (Throwable e) {
|
||||||
|
System.out.println("- Error downloading");
|
||||||
|
e.printStackTrace();
|
||||||
|
System.exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void downloadFileFromURL(String urlString, File destination) throws Exception {
|
||||||
|
if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
|
||||||
|
String username = System.getenv("MVNW_USERNAME");
|
||||||
|
char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
|
||||||
|
Authenticator.setDefault(new Authenticator() {
|
||||||
|
@Override
|
||||||
|
protected PasswordAuthentication getPasswordAuthentication() {
|
||||||
|
return new PasswordAuthentication(username, password);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
URL website = new URL(urlString);
|
||||||
|
ReadableByteChannel rbc;
|
||||||
|
rbc = Channels.newChannel(website.openStream());
|
||||||
|
FileOutputStream fos = new FileOutputStream(destination);
|
||||||
|
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
|
||||||
|
fos.close();
|
||||||
|
rbc.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
Binary file not shown.
|
@@ -0,0 +1,2 @@
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip
wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
@ -0,0 +1,322 @@
|
||||||
|
#!/bin/sh
|
||||||
|
# ----------------------------------------------------------------------------
|
||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing,
|
||||||
|
# software distributed under the License is distributed on an
|
||||||
|
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
# KIND, either express or implied. See the License for the
|
||||||
|
# specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
# ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# ----------------------------------------------------------------------------
|
||||||
|
# Maven Start Up Batch script
|
||||||
|
#
|
||||||
|
# Required ENV vars:
|
||||||
|
# ------------------
|
||||||
|
# JAVA_HOME - location of a JDK home dir
|
||||||
|
#
|
||||||
|
# Optional ENV vars
|
||||||
|
# -----------------
|
||||||
|
# M2_HOME - location of maven2's installed home dir
|
||||||
|
# MAVEN_OPTS - parameters passed to the Java VM when running Maven
|
||||||
|
# e.g. to debug Maven itself, use
|
||||||
|
# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
|
||||||
|
# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
|
||||||
|
# ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
if [ -z "$MAVEN_SKIP_RC" ]; then
|
||||||
|
|
||||||
|
if [ -f /etc/mavenrc ]; then
|
||||||
|
. /etc/mavenrc
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -f "$HOME/.mavenrc" ]; then
|
||||||
|
. "$HOME/.mavenrc"
|
||||||
|
fi
|
||||||
|
|
||||||
|
fi
|
||||||
|
|
||||||
|
# OS specific support. $var _must_ be set to either true or false.
|
||||||
|
cygwin=false
|
||||||
|
darwin=false
|
||||||
|
mingw=false
|
||||||
|
case "$(uname)" in
|
||||||
|
CYGWIN*) cygwin=true ;;
|
||||||
|
MINGW*) mingw=true ;;
|
||||||
|
Darwin*)
|
||||||
|
darwin=true
|
||||||
|
# Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
|
||||||
|
# See https://developer.apple.com/library/mac/qa/qa1170/_index.html
|
||||||
|
if [ -z "$JAVA_HOME" ]; then
|
||||||
|
if [ -x "/usr/libexec/java_home" ]; then
|
||||||
|
export JAVA_HOME="$(/usr/libexec/java_home)"
|
||||||
|
else
|
||||||
|
export JAVA_HOME="/Library/Java/Home"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
if [ -z "$JAVA_HOME" ]; then
|
||||||
|
if [ -r /etc/gentoo-release ]; then
|
||||||
|
JAVA_HOME=$(java-config --jre-home)
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$M2_HOME" ]; then
|
||||||
|
## resolve links - $0 may be a link to maven's home
|
||||||
|
PRG="$0"
|
||||||
|
|
||||||
|
# need this for relative symlinks
|
||||||
|
while [ -h "$PRG" ]; do
|
||||||
|
ls=$(ls -ld "$PRG")
|
||||||
|
link=$(expr "$ls" : '.*-> \(.*\)$')
|
||||||
|
if expr "$link" : '/.*' >/dev/null; then
|
||||||
|
PRG="$link"
|
||||||
|
else
|
||||||
|
PRG="$(dirname "$PRG")/$link"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
saveddir=$(pwd)
|
||||||
|
|
||||||
|
M2_HOME=$(dirname "$PRG")/..
|
||||||
|
|
||||||
|
# make it fully qualified
|
||||||
|
M2_HOME=$(cd "$M2_HOME" && pwd)
|
||||||
|
|
||||||
|
cd "$saveddir"
|
||||||
|
# echo Using m2 at $M2_HOME
|
||||||
|
fi
|
||||||
|
|
||||||
|
# For Cygwin, ensure paths are in UNIX format before anything is touched
|
||||||
|
if $cygwin; then
|
||||||
|
[ -n "$M2_HOME" ] &&
|
||||||
|
M2_HOME=$(cygpath --unix "$M2_HOME")
|
||||||
|
[ -n "$JAVA_HOME" ] &&
|
||||||
|
JAVA_HOME=$(cygpath --unix "$JAVA_HOME")
|
||||||
|
[ -n "$CLASSPATH" ] &&
|
||||||
|
CLASSPATH=$(cygpath --path --unix "$CLASSPATH")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# For Mingw, ensure paths are in UNIX format before anything is touched
|
||||||
|
if $mingw; then
|
||||||
|
[ -n "$M2_HOME" ] &&
|
||||||
|
M2_HOME="$( (
|
||||||
|
cd "$M2_HOME"
|
||||||
|
pwd
|
||||||
|
))"
|
||||||
|
[ -n "$JAVA_HOME" ] &&
|
||||||
|
JAVA_HOME="$( (
|
||||||
|
cd "$JAVA_HOME"
|
||||||
|
pwd
|
||||||
|
))"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$JAVA_HOME" ]; then
|
||||||
|
javaExecutable="$(which javac)"
|
||||||
|
if [ -n "$javaExecutable" ] && ! [ "$(expr \"$javaExecutable\" : '\([^ ]*\)')" = "no" ]; then
|
||||||
|
# readlink(1) is not available as standard on Solaris 10.
|
||||||
|
readLink=$(which readlink)
|
||||||
|
if [ ! $(expr "$readLink" : '\([^ ]*\)') = "no" ]; then
|
||||||
|
if $darwin; then
|
||||||
|
javaHome="$(dirname \"$javaExecutable\")"
|
||||||
|
javaExecutable="$(cd \"$javaHome\" && pwd -P)/javac"
|
||||||
|
else
|
||||||
|
javaExecutable="$(readlink -f \"$javaExecutable\")"
|
||||||
|
fi
|
||||||
|
javaHome="$(dirname \"$javaExecutable\")"
|
||||||
|
javaHome=$(expr "$javaHome" : '\(.*\)/bin')
|
||||||
|
JAVA_HOME="$javaHome"
|
||||||
|
export JAVA_HOME
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$JAVACMD" ]; then
|
||||||
|
if [ -n "$JAVA_HOME" ]; then
|
||||||
|
if [ -x "$JAVA_HOME/jre/sh/java" ]; then
|
||||||
|
# IBM's JDK on AIX uses strange locations for the executables
|
||||||
|
JAVACMD="$JAVA_HOME/jre/sh/java"
|
||||||
|
else
|
||||||
|
JAVACMD="$JAVA_HOME/bin/java"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
JAVACMD="$(which java)"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -x "$JAVACMD" ]; then
|
||||||
|
echo "Error: JAVA_HOME is not defined correctly." >&2
|
||||||
|
echo " We cannot execute $JAVACMD" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$JAVA_HOME" ]; then
|
||||||
|
echo "Warning: JAVA_HOME environment variable is not set."
|
||||||
|
fi
|
||||||
|
|
||||||
|
CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
|
||||||
|
|
||||||
|
# traverses directory structure from process work directory to filesystem root
|
||||||
|
# first directory with .mvn subdirectory is considered project base directory
|
||||||
|
find_maven_basedir() {
|
||||||
|
|
||||||
|
if [ -z "$1" ]; then
|
||||||
|
echo "Path not specified to find_maven_basedir"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
basedir="$1"
|
||||||
|
wdir="$1"
|
||||||
|
while [ "$wdir" != '/' ]; do
|
||||||
|
if [ -d "$wdir"/.mvn ]; then
|
||||||
|
basedir=$wdir
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
# workaround for JBEAP-8937 (on Solaris 10/Sparc)
|
||||||
|
if [ -d "${wdir}" ]; then
|
||||||
|
wdir=$(
|
||||||
|
cd "$wdir/.."
|
||||||
|
pwd
|
||||||
|
)
|
||||||
|
fi
|
||||||
|
# end of workaround
|
||||||
|
done
|
||||||
|
echo "${basedir}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# concatenates all lines of a file
|
||||||
|
concat_lines() {
|
||||||
|
if [ -f "$1" ]; then
|
||||||
|
echo "$(tr -s '\n' ' ' <"$1")"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
BASE_DIR=$(find_maven_basedir "$(pwd)")
|
||||||
|
if [ -z "$BASE_DIR" ]; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
##########################################################################################
|
||||||
|
# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
|
||||||
|
# This allows using the maven wrapper in projects that prohibit checking in binary data.
|
||||||
|
##########################################################################################
|
||||||
|
if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
|
||||||
|
if [ "$MVNW_VERBOSE" = true ]; then
|
||||||
|
echo "Found .mvn/wrapper/maven-wrapper.jar"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
if [ "$MVNW_VERBOSE" = true ]; then
|
||||||
|
echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
|
||||||
|
fi
|
||||||
|
if [ -n "$MVNW_REPOURL" ]; then
|
||||||
|
jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||||
|
else
|
||||||
|
jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||||
|
fi
|
||||||
|
while IFS="=" read key value; do
|
||||||
|
case "$key" in wrapperUrl)
|
||||||
|
jarUrl="$value"
|
||||||
|
break
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done <"$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
|
||||||
|
if [ "$MVNW_VERBOSE" = true ]; then
|
||||||
|
echo "Downloading from: $jarUrl"
|
||||||
|
fi
|
||||||
|
wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
|
||||||
|
if $cygwin; then
|
||||||
|
wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath")
|
||||||
|
fi
|
||||||
|
|
||||||
|
if command -v wget >/dev/null; then
|
||||||
|
if [ "$MVNW_VERBOSE" = true ]; then
|
||||||
|
echo "Found wget ... using wget"
|
||||||
|
fi
|
||||||
|
if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
|
||||||
|
wget "$jarUrl" -O "$wrapperJarPath"
|
||||||
|
else
|
||||||
|
wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
|
||||||
|
fi
|
||||||
|
elif command -v curl >/dev/null; then
|
||||||
|
if [ "$MVNW_VERBOSE" = true ]; then
|
||||||
|
echo "Found curl ... using curl"
|
||||||
|
fi
|
||||||
|
if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
|
||||||
|
curl -o "$wrapperJarPath" "$jarUrl" -f
|
||||||
|
else
|
||||||
|
curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
|
||||||
|
fi
|
||||||
|
|
||||||
|
else
|
||||||
|
if [ "$MVNW_VERBOSE" = true ]; then
|
||||||
|
echo "Falling back to using Java to download"
|
||||||
|
fi
|
||||||
|
javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
|
||||||
|
# For Cygwin, switch paths to Windows format before running javac
|
||||||
|
if $cygwin; then
|
||||||
|
javaClass=$(cygpath --path --windows "$javaClass")
|
||||||
|
fi
|
||||||
|
if [ -e "$javaClass" ]; then
|
||||||
|
if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
|
||||||
|
if [ "$MVNW_VERBOSE" = true ]; then
|
||||||
|
echo " - Compiling MavenWrapperDownloader.java ..."
|
||||||
|
fi
|
||||||
|
# Compiling the Java class
|
||||||
|
("$JAVA_HOME/bin/javac" "$javaClass")
|
||||||
|
fi
|
||||||
|
if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
|
||||||
|
# Running the downloader
|
||||||
|
if [ "$MVNW_VERBOSE" = true ]; then
|
||||||
|
echo " - Running MavenWrapperDownloader.java ..."
|
||||||
|
fi
|
||||||
|
("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
##########################################################################################
|
||||||
|
# End of extension
|
||||||
|
##########################################################################################
|
||||||
|
|
||||||
|
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
|
||||||
|
if [ "$MVNW_VERBOSE" = true ]; then
|
||||||
|
echo $MAVEN_PROJECTBASEDIR
|
||||||
|
fi
|
||||||
|
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
|
||||||
|
|
||||||
|
# For Cygwin, switch paths to Windows format before running java
|
||||||
|
if $cygwin; then
|
||||||
|
[ -n "$M2_HOME" ] &&
|
||||||
|
M2_HOME=$(cygpath --path --windows "$M2_HOME")
|
||||||
|
[ -n "$JAVA_HOME" ] &&
|
||||||
|
JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME")
|
||||||
|
[ -n "$CLASSPATH" ] &&
|
||||||
|
CLASSPATH=$(cygpath --path --windows "$CLASSPATH")
|
||||||
|
[ -n "$MAVEN_PROJECTBASEDIR" ] &&
|
||||||
|
MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Provide a "standardized" way to retrieve the CLI args that will
|
||||||
|
# work with both Windows and non-Windows executions.
|
||||||
|
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
|
||||||
|
export MAVEN_CMD_LINE_ARGS
|
||||||
|
|
||||||
|
WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||||
|
|
||||||
|
exec "$JAVACMD" \
|
||||||
|
$MAVEN_OPTS \
|
||||||
|
-classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
|
||||||
|
"-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
|
||||||
|
${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
|
|
@ -0,0 +1,182 @@
|
||||||
|
@REM ----------------------------------------------------------------------------
|
||||||
|
@REM Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
@REM or more contributor license agreements. See the NOTICE file
|
||||||
|
@REM distributed with this work for additional information
|
||||||
|
@REM regarding copyright ownership. The ASF licenses this file
|
||||||
|
@REM to you under the Apache License, Version 2.0 (the
|
||||||
|
@REM "License"); you may not use this file except in compliance
|
||||||
|
@REM with the License. You may obtain a copy of the License at
|
||||||
|
@REM
|
||||||
|
@REM https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
@REM
|
||||||
|
@REM Unless required by applicable law or agreed to in writing,
|
||||||
|
@REM software distributed under the License is distributed on an
|
||||||
|
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
@REM KIND, either express or implied. See the License for the
|
||||||
|
@REM specific language governing permissions and limitations
|
||||||
|
@REM under the License.
|
||||||
|
@REM ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
@REM ----------------------------------------------------------------------------
|
||||||
|
@REM Maven Start Up Batch script
|
||||||
|
@REM
|
||||||
|
@REM Required ENV vars:
|
||||||
|
@REM JAVA_HOME - location of a JDK home dir
|
||||||
|
@REM
|
||||||
|
@REM Optional ENV vars
|
||||||
|
@REM M2_HOME - location of maven2's installed home dir
|
||||||
|
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
|
||||||
|
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending
|
||||||
|
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
|
||||||
|
@REM e.g. to debug Maven itself, use
|
||||||
|
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
|
||||||
|
@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
|
||||||
|
@REM ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
|
||||||
|
@echo off
|
||||||
|
@REM set title of command window
|
||||||
|
title %0
|
||||||
|
@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
|
||||||
|
@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
|
||||||
|
|
||||||
|
@REM set %HOME% to equivalent of $HOME
|
||||||
|
if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
|
||||||
|
|
||||||
|
@REM Execute a user defined script before this one
|
||||||
|
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
|
||||||
|
@REM check for pre script, once with legacy .bat ending and once with .cmd ending
|
||||||
|
if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
|
||||||
|
if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
|
||||||
|
:skipRcPre
|
||||||
|
|
||||||
|
@setlocal
|
||||||
|
|
||||||
|
set ERROR_CODE=0
|
||||||
|
|
||||||
|
@REM To isolate internal variables from possible post scripts, we use another setlocal
|
||||||
|
@setlocal
|
||||||
|
|
||||||
|
@REM ==== START VALIDATION ====
|
||||||
|
if not "%JAVA_HOME%" == "" goto OkJHome
|
||||||
|
|
||||||
|
echo.
|
||||||
|
echo Error: JAVA_HOME not found in your environment. >&2
|
||||||
|
echo Please set the JAVA_HOME variable in your environment to match the >&2
|
||||||
|
echo location of your Java installation. >&2
|
||||||
|
echo.
|
||||||
|
goto error
|
||||||
|
|
||||||
|
:OkJHome
|
||||||
|
if exist "%JAVA_HOME%\bin\java.exe" goto init
|
||||||
|
|
||||||
|
echo.
|
||||||
|
echo Error: JAVA_HOME is set to an invalid directory. >&2
|
||||||
|
echo JAVA_HOME = "%JAVA_HOME%" >&2
|
||||||
|
echo Please set the JAVA_HOME variable in your environment to match the >&2
|
||||||
|
echo location of your Java installation. >&2
|
||||||
|
echo.
|
||||||
|
goto error
|
||||||
|
|
||||||
|
@REM ==== END VALIDATION ====
|
||||||
|
|
||||||
|
:init
|
||||||
|
|
||||||
|
@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
|
||||||
|
@REM Fallback to current working directory if not found.
|
||||||
|
|
||||||
|
set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
|
||||||
|
IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
|
||||||
|
|
||||||
|
set EXEC_DIR=%CD%
|
||||||
|
set WDIR=%EXEC_DIR%
|
||||||
|
:findBaseDir
|
||||||
|
IF EXIST "%WDIR%"\.mvn goto baseDirFound
|
||||||
|
cd ..
|
||||||
|
IF "%WDIR%"=="%CD%" goto baseDirNotFound
|
||||||
|
set WDIR=%CD%
|
||||||
|
goto findBaseDir
|
||||||
|
|
||||||
|
:baseDirFound
|
||||||
|
set MAVEN_PROJECTBASEDIR=%WDIR%
|
||||||
|
cd "%EXEC_DIR%"
|
||||||
|
goto endDetectBaseDir
|
||||||
|
|
||||||
|
:baseDirNotFound
|
||||||
|
set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
|
||||||
|
cd "%EXEC_DIR%"
|
||||||
|
|
||||||
|
:endDetectBaseDir
|
||||||
|
|
||||||
|
IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
|
||||||
|
|
||||||
|
@setlocal EnableExtensions EnableDelayedExpansion
|
||||||
|
for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
|
||||||
|
@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
|
||||||
|
|
||||||
|
:endReadAdditionalConfig
|
||||||
|
|
||||||
|
SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
|
||||||
|
set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
|
||||||
|
set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||||
|
|
||||||
|
set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||||
|
|
||||||
|
FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
|
||||||
|
IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
|
||||||
|
)
|
||||||
|
|
||||||
|
@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
|
||||||
|
@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
|
||||||
|
if exist %WRAPPER_JAR% (
|
||||||
|
if "%MVNW_VERBOSE%" == "true" (
|
||||||
|
echo Found %WRAPPER_JAR%
|
||||||
|
)
|
||||||
|
) else (
|
||||||
|
if not "%MVNW_REPOURL%" == "" (
|
||||||
|
SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||||
|
)
|
||||||
|
if "%MVNW_VERBOSE%" == "true" (
|
||||||
|
echo Couldn't find %WRAPPER_JAR%, downloading it ...
|
||||||
|
echo Downloading from: %DOWNLOAD_URL%
|
||||||
|
)
|
||||||
|
|
||||||
|
powershell -Command "&{"^
|
||||||
|
"$webclient = new-object System.Net.WebClient;"^
|
||||||
|
"if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
|
||||||
|
"$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
|
||||||
|
"}"^
|
||||||
|
"[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
|
||||||
|
"}"
|
||||||
|
if "%MVNW_VERBOSE%" == "true" (
|
||||||
|
echo Finished downloading %WRAPPER_JAR%
|
||||||
|
)
|
||||||
|
)
|
||||||
|
@REM End of extension
|
||||||
|
|
||||||
|
@REM Provide a "standardized" way to retrieve the CLI args that will
|
||||||
|
@REM work with both Windows and non-Windows executions.
|
||||||
|
set MAVEN_CMD_LINE_ARGS=%*
|
||||||
|
|
||||||
|
%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
|
||||||
|
if ERRORLEVEL 1 goto error
|
||||||
|
goto end
|
||||||
|
|
||||||
|
:error
|
||||||
|
set ERROR_CODE=1
|
||||||
|
|
||||||
|
:end
|
||||||
|
@endlocal & set ERROR_CODE=%ERROR_CODE%
|
||||||
|
|
||||||
|
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
|
||||||
|
@REM check for post script, once with legacy .bat ending and once with .cmd ending
|
||||||
|
if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
|
||||||
|
if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
|
||||||
|
:skipRcPost
|
||||||
|
|
||||||
|
@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
|
||||||
|
if "%MAVEN_BATCH_PAUSE%" == "on" pause
|
||||||
|
|
||||||
|
if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
|
||||||
|
|
||||||
|
exit /B %ERROR_CODE%
|
|
@@ -0,0 +1,101 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.4.0</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>com.taosdata.example</groupId>
    <artifactId>mybatisplus-demo</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>mybatisplus-demo</name>
    <description>Demo project for tdengine</description>

    <properties>
        <java.version>1.8</java.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter</artifactId>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>com.baomidou</groupId>
            <artifactId>mybatis-plus-boot-starter</artifactId>
            <version>3.1.2</version>
        </dependency>
        <dependency>
            <groupId>com.h2database</groupId>
            <artifactId>h2</artifactId>
            <scope>runtime</scope>
        </dependency>

        <dependency>
            <groupId>com.taosdata.jdbc</groupId>
            <artifactId>taos-jdbcdriver</artifactId>
            <version>2.0.11</version>
        </dependency>

        <!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.47</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-devtools</artifactId>
            <scope>runtime</scope>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>

            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <version>2.17</version>
                <configuration>
                    <includes>
                        <include>**/*Test.java</include>
                    </includes>
                    <excludes>
                        <exclude>**/Abstract*.java</exclude>
                    </excludes>
                </configuration>
            </plugin>
        </plugins>
    </build>

</project>
@@ -0,0 +1,15 @@
package com.taosdata.example.mybatisplusdemo;

import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
@MapperScan("com.taosdata.example.mybatisplusdemo.mapper")
public class MybatisplusDemoApplication {

    public static void main(String[] args) {
        SpringApplication.run(MybatisplusDemoApplication.class, args);
    }

}
@@ -0,0 +1,34 @@
package com.taosdata.example.mybatisplusdemo.config;

import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class MybatisPlusConfig {

    /** mybatis 3.4.1 pagination config start ***/
//    @Bean
//    public MybatisPlusInterceptor mybatisPlusInterceptor() {
//        MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
//        interceptor.addInnerInterceptor(new PaginationInnerInterceptor());
//        return interceptor;
//    }

//    @Bean
//    public ConfigurationCustomizer configurationCustomizer() {
//        return configuration -> configuration.setUseDeprecatedExecutor(false);
//    }

    @Bean
    public PaginationInterceptor paginationInterceptor() {
//        return new PaginationInterceptor();
        PaginationInterceptor paginationInterceptor = new PaginationInterceptor();
        // TODO: mybatis-plus does not support TDengine directly, so use the PostgreSQL dialect
        paginationInterceptor.setDialectType("postgresql");

        return paginationInterceptor;
    }

}
@@ -0,0 +1,15 @@
package com.taosdata.example.mybatisplusdemo.domain;

import lombok.Data;

import java.sql.Timestamp;

@Data
public class Temperature {

    private Timestamp ts;
    private float temperature;
    private String location;
    private int tbIndex;

}
@@ -0,0 +1,15 @@
package com.taosdata.example.mybatisplusdemo.domain;

import lombok.Data;

import java.sql.Timestamp;

@Data
public class Weather {

    private Timestamp ts;
    private float temperature;
    private int humidity;
    private String location;

}
@@ -0,0 +1,23 @@
package com.taosdata.example.mybatisplusdemo.mapper;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.taosdata.example.mybatisplusdemo.domain.Temperature;
import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Update;

public interface TemperatureMapper extends BaseMapper<Temperature> {

    @Update("CREATE TABLE if not exists temperature(ts timestamp, temperature float) tags(location nchar(64), tbIndex int)")
    int createSuperTable();

    @Update("create table #{tbName} using temperature tags( #{location}, #{tbindex})")
    int createTable(@Param("tbName") String tbName, @Param("location") String location, @Param("tbindex") int tbindex);

    @Update("drop table if exists temperature")
    void dropSuperTable();

    @Insert("insert into t${tbIndex}(ts, temperature) values(#{ts}, #{temperature})")
    int insertOne(Temperature one);

}
@@ -0,0 +1,8 @@
package com.taosdata.example.mybatisplusdemo.mapper;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.taosdata.example.mybatisplusdemo.domain.Weather;

public interface WeatherMapper extends BaseMapper<Weather> {

}
@@ -0,0 +1,34 @@
spring:
  datasource:
    #    driver-class-name: org.h2.Driver
    #    schema: classpath:db/schema-mysql.sql
    #    data: classpath:db/data-mysql.sql
    #    url: jdbc:h2:mem:test
    #    username: root
    #    password: test

    #    driver-class-name: com.mysql.jdbc.Driver
    #    url: jdbc:mysql://master:3306/test?useSSL=false
    #    username: root
    #    password: 123456

    driver-class-name: com.taosdata.jdbc.TSDBDriver
    url: jdbc:TAOS://localhost:6030/mp_test
    user: root
    password: taosdata
    charset: UTF-8
    locale: en_US.UTF-8
    timezone: UTC-8

mybatis-plus:
  configuration:
    map-underscore-to-camel-case: false

logging:
  level:
    com:
      taosdata:
        example:
          mybatisplusdemo:
            mapper: debug
@@ -0,0 +1,140 @@
package com.taosdata.example.mybatisplusdemo.mapper;

import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.taosdata.example.mybatisplusdemo.domain.Temperature;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

import java.sql.Timestamp;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest
public class TemperatureMapperTest {

    private static Random random = new Random(System.currentTimeMillis());
    private static String[] locations = {"北京", "上海", "深圳", "广州", "杭州"};

    @Before
    public void before() {
        mapper.dropSuperTable();
        // create the super table temperature
        mapper.createSuperTable();
        // create table t_X using temperature
        for (int i = 0; i < 10; i++) {
            mapper.createTable("t" + i, locations[random.nextInt(locations.length)], i);
        }
        // insert test data
        int affectRows = 0;
        // insert into 10 tables
        for (int i = 0; i < 10; i++) {
            // insert 5 rows into each table
            for (int j = 0; j < 5; j++) {
                Temperature one = new Temperature();
                one.setTs(new Timestamp(1605024000000l));
                one.setTemperature(random.nextFloat() * 50);
                one.setLocation("望京");
                one.setTbIndex(i);
                affectRows += mapper.insertOne(one);
            }
        }
        Assert.assertEquals(50, affectRows);
    }

    @After
    public void after() {
        mapper.dropSuperTable();
    }

    @Autowired
    private TemperatureMapper mapper;

    /***
     * test selectList
     * **/
    @Test
    public void testSelectList() {
        List<Temperature> temperatureList = mapper.selectList(null);
        temperatureList.forEach(System.out::println);
    }

    /***
     * test insertOne, which is a custom method
     * ***/
    @Test
    public void testInsert() {
        Temperature one = new Temperature();
        one.setTs(new Timestamp(1605024000000l));
        one.setTemperature(random.nextFloat() * 50);
        one.setLocation("望京");
        int affectRows = mapper.insertOne(one);
        Assert.assertEquals(1, affectRows);
    }

    /***
     * test selectOne
     * **/
    @Test
    public void testSelectOne() {
        QueryWrapper<Temperature> wrapper = new QueryWrapper<>();
        wrapper.eq("location", "beijing");
        Temperature one = mapper.selectOne(wrapper);
        System.out.println(one);
        Assert.assertNotNull(one);
    }

    /***
     * test selectByMap
     * ***/
    @Test
    public void testSelectByMap() {
        Map<String, Object> map = new HashMap<>();
        map.put("location", "beijing");
        List<Temperature> temperatures = mapper.selectByMap(map);
        Assert.assertEquals(1, temperatures.size());
    }

    /***
     * test selectObjs
     * **/
    @Test
    public void testSelectObjs() {
        List<Object> ts = mapper.selectObjs(null);
        System.out.println(ts);
    }

    /**
     * test selectCount
     * **/
    @Test
    public void testSelectCount() {
        int count = mapper.selectCount(null);
        Assert.assertEquals(5, count);
    }

    /****
     * test pagination
     */
    @Test
    public void testSelectPage() {
        IPage page = new Page(1, 2);
        IPage<Temperature> temperatureIPage = mapper.selectPage(page, null);
        System.out.println("total : " + temperatureIPage.getTotal());
        System.out.println("pages : " + temperatureIPage.getPages());
        for (Temperature temperature : temperatureIPage.getRecords()) {
            System.out.println(temperature);
        }
    }

}
@@ -0,0 +1,88 @@
package com.taosdata.example.mybatisplusdemo.mapper;

import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.taosdata.example.mybatisplusdemo.domain.Weather;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;

import java.sql.Timestamp;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest
public class WeatherMapperTest {

    private static Random random = new Random(System.currentTimeMillis());

    @Autowired
    private WeatherMapper mapper;

    @Test
    public void testSelectList() {
        List<Weather> weathers = mapper.selectList(null);
        weathers.forEach(System.out::println);
    }

    @Test
    public void testInsert() {
        Weather one = new Weather();
        one.setTs(new Timestamp(1605024000000L));
        one.setTemperature(random.nextFloat() * 50);
        one.setHumidity(random.nextInt(100));
        one.setLocation("望京");
        int affectRows = mapper.insert(one);
        Assert.assertEquals(1, affectRows);
    }

    @Test
    public void testSelectOne() {
        QueryWrapper<Weather> wrapper = new QueryWrapper<>();
        wrapper.eq("location", "beijing");
        Weather one = mapper.selectOne(wrapper);
        System.out.println(one);
        Assert.assertEquals(12.22f, one.getTemperature(), 0.00f);
        Assert.assertEquals("beijing", one.getLocation());
    }

    @Test
    public void testSelectByMap() {
        Map<String, Object> map = new HashMap<>();
        map.put("location", "beijing");
        List<Weather> weathers = mapper.selectByMap(map);
        Assert.assertEquals(1, weathers.size());
    }

    @Test
    public void testSelectObjs() {
        List<Object> ts = mapper.selectObjs(null);
        System.out.println(ts);
    }

    @Test
    public void testSelectCount() {
        int count = mapper.selectCount(null);
        // Assert.assertEquals(5, count);
        System.out.println(count);
    }

    @Test
    public void testSelectPage() {
        IPage page = new Page(1, 2);
        IPage<Weather> weatherIPage = mapper.selectPage(page, null);
        System.out.println("total : " + weatherIPage.getTotal());
        System.out.println("pages : " + weatherIPage.getPages());
        for (Weather weather : weatherIPage.getRecords()) {
            System.out.println(weather);
        }
    }

}

@@ -50,10 +50,10 @@ static void queryDB(TAOS *taos, char *command) {
   taos_free_result(pSql);
 }
 
+void Test(char *qstr, const char *input, int i);
+
 int main(int argc, char *argv[]) {
-  TAOS * taos;
   char qstr[1024];
-  TAOS_RES *result;
 
   // connect to server
   if (argc < 2) {
@@ -63,41 +63,26 @@ int main(int argc, char *argv[]) {
 
   // init TAOS
   taos_init();
-  taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
+  for (int i = 0; i < 4000000; i++) {
+    Test(qstr, argv[1], i);
+  }
+  taos_cleanup();
+}
+void Test(char *qstr, const char *input, int index) {
+  TAOS *taos = taos_connect(input, "root", "taosdata", NULL, 0);
+  printf("==================test at %d\n================================", index);
+  queryDB(taos, "drop database if exists demo");
+  queryDB(taos, "create database demo");
+  TAOS_RES *result;
   if (taos == NULL) {
     printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/);
     exit(1);
   }
-  printf("success to connect to server\n");
 
 
-  //taos_query(taos, "drop database demo");
-  queryDB(taos, "drop database if exists demo");
 
-  //result = taos_query(taos, "create database demo");
-  //if (result == NULL) {
-  //  printf("failed to create database, reason:%s\n", "null result"/*taos_errstr(taos)*/);
-  //  exit(1);
-  //}
-  queryDB(taos, "create database demo");
-  printf("success to create database\n");
 
-  //taos_query(taos, "use demo");
   queryDB(taos, "use demo");
 
-  // create table
-  //if (taos_query(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))") == 0) {
-  //  printf("failed to create table, reason:%s\n", taos_errstr(result));
-  //  exit(1);
-  //}
   queryDB(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))");
   printf("success to create table\n");
 
-  // sleep for one second to make sure table is created on data node
-  // taosMsleep(1000);
 
-  // insert 10 records
   int i = 0;
   for (i = 0; i < 10; ++i) {
     sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')", 1546300800000 + i * 1000, i, i, i, i*10000000, i*1.0, i*2.0, "hello");
@@ -117,7 +102,6 @@ int main(int argc, char *argv[]) {
     }
     taos_free_result(result);
 
-    //sleep(1);
   }
   printf("success to insert rows, total %d rows\n", i);
 
@@ -147,5 +131,6 @@ int main(int argc, char *argv[]) {
 
   taos_free_result(result);
   printf("====demo end====\n\n");
-  return getchar();
+  taos_close(taos);
 }

@@ -1,3 +0,0 @@
/target
**/*.rs.bk
Cargo.lock

@@ -1,7 +0,0 @@
[package]
name = "tdengine"
version = "0.1.0"
authors = ["Chunhua Jiang <jiangch@3reality.com>"]
edition = "2018"

[dependencies]

@@ -1,20 +0,0 @@
# TDengine driver connector for Rust

It's a rust implementation for [TDengine](https://github.com/taosdata/TDengine), an open-sourced big data platform designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring.

## Dependencies
- Rust:
```
curl https://sh.rustup.rs -sSf | sh
```

## Run with Sample

Build and run basic sample:
```
cargo run --example demo
```
Build and run subscribe sample:
```
cargo run --example subscribe
```

@@ -1,10 +0,0 @@
// build.rs

use std::env;

fn main() {
    let project_dir = env::var("CARGO_MANIFEST_DIR").unwrap();

    println!("cargo:rustc-link-search={}", project_dir); // the "-L" flag
    println!("cargo:rustc-link-lib=taos"); // the "-l" flag
}

@@ -1,19 +0,0 @@
use std::process;
use tdengine::Tdengine;

fn main() {
    let tde = Tdengine::new("127.0.0.1", "root", "taosdata", "demo", 0)
        .unwrap_or_else(|err| {
            eprintln!("Can't create Tdengine: {}", err);
            process::exit(1)
        });

    tde.query("drop database demo");
    tde.query("create database demo");
    tde.query("use demo");
    tde.query("create table m1 (ts timestamp, speed int)");

    for i in 0..10 {
        tde.query(format!("insert into m1 values (now+{}s, {})", i, i).as_str());
    }
}

@@ -1,19 +0,0 @@
use std::process;
use tdengine::Subscriber;

fn main() {
    let subscriber = Subscriber::new("127.0.0.1", "root", "taosdata", "demo", "m1", 0, 1000)
        .unwrap_or_else(|err| {
            eprintln!("Can't create Subscriber: {}", err);
            process::exit(1)
        });

    loop {
        let row = subscriber.consume().unwrap_or_else(|err| {
            eprintln!("consume exit: {}", err);
            process::exit(1)
        });

        subscriber.print_row(&row);
    }
}

@ -1,332 +0,0 @@
|
||||||
/* automatically generated by rust-bindgen */
|
|
||||||
#![allow(unused)]
|
|
||||||
#![allow(non_camel_case_types)]
|
|
||||||
|
|
||||||
pub const _STDINT_H: u32 = 1;
|
|
||||||
pub const _FEATURES_H: u32 = 1;
|
|
||||||
pub const _DEFAULT_SOURCE: u32 = 1;
|
|
||||||
pub const __USE_ISOC11: u32 = 1;
|
|
||||||
pub const __USE_ISOC99: u32 = 1;
|
|
||||||
pub const __USE_ISOC95: u32 = 1;
|
|
||||||
pub const __USE_POSIX_IMPLICITLY: u32 = 1;
|
|
||||||
pub const _POSIX_SOURCE: u32 = 1;
|
|
||||||
pub const _POSIX_C_SOURCE: u32 = 200809;
|
|
||||||
pub const __USE_POSIX: u32 = 1;
|
|
||||||
pub const __USE_POSIX2: u32 = 1;
|
|
||||||
pub const __USE_POSIX199309: u32 = 1;
|
|
||||||
pub const __USE_POSIX199506: u32 = 1;
|
|
||||||
pub const __USE_XOPEN2K: u32 = 1;
|
|
||||||
pub const __USE_XOPEN2K8: u32 = 1;
|
|
||||||
pub const _ATFILE_SOURCE: u32 = 1;
|
|
||||||
pub const __USE_MISC: u32 = 1;
|
|
||||||
pub const __USE_ATFILE: u32 = 1;
|
|
||||||
pub const __USE_FORTIFY_LEVEL: u32 = 0;
|
|
||||||
pub const _STDC_PREDEF_H: u32 = 1;
|
|
||||||
pub const __STDC_IEC_559__: u32 = 1;
|
|
||||||
pub const __STDC_IEC_559_COMPLEX__: u32 = 1;
|
|
||||||
pub const __STDC_ISO_10646__: u32 = 201505;
|
|
||||||
pub const __STDC_NO_THREADS__: u32 = 1;
|
|
||||||
pub const __GNU_LIBRARY__: u32 = 6;
|
|
||||||
pub const __GLIBC__: u32 = 2;
|
|
||||||
pub const __GLIBC_MINOR__: u32 = 23;
|
|
||||||
pub const _SYS_CDEFS_H: u32 = 1;
|
|
||||||
pub const __WORDSIZE: u32 = 64;
|
|
||||||
pub const __WORDSIZE_TIME64_COMPAT32: u32 = 1;
|
|
||||||
pub const __SYSCALL_WORDSIZE: u32 = 64;
|
|
||||||
pub const _BITS_WCHAR_H: u32 = 1;
|
|
||||||
pub const INT8_MIN: i32 = -128;
|
|
||||||
pub const INT16_MIN: i32 = -32768;
|
|
||||||
pub const INT32_MIN: i32 = -2147483648;
|
|
||||||
pub const INT8_MAX: u32 = 127;
|
|
||||||
pub const INT16_MAX: u32 = 32767;
|
|
||||||
pub const INT32_MAX: u32 = 2147483647;
|
|
||||||
pub const UINT8_MAX: u32 = 255;
|
|
||||||
pub const UINT16_MAX: u32 = 65535;
|
|
||||||
pub const UINT32_MAX: u32 = 4294967295;
|
|
||||||
pub const INT_LEAST8_MIN: i32 = -128;
|
|
||||||
pub const INT_LEAST16_MIN: i32 = -32768;
|
|
||||||
pub const INT_LEAST32_MIN: i32 = -2147483648;
|
|
||||||
pub const INT_LEAST8_MAX: u32 = 127;
|
|
||||||
pub const INT_LEAST16_MAX: u32 = 32767;
|
|
||||||
pub const INT_LEAST32_MAX: u32 = 2147483647;
|
|
||||||
pub const UINT_LEAST8_MAX: u32 = 255;
|
|
||||||
pub const UINT_LEAST16_MAX: u32 = 65535;
|
|
||||||
pub const UINT_LEAST32_MAX: u32 = 4294967295;
|
|
||||||
pub const INT_FAST8_MIN: i32 = -128;
|
|
||||||
pub const INT_FAST16_MIN: i64 = -9223372036854775808;
|
|
||||||
pub const INT_FAST32_MIN: i64 = -9223372036854775808;
|
|
||||||
pub const INT_FAST8_MAX: u32 = 127;
|
|
||||||
pub const INT_FAST16_MAX: u64 = 9223372036854775807;
|
|
||||||
pub const INT_FAST32_MAX: u64 = 9223372036854775807;
|
|
||||||
pub const UINT_FAST8_MAX: u32 = 255;
|
|
||||||
pub const UINT_FAST16_MAX: i32 = -1;
|
|
||||||
pub const UINT_FAST32_MAX: i32 = -1;
|
|
||||||
pub const INTPTR_MIN: i64 = -9223372036854775808;
|
|
||||||
pub const INTPTR_MAX: u64 = 9223372036854775807;
|
|
||||||
pub const UINTPTR_MAX: i32 = -1;
|
|
||||||
pub const PTRDIFF_MIN: i64 = -9223372036854775808;
|
|
||||||
pub const PTRDIFF_MAX: u64 = 9223372036854775807;
|
|
||||||
pub const SIG_ATOMIC_MIN: i32 = -2147483648;
|
|
||||||
pub const SIG_ATOMIC_MAX: u32 = 2147483647;
|
|
||||||
pub const SIZE_MAX: i32 = -1;
|
|
||||||
pub const WINT_MIN: u32 = 0;
|
|
||||||
pub const WINT_MAX: u32 = 4294967295;
|
|
||||||
pub const TSDB_DATA_TYPE_NULL: u32 = 0;
|
|
||||||
pub const TSDB_DATA_TYPE_BOOL: u32 = 1;
|
|
||||||
pub const TSDB_DATA_TYPE_TINYINT: u32 = 2;
|
|
||||||
pub const TSDB_DATA_TYPE_SMALLINT: u32 = 3;
|
|
||||||
pub const TSDB_DATA_TYPE_INT: u32 = 4;
|
|
||||||
pub const TSDB_DATA_TYPE_BIGINT: u32 = 5;
|
|
||||||
pub const TSDB_DATA_TYPE_FLOAT: u32 = 6;
|
|
||||||
pub const TSDB_DATA_TYPE_DOUBLE: u32 = 7;
|
|
||||||
pub const TSDB_DATA_TYPE_BINARY: u32 = 8;
|
|
||||||
pub const TSDB_DATA_TYPE_TIMESTAMP: u32 = 9;
|
|
||||||
pub const TSDB_DATA_TYPE_NCHAR: u32 = 10;
|
|
||||||
pub type int_least8_t = ::std::os::raw::c_schar;
|
|
||||||
pub type int_least16_t = ::std::os::raw::c_short;
|
|
||||||
pub type int_least32_t = ::std::os::raw::c_int;
|
|
||||||
pub type int_least64_t = ::std::os::raw::c_long;
|
|
||||||
pub type uint_least8_t = ::std::os::raw::c_uchar;
|
|
||||||
pub type uint_least16_t = ::std::os::raw::c_ushort;
|
|
||||||
pub type uint_least32_t = ::std::os::raw::c_uint;
|
|
||||||
pub type uint_least64_t = ::std::os::raw::c_ulong;
|
|
||||||
pub type int_fast8_t = ::std::os::raw::c_schar;
|
|
||||||
pub type int_fast16_t = ::std::os::raw::c_long;
|
|
||||||
pub type int_fast32_t = ::std::os::raw::c_long;
|
|
||||||
pub type int_fast64_t = ::std::os::raw::c_long;
|
|
||||||
pub type uint_fast8_t = ::std::os::raw::c_uchar;
|
|
||||||
pub type uint_fast16_t = ::std::os::raw::c_ulong;
|
|
||||||
pub type uint_fast32_t = ::std::os::raw::c_ulong;
|
|
||||||
pub type uint_fast64_t = ::std::os::raw::c_ulong;
|
|
||||||
pub type intmax_t = ::std::os::raw::c_long;
|
|
||||||
pub type uintmax_t = ::std::os::raw::c_ulong;
|
|
||||||
pub const TSDB_OPTION_TSDB_OPTION_LOCALE: TSDB_OPTION = 0;
|
|
||||||
pub const TSDB_OPTION_TSDB_OPTION_CHARSET: TSDB_OPTION = 1;
|
|
||||||
pub const TSDB_OPTION_TSDB_OPTION_TIMEZONE: TSDB_OPTION = 2;
|
|
||||||
pub const TSDB_OPTION_TSDB_OPTION_CONFIGDIR: TSDB_OPTION = 3;
|
|
||||||
pub const TSDB_OPTION_TSDB_OPTION_SHELL_ACTIVITY_TIMER: TSDB_OPTION = 4;
|
|
||||||
pub const TSDB_OPTION_TSDB_MAX_OPTIONS: TSDB_OPTION = 5;
|
|
||||||
pub type TSDB_OPTION = u32;
|
|
||||||
#[repr(C)]
|
|
||||||
#[derive(Copy, Clone)]
|
|
||||||
pub struct taosField {
|
|
||||||
pub name: [::std::os::raw::c_char; 64usize],
|
|
||||||
pub bytes: ::std::os::raw::c_short,
|
|
||||||
pub type_: ::std::os::raw::c_char,
|
|
||||||
}
|
|
||||||
#[test]
|
|
||||||
fn bindgen_test_layout_taosField() {
|
|
||||||
assert_eq!(
|
|
||||||
::std::mem::size_of::<taosField>(),
|
|
||||||
68usize,
|
|
||||||
concat!("Size of: ", stringify!(taosField))
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
::std::mem::align_of::<taosField>(),
|
|
||||||
2usize,
|
|
||||||
concat!("Alignment of ", stringify!(taosField))
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
unsafe { &(*(::std::ptr::null::<taosField>())).name as *const _ as usize },
|
|
||||||
0usize,
|
|
||||||
concat!(
|
|
||||||
"Offset of field: ",
|
|
||||||
stringify!(taosField),
|
|
||||||
"::",
|
|
||||||
stringify!(name)
|
|
||||||
)
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
unsafe { &(*(::std::ptr::null::<taosField>())).bytes as *const _ as usize },
|
|
||||||
64usize,
|
|
||||||
concat!(
|
|
||||||
"Offset of field: ",
|
|
||||||
stringify!(taosField),
|
|
||||||
"::",
|
|
||||||
stringify!(bytes)
|
|
||||||
)
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
unsafe { &(*(::std::ptr::null::<taosField>())).type_ as *const _ as usize },
|
|
||||||
66usize,
|
|
||||||
concat!(
|
|
||||||
"Offset of field: ",
|
|
||||||
stringify!(taosField),
|
|
||||||
"::",
|
|
||||||
stringify!(type_)
|
|
||||||
)
|
|
||||||
);
|
|
||||||
}
|
|
||||||
pub type TAOS_FIELD = taosField;
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_init();
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_options(
|
|
||||||
option: TSDB_OPTION,
|
|
||||||
arg: *const ::std::os::raw::c_void,
|
|
||||||
...
|
|
||||||
) -> ::std::os::raw::c_int;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_connect(
|
|
||||||
ip: *mut ::std::os::raw::c_char,
|
|
||||||
user: *mut ::std::os::raw::c_char,
|
|
||||||
pass: *mut ::std::os::raw::c_char,
|
|
||||||
db: *mut ::std::os::raw::c_char,
|
|
||||||
port: ::std::os::raw::c_int,
|
|
||||||
) -> *mut ::std::os::raw::c_void;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_close(taos: *mut ::std::os::raw::c_void);
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_query(
|
|
||||||
taos: *mut ::std::os::raw::c_void,
|
|
||||||
sqlstr: *mut ::std::os::raw::c_char,
|
|
||||||
) -> ::std::os::raw::c_int;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_use_result(taos: *mut ::std::os::raw::c_void) -> *mut ::std::os::raw::c_void;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_fetch_row(res: *mut ::std::os::raw::c_void) -> *mut *mut ::std::os::raw::c_void;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_result_precision(res: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_free_result(res: *mut ::std::os::raw::c_void);
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_field_count(taos: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_num_fields(res: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_affected_rows(taos: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_fetch_fields(res: *mut ::std::os::raw::c_void) -> *mut TAOS_FIELD;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_select_db(
|
|
||||||
taos: *mut ::std::os::raw::c_void,
|
|
||||||
db: *mut ::std::os::raw::c_char,
|
|
||||||
) -> ::std::os::raw::c_int;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_print_row(
|
|
||||||
str: *mut ::std::os::raw::c_char,
|
|
||||||
row: *mut *mut ::std::os::raw::c_void,
|
|
||||||
fields: *mut TAOS_FIELD,
|
|
||||||
num_fields: ::std::os::raw::c_int,
|
|
||||||
) -> ::std::os::raw::c_int;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_stop_query(res: *mut ::std::os::raw::c_void);
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_fetch_block(
|
|
||||||
res: *mut ::std::os::raw::c_void,
|
|
||||||
rows: *mut *mut *mut ::std::os::raw::c_void,
|
|
||||||
) -> ::std::os::raw::c_int;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_validate_sql(
|
|
||||||
taos: *mut ::std::os::raw::c_void,
|
|
||||||
sql: *mut ::std::os::raw::c_char,
|
|
||||||
) -> ::std::os::raw::c_int;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_get_server_info(taos: *mut ::std::os::raw::c_void) -> *mut ::std::os::raw::c_char;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_get_client_info() -> *mut ::std::os::raw::c_char;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_errstr(taos: *mut ::std::os::raw::c_void) -> *mut ::std::os::raw::c_char;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_errno(taos: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_query_a(
|
|
||||||
taos: *mut ::std::os::raw::c_void,
|
|
||||||
sqlstr: *mut ::std::os::raw::c_char,
|
|
||||||
fp: ::std::option::Option<
|
|
||||||
unsafe extern "C" fn(
|
|
||||||
param: *mut ::std::os::raw::c_void,
|
|
||||||
arg1: *mut ::std::os::raw::c_void,
|
|
||||||
code: ::std::os::raw::c_int,
|
|
||||||
),
|
|
||||||
>,
|
|
||||||
param: *mut ::std::os::raw::c_void,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_fetch_rows_a(
|
|
||||||
res: *mut ::std::os::raw::c_void,
|
|
||||||
fp: ::std::option::Option<
|
|
||||||
unsafe extern "C" fn(
|
|
||||||
param: *mut ::std::os::raw::c_void,
|
|
||||||
arg1: *mut ::std::os::raw::c_void,
|
|
||||||
numOfRows: ::std::os::raw::c_int,
|
|
||||||
),
|
|
||||||
>,
|
|
||||||
param: *mut ::std::os::raw::c_void,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_fetch_row_a(
|
|
||||||
res: *mut ::std::os::raw::c_void,
|
|
||||||
fp: ::std::option::Option<
|
|
||||||
unsafe extern "C" fn(
|
|
||||||
param: *mut ::std::os::raw::c_void,
|
|
||||||
arg1: *mut ::std::os::raw::c_void,
|
|
||||||
row: *mut *mut ::std::os::raw::c_void,
|
|
||||||
),
|
|
||||||
>,
|
|
||||||
param: *mut ::std::os::raw::c_void,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_subscribe(
|
|
||||||
host: *mut ::std::os::raw::c_char,
|
|
||||||
user: *mut ::std::os::raw::c_char,
|
|
||||||
pass: *mut ::std::os::raw::c_char,
|
|
||||||
db: *mut ::std::os::raw::c_char,
|
|
||||||
table: *mut ::std::os::raw::c_char,
|
|
||||||
time: i64,
|
|
||||||
mseconds: ::std::os::raw::c_int,
|
|
||||||
) -> *mut ::std::os::raw::c_void;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_consume(tsub: *mut ::std::os::raw::c_void) -> *mut *mut ::std::os::raw::c_void;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_unsubscribe(tsub: *mut ::std::os::raw::c_void);
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_open_stream(
|
|
||||||
taos: *mut ::std::os::raw::c_void,
|
|
||||||
sqlstr: *mut ::std::os::raw::c_char,
|
|
||||||
fp: ::std::option::Option<
|
|
||||||
unsafe extern "C" fn(
|
|
||||||
param: *mut ::std::os::raw::c_void,
|
|
||||||
arg1: *mut ::std::os::raw::c_void,
|
|
||||||
row: *mut *mut ::std::os::raw::c_void,
|
|
||||||
),
|
|
||||||
>,
|
|
||||||
stime: i64,
|
|
||||||
param: *mut ::std::os::raw::c_void,
|
|
||||||
callback: ::std::option::Option<unsafe extern "C" fn(arg1: *mut ::std::os::raw::c_void)>,
|
|
||||||
) -> *mut ::std::os::raw::c_void;
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub fn taos_close_stream(tstr: *mut ::std::os::raw::c_void);
|
|
||||||
}
|
|
||||||
extern "C" {
|
|
||||||
pub static mut configDir: [::std::os::raw::c_char; 0usize];
|
|
||||||
}
|
|
|
@@ -1,10 +0,0 @@
#![allow(unused)]
#![allow(non_camel_case_types)]

pub mod subscriber;
pub use subscriber::*;

pub mod tdengine;
pub use tdengine::*;

pub mod utils;

@ -1,77 +0,0 @@
|
||||||
#![allow(non_camel_case_types)]
|
|
||||||
#![allow(non_snake_case)]
|
|
||||||
|
|
||||||
#[path = "utils.rs"]
|
|
||||||
mod utils;
|
|
||||||
use utils::*;
|
|
||||||
use utils::bindings::*;
|
|
||||||
|
|
||||||
use std::os::raw::{c_void, c_char, c_int, c_long};
|
|
||||||
|
|
||||||
pub struct Subscriber {
|
|
||||||
tsub: *mut c_void,
|
|
||||||
fields: *mut taosField,
|
|
||||||
fcount: c_int,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Subscriber {
|
|
||||||
pub fn new(host: &str,
|
|
||||||
username: &str,
|
|
||||||
passwd: &str,
|
|
||||||
db: &str,
|
|
||||||
table:&str,
|
|
||||||
time: i64,
|
|
||||||
mseconds: i32
|
|
||||||
) -> Result<Subscriber, &'static str> {
|
|
||||||
unsafe {
|
|
||||||
let mut tsub = taos_subscribe(str_into_raw(host),
|
|
||||||
str_into_raw(username),
|
|
||||||
str_into_raw(passwd),
|
|
||||||
str_into_raw(db),
|
|
||||||
str_into_raw(table),
|
|
||||||
time as c_long,
|
|
||||||
mseconds as c_int);
|
|
||||||
if tsub.is_null() {
|
|
||||||
return Err("subscribe error")
|
|
||||||
}
|
|
||||||
println!("subscribed to {} user:{}, db:{}, tb:{}, time:{}, mseconds:{}",
|
|
||||||
host, username, db, table, time, mseconds);
|
|
||||||
|
|
||||||
let mut fields = taos_fetch_fields(tsub);
|
|
||||||
if fields.is_null() {
|
|
||||||
taos_unsubscribe(tsub);
|
|
||||||
return Err("fetch fields error")
|
|
||||||
}
|
|
||||||
|
|
||||||
let fcount = taos_field_count(tsub);
|
|
||||||
if fcount == 0 {
|
|
||||||
taos_unsubscribe(tsub);
|
|
||||||
return Err("fields count is 0")
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Subscriber{tsub, fields, fcount})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn consume(self: &Subscriber) -> Result<Row, &'static str> {
|
|
||||||
unsafe {
|
|
||||||
let taosRow = taos_consume(self.tsub);
|
|
||||||
if taosRow.is_null() {
|
|
||||||
return Err("consume error")
|
|
||||||
}
|
|
||||||
let taosRow= std::slice::from_raw_parts(taosRow, self.fcount as usize);
|
|
||||||
let row = raw_into_row(self.fields, self.fcount, &taosRow);
|
|
||||||
Ok(row)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn print_row(self: &Subscriber, row: &Row) {
|
|
||||||
println!("{}", format_row(row));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for Subscriber {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
unsafe {taos_unsubscribe(self.tsub);}
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,65 +0,0 @@
|
||||||
#[path = "bindings.rs"]
|
|
||||||
mod bindings;
|
|
||||||
use bindings::*;
|
|
||||||
|
|
||||||
#[path = "utils.rs"]
|
|
||||||
mod utils;
|
|
||||||
use utils::*;
|
|
||||||
|
|
||||||
use std::os::raw::c_void;
|
|
||||||
use std::os::raw::c_char;
|
|
||||||
use std::os::raw::c_int;
|
|
||||||
use std::os::raw::c_long;
|
|
||||||
|
|
||||||
pub struct Tdengine {
|
|
||||||
conn: *mut c_void,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// - **TODO**: doc
|
|
||||||
impl Tdengine {
|
|
||||||
|
|
||||||
//! - **TODO**: implement default param.
|
|
||||||
//!
|
|
||||||
//! > refer to https://stackoverflow.com/questions/24047686/default-function-arguments-in-rust
|
|
||||||
pub fn new(ip: &str, username: &str, passwd: &str, db: &str, port: i32) -> Result<Tdengine, &'static str> {
|
|
||||||
unsafe {
|
|
||||||
taos_init();
|
|
||||||
let mut conn = taos_connect(str_into_raw(ip),
|
|
||||||
str_into_raw(username),
|
|
||||||
str_into_raw(passwd),
|
|
||||||
str_into_raw(db),
|
|
||||||
port as c_int);
|
|
||||||
if conn.is_null() {
|
|
||||||
Err("connect error")
|
|
||||||
} else {
|
|
||||||
println!("connected to {}:{} user:{}, db:{}", ip, port, username, db);
|
|
||||||
Ok(Tdengine {conn})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// - **TODO**: check error code
|
|
||||||
pub fn query(self: &Tdengine, s: &str) {
|
|
||||||
unsafe {
|
|
||||||
if taos_query(self.conn, str_into_raw(s)) == 0 {
|
|
||||||
println!("query '{}' ok", s);
|
|
||||||
} else {
|
|
||||||
println!("query '{}' error: {}", s, raw_into_str(taos_errstr(self.conn)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Drop for Tdengine {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
unsafe {taos_close(self.conn);}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
#[test]
|
|
||||||
fn it_works() {
|
|
||||||
assert_eq!(2 + 2, 4);
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,127 +0,0 @@
|
||||||
#[path = "bindings.rs"]
|
|
||||||
pub mod bindings;
|
|
||||||
use bindings::*;
|
|
||||||
|
|
||||||
use std::fmt;
|
|
||||||
use std::fmt::Display;
|
|
||||||
use std::os::raw::{c_void, c_char, c_int};
|
|
||||||
use std::ffi::{CString, CStr};
|
|
||||||
|
|
||||||
// #[derive(Debug)]
|
|
||||||
pub enum Field {
|
|
||||||
tinyInt(i8),
|
|
||||||
smallInt(i16),
|
|
||||||
normalInt(i32),
|
|
||||||
bigInt(i64),
|
|
||||||
float(f32),
|
|
||||||
double(f64),
|
|
||||||
binary(String),
|
|
||||||
timeStamp(i64),
|
|
||||||
boolType(bool),
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
impl fmt::Display for Field {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
match &*self {
|
|
||||||
Field::tinyInt(v) => write!(f, "{}", v),
|
|
||||||
Field::smallInt(v) => write!(f, "{}", v),
|
|
||||||
Field::normalInt(v) => write!(f, "{}", v),
|
|
||||||
Field::bigInt(v) => write!(f, "{}", v),
|
|
||||||
Field::float(v) => write!(f, "{}", v),
|
|
||||||
Field::double(v) => write!(f, "{}", v),
|
|
||||||
Field::binary(v) => write!(f, "{}", v),
|
|
||||||
Field::tinyInt(v) => write!(f, "{}", v),
|
|
||||||
Field::timeStamp(v) => write!(f, "{}", v),
|
|
||||||
Field::boolType(v) => write!(f, "{}", v),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// pub type Fields = Vec<Field>;
|
|
||||||
pub type Row = Vec<Field>;
|
|
||||||
|
|
||||||
pub fn format_row(row: &Row) -> String {
|
|
||||||
let mut s = String::new();
|
|
||||||
for field in row {
|
|
||||||
s.push_str(format!("{} ", field).as_str());
|
|
||||||
// println!("{}", field);
|
|
||||||
}
|
|
||||||
s
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn str_into_raw(s: &str) -> *mut c_char {
|
|
||||||
if s.is_empty() {
|
|
||||||
0 as *mut c_char
|
|
||||||
} else {
|
|
||||||
CString::new(s).unwrap().into_raw()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn raw_into_str<'a>(raw: *mut c_char) -> &'static str {
|
|
||||||
unsafe {CStr::from_ptr(raw).to_str().unwrap()}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
pub fn raw_into_field(raw: *mut TAOS_FIELD, fcount: c_int) -> Vec<taosField> {
|
|
||||||
let mut fields: Vec<taosField> = Vec::new();
|
|
||||||
|
|
||||||
for i in 0..fcount as isize {
|
|
||||||
fields.push(
|
|
||||||
taosField {
|
|
||||||
name: unsafe {(*raw.offset(i as isize))}.name,
|
|
||||||
bytes: unsafe {(*raw.offset(i as isize))}.bytes,
|
|
||||||
type_: unsafe {(*raw.offset(i as isize))}.type_,
|
|
||||||
}
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// TODO: error[E0382]: use of moved value: `fields`
|
|
||||||
// for field in &fields {
|
|
||||||
// println!("type: {}, bytes: {}", field.type_, field.bytes);
|
|
||||||
// }
|
|
||||||
|
|
||||||
fields
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn raw_into_row(fields: *mut TAOS_FIELD, fcount: c_int, raw_row: &[*mut c_void]) -> Row {
|
|
||||||
let mut row: Row= Vec::new();
|
|
||||||
let fields = raw_into_field(fields, fcount);
|
|
||||||
|
|
||||||
for (i, field) in fields.iter().enumerate() {
|
|
||||||
// println!("index: {}, type: {}, bytes: {}", i, field.type_, field.bytes);
|
|
||||||
unsafe {
|
|
||||||
match field.type_ as u32 {
|
|
||||||
TSDB_DATA_TYPE_TINYINT => {
|
|
||||||
row.push(Field::tinyInt(*(raw_row[i] as *mut i8)));
|
|
||||||
}
|
|
||||||
TSDB_DATA_TYPE_SMALLINT => {
|
|
||||||
row.push(Field::smallInt(*(raw_row[i] as *mut i16)));
|
|
||||||
}
|
|
||||||
TSDB_DATA_TYPE_INT => {
|
|
||||||
row.push(Field::normalInt(*(raw_row[i] as *mut i32)));
|
|
||||||
}
|
|
||||||
TSDB_DATA_TYPE_BIGINT => {
|
|
||||||
row.push(Field::bigInt(*(raw_row[i] as *mut i64)));
|
|
||||||
}
|
|
||||||
TSDB_DATA_TYPE_FLOAT => {
|
|
||||||
row.push(Field::float(*(raw_row[i] as *mut f32)));
|
|
||||||
}
|
|
||||||
TSDB_DATA_TYPE_DOUBLE => {
|
|
||||||
row.push(Field::double(*(raw_row[i] as *mut f64)));
|
|
||||||
}
|
|
||||||
TSDB_DATA_TYPE_BINARY | TSDB_DATA_TYPE_NCHAR => {
|
|
||||||
// row.push(Field::binary(*(raw_row[i] as *mut f64)));
|
|
||||||
}
|
|
||||||
TSDB_DATA_TYPE_TIMESTAMP => {
|
|
||||||
row.push(Field::timeStamp(*(raw_row[i] as *mut i64)));
|
|
||||||
}
|
|
||||||
TSDB_DATA_TYPE_BOOL => {
|
|
||||||
// row.push(Field::boolType(*(raw_row[i] as *mut i8) as bool));
|
|
||||||
}
|
|
||||||
_ => println!(""),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
row
|
|
||||||
}
|
|
|
@@ -1,8 +1,8 @@
 #!/bin/bash
 
 today=`date +"%Y%m%d"`
-WORK_DIR=/home/ubuntu/pxiao/
-PERFORMANCE_TEST_REPORT=$TDENGINE_DIR/tests/performance-test-report-$today.log
+WORK_DIR=/home/ubuntu/pxiao
+PERFORMANCE_TEST_REPORT=$WORK_DIR/TDengine/tests/performance-test-report-$today.log
 
 # Coloured Echoes #
 function red_echo { echo -e "\033[31m$@\033[0m"; } #
@@ -60,11 +60,12 @@ function buildTDengine {
 }
 
 function runQueryPerfTest {
-	nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taodperf/ > /dev/null 2>&1 &
+	[ -f $PERFORMANCE_TEST_REPORT ] && rm $PERFORMANCE_TEST_REPORT
+	nohup $WORK_DIR/TDengine/debug/build/bin/taosd -c /etc/taosperf/ > /dev/null 2>&1 &
 	echoInfo "Run Performance Test"
 	cd $WORK_DIR/TDengine/tests/pytest
 
-	python3 query/queryPerformance.py | tee -a $PERFORMANCE_TEST_REPORT
+	python3 query/queryPerformance.py 0 | tee -a $PERFORMANCE_TEST_REPORT
 }
 
 
@@ -77,9 +78,9 @@ function sendReport {
 
 	sed -i 's/\x1b\[[0-9;]*m//g' $PERFORMANCE_TEST_REPORT
 	BODY_CONTENT=`cat $PERFORMANCE_TEST_REPORT`
-	echo -e "to: ${receiver}\nsubject: Query Performace Report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
+	echo -e "From: <support@taosdata.com>\nto: ${receiver}\nsubject: Query Performace Report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
 	(cat - && uuencode $PERFORMANCE_TEST_REPORT performance-test-report-$today.log) | \
-	ssmtp "${receiver}" && echo "Report Sent!"
+	/usr/sbin/ssmtp "${receiver}" && echo "Report Sent!"
 }
 
 
@ -16,112 +16,202 @@ import sys
|
||||||
import json
|
import json
|
||||||
import time
|
import time
|
||||||
import random
|
import random
|
||||||
# query sql
|
import requests
|
||||||
query_sql = [
|
from requests.auth import HTTPBasicAuth
|
||||||
# first supertable
|
func_list=['avg','count','twa','sum','stddev','leastsquares','min',
|
||||||
"select count(*) from test.meters ;",
|
'max','first','last','top','bottom','percentile','apercentile',
|
||||||
"select count(*) from test.meters where t3 > 2;",
|
'last_row','diff','spread']
|
||||||
"select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';",
|
condition_list=[
|
||||||
"select count(*) from test.meters where t7 like 'taos_1%';",
|
"where _c0 > now -10d ",
|
||||||
"select count(*) from test.meters where t7 like '_____2';",
|
'interval(10s)',
|
||||||
"select count(*) from test.meters where t8 like '%思%';",
|
'limit 10',
|
||||||
"select count(*) from test.meters interval(1n) order by ts desc;",
|
'group by',
|
||||||
#"select max(c0) from test.meters group by tbname",
|
'order by',
|
||||||
"select first(ts) from test.meters where t5 >5000 and t5<5100;",
|
'fill(null)'
|
||||||
"select last(ts) from test.meters where t5 >5000 and t5<5100;",
|
|
||||||
"select last_row(*) from test.meters;",
|
|
||||||
"select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000" ,
|
|
||||||
"select avg(c1) from test.meters where t5 >5000 and t5<5100;",
|
|
||||||
"select bottom(c1, 2) from test.t1;",
|
|
||||||
"select diff(c1) from test.t1;",
|
|
||||||
"select leastsquares(c1, 1, 1) from test.t1 ;",
|
|
||||||
"select max(c1) from test.meters where t5 >5000 and t5<5100;",
|
|
||||||
"select min(c1) from test.meters where t5 >5000 and t5<5100;",
|
|
||||||
"select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;",
|
|
||||||
"select percentile(c1, 50) from test.t1;",
|
|
||||||
"select spread(c1) from test.t1 ;",
|
|
||||||
"select stddev(c1) from test.t1;",
|
|
||||||
"select sum(c1) from test.meters where t5 >5000 and t5<5100;",
|
|
||||||
"select top(c1, 2) from test.meters where t5 >5000 and t5<5100;"
|
|
||||||
"select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000" ,
|
|
||||||
"select avg(c4) from test.meters where t5 >5000 and t5<5100;",
|
|
||||||
"select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;",
|
|
||||||
"select diff(c4) from test.t1 where t5 >5000 and t5<5100;",
|
|
||||||
"select leastsquares(c4, 1, 1) from test.t1 ;",
|
|
||||||
"select max(c4) from test.meters where t5 >5000 and t5<5100;",
|
|
||||||
"select min(c4) from test.meters where t5 >5000 and t5<5100;",
|
|
||||||
"select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;",
|
|
||||||
"select percentile(c5, 50) from test.t1;",
|
|
||||||
"select spread(c5) from test.t1 ;",
|
|
||||||
"select stddev(c5) from test.t1 where t5 >5000 and t5<5100;",
|
|
||||||
"select sum(c5) from test.meters where t5 >5000 and t5<5100;",
|
|
||||||
"select top(c5, 2) from test.meters where t5 >5000 and t5<5100;",
|
|
||||||
#all vnode
|
|
||||||
"select count(*) from test.meters where t5 >5000 and t5<5100",
|
|
||||||
"select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100",
|
|
||||||
"select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100",
|
|
||||||
"select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100",
|
|
||||||
"select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100",
|
|
||||||
# second supertable
|
|
||||||
"select count(*) from test.meters1 where t3 > 2;",
|
|
||||||
"select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';",
|
|
||||||
"select count(*) from test.meters where t7 like 'taos_1%';",
|
|
||||||
"select count(*) from test.meters where t7 like '_____2';",
|
|
||||||
"select count(*) from test.meters where t8 like '%思%';",
|
|
||||||
"select count(*) from test.meters1 interval(1n) order by ts desc;",
|
|
||||||
#"select max(c0) from test.meters1 group by tbname",
|
|
||||||
"select first(ts) from test.meters1 where t5 >5000 and t5<5100;",
|
|
||||||
"select last(ts) from test.meters1 where t5 >5000 and t5<5100;",
|
|
||||||
"select last_row(*) from test.meters1 ;",
|
|
||||||
"select twa(c1) from test.m1 where ts > 1500000001000 and ts < 1500000101000" ,
|
|
||||||
"select avg(c1) from test.meters1 where t5 >5000 and t5<5100;",
|
|
||||||
"select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;",
|
|
||||||
"select diff(c1) from test.m1 ;",
|
|
||||||
"select leastsquares(c1, 1, 1) from test.m1 ;",
|
|
||||||
"select max(c1) from test.meters1 where t5 >5000 and t5<5100;",
|
|
||||||
"select min(c1) from test.meters1 where t5 >5000 and t5<5100;",
|
|
||||||
"select c1 + c2 + c1 / c0 + c2 from test.m1 ;",
|
|
||||||
"select percentile(c1, 50) from test.m1;",
|
|
||||||
"select spread(c1) from test.m1 ;",
|
|
||||||
"select stddev(c1) from test.m1;",
|
|
||||||
"select sum(c1) from test.meters1 where t5 >5000 and t5<5100;",
|
|
||||||
"select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;",
|
|
||||||
"select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000" ,
|
|
||||||
"select avg(c5) from test.meters1 where t5 >5000 and t5<5100;",
|
|
||||||
"select bottom(c5, 2) from test.m1;",
|
|
||||||
"select diff(c5) from test.m1;",
|
|
||||||
"select leastsquares(c5, 1, 1) from test.m1 ;",
|
|
||||||
"select max(c5) from test.meters1 where t5 >5000 and t5<5100;",
|
|
||||||
"select min(c5) from test.meters1 where t5 >5000 and t5<5100;",
|
|
||||||
"select c5 + c2 + c4 / c5 + c0 from test.m1;",
|
|
||||||
"select percentile(c4, 50) from test.m1;",
|
|
||||||
"select spread(c4) from test.m1 ;",
|
|
||||||
"select stddev(c4) from test.m1;",
|
|
||||||
"select sum(c4) from test.meters1 where t5 >5100 and t5<5300;",
|
|
||||||
"select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;",
|
|
||||||
"select count(*) from test.meters1 where t5 >5100 and t5<5300",
|
|
||||||
#all vnode
|
|
||||||
"select count(*) from test.meters1 where t5 >5100 and t5<5300",
|
|
||||||
"select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100",
|
|
||||||
"select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100",
|
|
||||||
"select max(c0),min(c5) from test.meters1 where t5 >5000 and t5<5100",
|
|
||||||
"select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100",
|
|
||||||
#join
|
|
||||||
# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5",
|
|
||||||
# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7",
|
|
||||||
# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8",
|
|
||||||
# "select meters.ts,meters1.c2 from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8"
|
|
||||||
]
|
]
|
||||||
|
where_list = ['_c0>now-10d',' <50'," like \'%a%\'"]
|
||||||
class ConcurrentInquiry:
|
class ConcurrentInquiry:
|
||||||
def initConnection(self):
|
def __init__(self,n_Therads=25,r_Therads=25):
|
||||||
self.numOfTherads = 50
|
self.n_numOfTherads = n_Therads
|
||||||
|
self.r_numOfTherads = r_Therads
|
||||||
self.ts=1500000001000
|
self.ts=1500000001000
|
||||||
|
self.dbname='test'
|
||||||
|
self.stb_list=[]
|
||||||
|
self.subtb_list=[]
|
||||||
|
self.stb_stru_list=[]
|
||||||
|
self.subtb_stru_list=[]
|
||||||
|
self.stb_tag_list=[]
|
||||||
|
self.subtb_tag_list=[]
|
||||||
|
|
||||||
def SetThreadsNum(self,num):
|
def SetThreadsNum(self,num):
|
||||||
self.numOfTherads=num
|
self.numOfTherads=num
|
||||||
def query_thread(self,threadID):
|
|
||||||
host = "10.211.55.14"
|
def ret_fcol(self,cl,sql): #返回结果的第一列
|
||||||
|
cl.execute(sql)
|
||||||
|
fcol_list=[]
|
||||||
|
for data in cl:
|
||||||
|
fcol_list.append(data[0])
|
||||||
|
return fcol_list
|
||||||
|
|
||||||
|
def r_stb_list(self,cl): #返回超级表列表
|
||||||
|
sql='show '+self.dbname+'.stables'
|
||||||
|
self.stb_list=self.ret_fcol(cl,sql)
|
||||||
|
|
||||||
|
def r_subtb_list(self,cl,stablename): #每个超级表返回2个子表
|
||||||
|
sql='select tbname from '+self.dbname+'.'+stablename+' limit 2;'
|
||||||
|
self.subtb_list+=self.ret_fcol(cl,sql)
|
||||||
|
|
||||||
|
def cal_struct(self,cl,tbname): #查看表结构
|
||||||
|
tb=[]
|
||||||
|
tag=[]
|
||||||
|
sql='describe '+self.dbname+'.'+tbname+';'
|
||||||
|
cl.execute(sql)
|
||||||
|
for data in cl:
|
||||||
|
if data[3]:
|
||||||
|
tag.append(data[0])
|
||||||
|
else:
|
||||||
|
tb.append(data[0])
|
||||||
|
return tb,tag
|
||||||
|
|
||||||
|
def r_stb_stru(self,cl): #获取所有超级表的表结构
|
||||||
|
for i in self.stb_list:
|
||||||
|
tb,tag=self.cal_struct(cl,i)
|
||||||
|
self.stb_stru_list.append(tb)
|
||||||
|
self.stb_tag_list.append(tag)
|
||||||
|
|
||||||
|
def r_subtb_stru(self,cl): #返回所有子表的表结构
|
||||||
|
for i in self.subtb_list:
|
||||||
|
tb,tag=self.cal_struct(cl,i)
|
||||||
|
self.subtb_stru_list.append(tb)
|
||||||
|
self.subtb_tag_list.append(tag)
|
||||||
|
|
||||||
|
def get_full(self): #获取所有的表、表结构
|
||||||
|
host = "127.0.0.1"
|
||||||
|
user = "root"
|
||||||
|
password = "taosdata"
|
||||||
|
conn = taos.connect(
|
||||||
|
host,
|
||||||
|
user,
|
||||||
|
password,
|
||||||
|
)
|
||||||
|
cl = conn.cursor()
|
||||||
|
self.r_stb_list(cl)
|
||||||
|
for i in self.stb_list:
|
||||||
|
self.r_subtb_list(cl,i)
|
||||||
|
self.r_stb_stru(cl)
|
||||||
|
self.r_subtb_stru(cl)
|
||||||
|
cl.close()
|
||||||
|
conn.close()
|
||||||
|
|
||||||
|
#query condition
|
||||||
|
def con_where(self,tlist):
|
||||||
|
l=[]
|
||||||
|
for i in range(random.randint(0,len(tlist))):
|
||||||
|
c = random.choice(where_list)
|
||||||
|
if c == '_c0>now-10d':
|
||||||
|
l.append(c)
|
||||||
|
else:
|
||||||
|
l.append(random.choice(tlist)+c)
|
||||||
|
return 'where '+random.choice([' and ',' or ']).join(l)
|
||||||
|
|
||||||
|
def con_interval(self,tlist):
|
||||||
|
return random.choice(['interval(10s)','interval(10d)','interval(1n)'])
|
||||||
|
|
||||||
|
def con_limit(self,tlist):
|
||||||
|
return random.choice(['limit 10','limit 10 offset 10','slimit 10','slimit 10 offset 10','limit 10 slimit 10','limit 10 offset 5 slimit 5 soffset 10'])
|
||||||
|
|
||||||
|
def con_fill(self,tlist):
|
||||||
|
return random.choice(['fill(null)','fill(prev)','fill(none)','fill(LINEAR)'])
|
||||||
|
|
||||||
|
def con_group(self,tlist):
|
||||||
|
return 'group by '+random.choice(tlist)
|
||||||
|
|
||||||
|
def con_order(self,tlist):
|
||||||
|
return 'order by '+random.choice(tlist)
|
||||||
|
|
||||||
|
def gen_query_sql(self): #生成查询语句
|
||||||
|
tbi=random.randint(0,len(self.subtb_list)+len(self.stb_list)) #随机决定查询哪张表
|
||||||
|
tbname=''
|
||||||
|
col_list=[]
|
||||||
|
tag_list=[]
|
||||||
|
is_stb=0
|
||||||
|
if tbi>len(self.stb_list) :
|
||||||
|
tbi=tbi-len(self.stb_list)
|
||||||
|
tbname=self.subtb_list[tbi-1]
|
||||||
|
col_list=self.subtb_stru_list[tbi-1]
|
||||||
|
tag_list=self.subtb_tag_list[tbi-1]
|
||||||
|
else:
|
||||||
|
tbname=self.stb_list[tbi-1]
|
||||||
|
col_list=self.stb_stru_list[tbi-1]
|
||||||
|
tag_list=self.stb_tag_list[tbi-1]
|
||||||
|
is_stb=1
|
||||||
|
tlist=col_list+tag_list
|
||||||
|
con_rand=random.randint(0,len(condition_list))
|
||||||
|
func_rand=random.randint(0,len(func_list))
|
||||||
|
col_rand=random.randint(0,len(col_list))
|
||||||
|
tag_rand=random.randint(0,len(tag_list))
|
||||||
|
t_rand=random.randint(0,len(tlist))
|
||||||
|
sql='select ' #select
|
||||||
|
random.shuffle(col_list)
|
||||||
|
random.shuffle(func_list)
|
||||||
|
sel_col_list=[]
|
||||||
|
col_rand=random.randint(0,len(col_list))
|
||||||
|
for i,j in zip(col_list[0:col_rand],func_list): #决定每个被查询col的函数
|
||||||
|
if j == 'leastsquares':
|
||||||
|
sel_col_list.append(j+'('+i+',1,1)')
|
||||||
|
elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
|
||||||
|
sel_col_list.append(j+'('+i+',1)')
|
||||||
|
else:
|
||||||
|
sel_col_list.append(j+'('+i+')')
|
||||||
|
sql=sql+','.join(sel_col_list)+' from '+random.choice(self.stb_list+self.subtb_list)+' ' #select col & func
|
||||||
|
con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill]
|
||||||
|
sel_con=random.sample(con_func,random.randint(0,len(con_func)))
|
||||||
|
sel_con_list=[]
|
||||||
|
for i in sel_con:
|
||||||
|
sel_con_list.append(i(tlist)) #获取对应的条件函数
|
||||||
|
sql+=' '.join(sel_con_list) # condition
|
||||||
|
print(sql)
|
||||||
|
return sql
|
||||||
|
|
||||||
|
def rest_query(self,sql): #rest 接口
|
||||||
|
host = "127.0.0.1"
|
||||||
|
user = "root"
|
||||||
|
password = "taosdata"
|
||||||
|
port =6041
|
||||||
|
url = "http://{}:{}/rest/sql".format(host, port )
|
||||||
|
try:
|
||||||
|
r = requests.post(url,
|
||||||
|
data = 'use test',
|
||||||
|
auth = HTTPBasicAuth('root', 'taosdata'))
|
||||||
|
r = requests.post(url,
|
||||||
|
data = sql,
|
||||||
|
auth = HTTPBasicAuth('root', 'taosdata'))
|
||||||
|
except:
|
||||||
|
print("REST API Failure (TODO: more info here)")
|
||||||
|
raise
|
||||||
|
rj = r.json()
|
||||||
|
if ('status' not in rj):
|
||||||
|
raise RuntimeError("No status in REST response")
|
||||||
|
|
||||||
|
if rj['status'] == 'error': # clearly reported error
|
||||||
|
if ('code' not in rj): # error without code
|
||||||
|
raise RuntimeError("REST error return without code")
|
||||||
|
errno = rj['code'] # May need to massage this in the future
|
||||||
|
# print("Raising programming error with REST return: {}".format(rj))
|
||||||
|
raise taos.error.ProgrammingError(
|
||||||
|
rj['desc'], errno) # todo: check existance of 'desc'
|
||||||
|
|
||||||
|
if rj['status'] != 'succ': # better be this
|
||||||
|
raise RuntimeError(
|
||||||
|
"Unexpected REST return status: {}".format(
|
||||||
|
rj['status']))
|
||||||
|
|
||||||
|
nRows = rj['rows'] if ('rows' in rj) else 0
|
||||||
|
return nRows
|
||||||
|
|
||||||
|
def query_thread_n(self,threadID): #使用原生python接口查询
|
||||||
|
host = "127.0.0.1"
|
||||||
user = "root"
|
user = "root"
|
||||||
password = "taosdata"
|
password = "taosdata"
|
||||||
conn = taos.connect(
|
conn = taos.connect(
|
||||||
|
@ -135,35 +225,59 @@ class ConcurrentInquiry:
|
||||||
print("Thread %d: starting" % threadID)
|
print("Thread %d: starting" % threadID)
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
ran_query_sql=query_sql
|
|
||||||
random.shuffle(ran_query_sql)
|
|
||||||
for i in ran_query_sql:
|
|
||||||
print("Thread %d : %s"% (threadID,i))
|
|
||||||
try:
|
try:
|
||||||
|
sql=self.gen_query_sql()
|
||||||
|
print("sql is ",sql)
|
||||||
start = time.time()
|
start = time.time()
|
||||||
cl.execute(i)
|
cl.execute(sql)
|
||||||
cl.fetchall
|
cl.fetchall()
|
||||||
end = time.time()
|
end = time.time()
|
||||||
print("time cost :",end-start)
|
print("time cost :",end-start)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(
|
print(
|
||||||
"Failure thread%d, sql: %s,exception: %s" %
|
"Failure thread%d, sql: %s,exception: %s" %
|
||||||
(threadID, str(i),str(e)))
|
(threadID, str(sql),str(e)))
|
||||||
exit(-1)
|
#exit(-1)
|
||||||
|
|
||||||
|
|
||||||
print("Thread %d: finishing" % threadID)
|
print("Thread %d: finishing" % threadID)
|
||||||
|
|
||||||
|
def query_thread_r(self,threadID): #使用rest接口查询
|
||||||
|
print("Thread %d: starting" % threadID)
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
sql=self.gen_query_sql()
|
||||||
|
print("sql is ",sql)
|
||||||
|
start = time.time()
|
||||||
|
self.rest_query(sql)
|
||||||
|
end = time.time()
|
||||||
|
print("time cost :",end-start)
|
||||||
|
except Exception as e:
|
||||||
|
print(
|
||||||
|
"Failure thread%d, sql: %s,exception: %s" %
|
||||||
|
(threadID, str(sql),str(e)))
|
||||||
|
#exit(-1)
|
||||||
|
|
||||||
|
|
||||||
|
print("Thread %d: finishing" % threadID)
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
|
print(self.n_numOfTherads,self.r_numOfTherads)
|
||||||
threads = []
|
threads = []
|
||||||
for i in range(self.numOfTherads):
|
for i in range(self.n_numOfTherads):
|
||||||
thread = threading.Thread(target=self.query_thread, args=(i,))
|
thread = threading.Thread(target=self.query_thread_n, args=(i,))
|
||||||
threads.append(thread)
|
threads.append(thread)
|
||||||
thread.start()
|
thread.start()
|
||||||
|
for i in range(self.r_numOfTherads):
|
||||||
q = ConcurrentInquiry()
|
# for i in range(1):
|
||||||
q.initConnection()
|
thread = threading.Thread(target=self.query_thread_r, args=(i,))
|
||||||
|
threads.append(thread)
|
||||||
|
thread.start()
|
||||||
|
if len(sys.argv)>1:
|
||||||
|
q = ConcurrentInquiry(n_Therads=sys.argv[1],r_Therads=sys.argv[2])
|
||||||
|
else:
|
||||||
|
q = ConcurrentInquiry()
|
||||||
|
q.get_full()
|
||||||
|
#q.gen_query_sql()
|
||||||
q.run()
|
q.run()
|
||||||
|
|
|
@@ -0,0 +1,169 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-
import threading
import taos
import sys
import json
import time
import random

# query sql
query_sql = [
# first supertable
"select count(*) from test.meters ;",
"select count(*) from test.meters where t3 > 2;",
"select count(*) from test.meters where ts <> '2020-05-13 10:00:00.002';",
"select count(*) from test.meters where t7 like 'taos_1%';",
"select count(*) from test.meters where t7 like '_____2';",
"select count(*) from test.meters where t8 like '%思%';",
"select count(*) from test.meters interval(1n) order by ts desc;",
#"select max(c0) from test.meters group by tbname",
"select first(ts) from test.meters where t5 >5000 and t5<5100;",
"select last(ts) from test.meters where t5 >5000 and t5<5100;",
"select last_row(*) from test.meters;",
"select twa(c1) from test.t1 where ts > 1500000001000 and ts < 1500000101000" ,
"select avg(c1) from test.meters where t5 >5000 and t5<5100;",
"select bottom(c1, 2) from test.t1;",
"select diff(c1) from test.t1;",
"select leastsquares(c1, 1, 1) from test.t1 ;",
"select max(c1) from test.meters where t5 >5000 and t5<5100;",
"select min(c1) from test.meters where t5 >5000 and t5<5100;",
"select c1 + c2 + c1 / c5 + c4 + c2 from test.t1;",
"select percentile(c1, 50) from test.t1;",
"select spread(c1) from test.t1 ;",
"select stddev(c1) from test.t1;",
"select sum(c1) from test.meters where t5 >5000 and t5<5100;",
"select top(c1, 2) from test.meters where t5 >5000 and t5<5100;",
"select twa(c4) from test.t1 where ts > 1500000001000 and ts < 1500000101000" ,
"select avg(c4) from test.meters where t5 >5000 and t5<5100;",
"select bottom(c4, 2) from test.t1 where t5 >5000 and t5<5100;",
"select diff(c4) from test.t1 where t5 >5000 and t5<5100;",
"select leastsquares(c4, 1, 1) from test.t1 ;",
"select max(c4) from test.meters where t5 >5000 and t5<5100;",
"select min(c4) from test.meters where t5 >5000 and t5<5100;",
"select c5 + c2 + c4 / c5 + c4 + c2 from test.t1 ;",
"select percentile(c5, 50) from test.t1;",
"select spread(c5) from test.t1 ;",
"select stddev(c5) from test.t1 where t5 >5000 and t5<5100;",
"select sum(c5) from test.meters where t5 >5000 and t5<5100;",
"select top(c5, 2) from test.meters where t5 >5000 and t5<5100;",
#all vnode
"select count(*) from test.meters where t5 >5000 and t5<5100",
"select max(c0),avg(c1) from test.meters where t5 >5000 and t5<5100",
"select sum(c5),avg(c1) from test.meters where t5 >5000 and t5<5100",
"select max(c0),min(c5) from test.meters where t5 >5000 and t5<5100",
"select min(c0),avg(c5) from test.meters where t5 >5000 and t5<5100",
# second supertable
"select count(*) from test.meters1 where t3 > 2;",
"select count(*) from test.meters1 where ts <> '2020-05-13 10:00:00.002';",
"select count(*) from test.meters where t7 like 'taos_1%';",
"select count(*) from test.meters where t7 like '_____2';",
"select count(*) from test.meters where t8 like '%思%';",
"select count(*) from test.meters1 interval(1n) order by ts desc;",
#"select max(c0) from test.meters1 group by tbname",
"select first(ts) from test.meters1 where t5 >5000 and t5<5100;",
"select last(ts) from test.meters1 where t5 >5000 and t5<5100;",
"select last_row(*) from test.meters1 ;",
"select twa(c1) from test.m1 where ts > 1500000001000 and ts < 1500000101000" ,
"select avg(c1) from test.meters1 where t5 >5000 and t5<5100;",
"select bottom(c1, 2) from test.m1 where t5 >5000 and t5<5100;",
"select diff(c1) from test.m1 ;",
"select leastsquares(c1, 1, 1) from test.m1 ;",
"select max(c1) from test.meters1 where t5 >5000 and t5<5100;",
"select min(c1) from test.meters1 where t5 >5000 and t5<5100;",
"select c1 + c2 + c1 / c0 + c2 from test.m1 ;",
"select percentile(c1, 50) from test.m1;",
"select spread(c1) from test.m1 ;",
"select stddev(c1) from test.m1;",
"select sum(c1) from test.meters1 where t5 >5000 and t5<5100;",
"select top(c1, 2) from test.meters1 where t5 >5000 and t5<5100;",
"select twa(c5) from test.m1 where ts > 1500000001000 and ts < 1500000101000" ,
"select avg(c5) from test.meters1 where t5 >5000 and t5<5100;",
"select bottom(c5, 2) from test.m1;",
"select diff(c5) from test.m1;",
"select leastsquares(c5, 1, 1) from test.m1 ;",
"select max(c5) from test.meters1 where t5 >5000 and t5<5100;",
"select min(c5) from test.meters1 where t5 >5000 and t5<5100;",
"select c5 + c2 + c4 / c5 + c0 from test.m1;",
"select percentile(c4, 50) from test.m1;",
"select spread(c4) from test.m1 ;",
"select stddev(c4) from test.m1;",
"select sum(c4) from test.meters1 where t5 >5100 and t5<5300;",
"select top(c4, 2) from test.meters1 where t5 >5100 and t5<5300;",
"select count(*) from test.meters1 where t5 >5100 and t5<5300",
#all vnode
"select count(*) from test.meters1 where t5 >5100 and t5<5300",
"select max(c0),avg(c1) from test.meters1 where t5 >5000 and t5<5100",
"select sum(c5),avg(c1) from test.meters1 where t5 >5000 and t5<5100",
"select max(c0),min(c5) from test.meters1 where t5 >5000 and t5<5100",
"select min(c0),avg(c5) from test.meters1 where t5 >5000 and t5<5100",
#join
# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t5 = meters1.t5",
# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t7 = meters1.t7",
# "select * from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8",
# "select meters.ts,meters1.c2 from meters,meters1 where meters.ts = meters1.ts and meters.t8 = meters1.t8"
]

class ConcurrentInquiry:
    def initConnection(self):
        self.numOfTherads = 50
        self.ts=1500000001000

    def SetThreadsNum(self,num):
        self.numOfTherads=num

    def query_thread(self,threadID):
        host = "10.211.55.14"
        user = "root"
        password = "taosdata"
        conn = taos.connect(
            host,
            user,
            password,
            )
        cl = conn.cursor()
        cl.execute("use test;")

        print("Thread %d: starting" % threadID)

        while True:
            ran_query_sql=query_sql
            random.shuffle(ran_query_sql)
            for i in ran_query_sql:
                print("Thread %d : %s"% (threadID,i))
                try:
                    start = time.time()
                    cl.execute(i)
                    cl.fetchall()
                    end = time.time()
                    print("time cost :",end-start)
                except Exception as e:
                    print(
                        "Failure thread%d, sql: %s,exception: %s" %
                        (threadID, str(i),str(e)))
                    exit(-1)

        print("Thread %d: finishing" % threadID)

    def run(self):
        threads = []
        for i in range(self.numOfTherads):
            thread = threading.Thread(target=self.query_thread, args=(i,))
            threads.append(thread)
            thread.start()

q = ConcurrentInquiry()
q.initConnection()
q.run()
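A minimal usage sketch (not part of the diff): initConnection above hard-codes 50 query threads, and the SetThreadsNum helper is defined but never called in this file; it could be used to override the default before run():

q = ConcurrentInquiry()
q.initConnection()
q.SetThreadsNum(10)  # assumed example value; replaces the default of 50 worker threads
q.run()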
@@ -202,6 +202,7 @@ python3 queryCount.py
 python3 ./test.py -f query/queryGroupbyWithInterval.py
 python3 client/twoClients.py
 python3 test.py -f query/queryInterval.py
+python3 test.py -f query/queryFillTest.py

 # tools
 python3 test.py -f tools/taosdemo.py
@@ -0,0 +1,71 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor())

        self.rowNum = 10
        self.ts = 1537146000000

    def run(self):
        tdSql.prepare()

        tdSql.execute('''create table test(ts timestamp, col1 int, col2 int) tags(loc nchar(20))''')
        tdSql.execute("create table test1 using test tags('beijing')")
        tdSql.execute("create table test2 using test tags('shanghai')")
        for i in range(self.rowNum):
            tdSql.execute("insert into test1 values(%d, %d, %d)" % (self.ts + i, i + 1, i + 1))
            tdSql.execute("insert into test2 values(%d, %d, %d)" % (self.ts + i, i + 1, i + 1))

        # arithmetic verification
        tdSql.query("select 0.1 + 0.1 from test")
        tdSql.checkRows(self.rowNum * 2)
        for i in range(self.rowNum * 2):
            tdSql.checkData(0, 0, 0.20000000)

        tdSql.query("select 4 * avg(col1) from test")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 22)

        tdSql.query("select 4 * sum(col1) from test")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 440)

        tdSql.query("select 4 * avg(col1) * sum(col2) from test")
        tdSql.checkRows(1)
        tdSql.checkData(0, 0, 2420)

        tdSql.query("select 4 * avg(col1) * sum(col2) from test group by loc")
        tdSql.checkRows(2)
        tdSql.checkData(0, 0, 1210)
        tdSql.checkData(1, 0, 1210)

        tdSql.error("select avg(col1 * 2)from test group by loc")

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
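The constants in the checks above follow directly from the inserted data (col1 = col2 = 1..10 in each of the two sub-tables); a quick stand-alone sanity check, not part of the test case itself:

rows = list(range(1, 11))               # values written to col1 and col2 of test1 and test2
col_avg = sum(rows) / len(rows)         # 5.5, the same over the whole super table
col_sum = 2 * sum(rows)                 # 110 across both sub-tables
assert 4 * col_avg == 22                # checkData(0, 0, 22)
assert 4 * col_sum == 440               # checkData(0, 0, 440)
assert 4 * col_avg * col_sum == 2420    # checkData(0, 0, 2420)
assert 4 * col_avg * sum(rows) == 1210  # per-group value for each loc tag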
@@ -5,7 +5,7 @@ GREEN='\033[1;32m'
 GREEN_DARK='\033[0;32m'
 GREEN_UNDERLINE='\033[4;32m'
 NC='\033[0m'
-nohup /root/TDinternal/debug/build/bin/taosd -c /root/TDinternal/community/sim/dnode1/cfg >/dev/null &
+nohup /var/lib/jenkins/workspace/TDinternal/debug/build/bin/taosd -c /var/lib/jenkins/workspace/TDinternal/community/sim/dnode1/cfg >/dev/null &
 ./crash_gen.sh --valgrind -p -t 10 -s 100 -b 4
 pidof taosd|xargs kill
 grep 'start to execute\|ERROR SUMMARY' valgrind.err|grep -v 'grep'|uniq|tee crash_gen_mem_err.log
@@ -15,25 +15,28 @@ import requests
 import threading
 import random
 import time
+import argparse

 class RestfulInsert:
-    def init(self):
+    def __init__(self, host, dbname, threads, tables, records, batchSize, tbNamePerfix, outOfOrder):
         self.header = {'Authorization': 'Basic cm9vdDp0YW9zZGF0YQ=='}
-        self.url = "http://127.0.0.1:6041/rest/sql"
+        self.url = "http://%s:6041/rest/sql" % host
         self.ts = 1500000000000
-        self.numOfThreads = 20
-        self.numOfTables = 10000
-        self.recordsPerTable = 10000
-        self.batchSize = 1000
-        self.tableNamePerfix = 't'
+        self.dbname = dbname
+        self.numOfThreads = threads
+        self.numOfTables = tables
+        self.recordsPerTable = records
+        self.batchSize = batchSize
+        self.tableNamePerfix = tbNamePerfix
+        self.outOfOrder = outOfOrder

     def createTable(self, threadID):
         tablesPerThread = int (self.numOfTables / self.numOfThreads)
         print("create table %d to %d" % (tablesPerThread * threadID, tablesPerThread * (threadID + 1) - 1))
         for i in range(tablesPerThread):
             tableID = threadID * tablesPerThread
             name = 'beijing' if tableID % 2 == 0 else 'shanghai'
-            data = "create table test.%s%d using test.meters tags(%d, '%s')" % (self.tableNamePerfix, tableID + i, tableID + i, name)
+            data = "create table %s.%s%d using %s.meters tags(%d, '%s')" % (self.dbname, self.tableNamePerfix, tableID + i, self.dbname, tableID + i, name)
             requests.post(self.url, data, headers = self.header)

     def insertData(self, threadID):
@@ -43,17 +46,42 @@ class RestfulInsert:
             tableID = i + threadID * tablesPerThread
             start = self.ts
             for j in range(int(self.recordsPerTable / self.batchSize)):
-                data = "insert into test.%s%d values" % (self.tableNamePerfix, tableID)
+                data = "insert into %s.%s%d values" % (self.dbname, self.tableNamePerfix, tableID)
+                values = []
                 for k in range(self.batchSize):
                     data += "(%d, %d, %d, %d)" % (start + j * self.batchSize + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100))
                 requests.post(self.url, data, headers = self.header)
+
+    def insertUnlimitedData(self, threadID):
+        print("thread %d started" % threadID)
+        tablesPerThread = int (self.numOfTables / self.numOfThreads)
+        while True:
+            i = 0
+            start = self.ts
+
+            for i in range(tablesPerThread):
+                tableID = i + threadID * tablesPerThread
+
+                data = "insert into %s.%s%d values" % (self.dbname, self.tableNamePerfix, tableID)
+                values = []
+                for k in range(self.batchSize):
+                    values.append("(%d, %d, %d, %d)" % (start + k, random.randint(1, 100), random.randint(1, 100), random.randint(1, 100)))
+
+                if(self.outOfOrder == False):
+                    for k in range(len(values)):
+                        data += values[k]
+                else:
+                    random.shuffle(values)
+                    for k in range(len(values)):
+                        data += values[k]
+                requests.post(self.url, data, headers = self.header)

     def run(self):
-        data = "drop database if exists test"
+        data = "drop database if exists %s" % self.dbname
         requests.post(self.url, data, headers = self.header)
-        data = "create database test"
+        data = "create database %s" % self.dbname
         requests.post(self.url, data, headers = self.header)
-        data = "create table test.meters(ts timestamp, f1 int, f2 int, f3 int) tags(id int, loc nchar(20))"
+        data = "create table %s.meters(ts timestamp, f1 int, f2 int, f3 int) tags(id int, loc nchar(20))" % self.dbname
         requests.post(self.url, data, headers = self.header)

         threads = []
@@ -70,7 +98,10 @@ class RestfulInsert:
         threads = []
         startTime = time.time()
         for i in range(self.numOfThreads):
-            thread = threading.Thread(target=self.insertData, args=(i,))
+            if(self.recordsPerTable != -1):
+                thread = threading.Thread(target=self.insertData, args=(i,))
+            else:
+                thread = threading.Thread(target=self.insertUnlimitedData, args=(i,))
             thread.start()
             threads.append(thread)

@@ -78,6 +109,62 @@ class RestfulInsert:
             threads[i].join()
         print("inserting %d records takes %d seconds" % (self.numOfTables * self.recordsPerTable, (time.time() - startTime)))

-ri = RestfulInsert()
-ri.init()
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    '-H',
+    '--host-name',
+    action='store',
+    default='127.0.0.1',
+    type=str,
+    help='host name to be connected (default: 127.0.0.1)')
+parser.add_argument(
+    '-d',
+    '--db-name',
+    action='store',
+    default='test',
+    type=str,
+    help='Database name to be created (default: test)')
+parser.add_argument(
+    '-t',
+    '--number-of-threads',
+    action='store',
+    default=10,
+    type=int,
+    help='Number of threads to create tables and insert data (default: 10)')
+parser.add_argument(
+    '-T',
+    '--number-of-tables',
+    action='store',
+    default=1000,
+    type=int,
+    help='Number of tables to be created (default: 1000)')
+parser.add_argument(
+    '-r',
+    '--number-of-records',
+    action='store',
+    default=1000,
+    type=int,
+    help='Number of records to be created for each table (default: 1000, -1 for unlimited records)')
+parser.add_argument(
+    '-s',
+    '--batch-size',
+    action='store',
+    default=1000,
+    type=int,
+    help='Number of records per insert batch (default: 1000)')
+parser.add_argument(
+    '-p',
+    '--table-name-prefix',
+    action='store',
+    default='t',
+    type=str,
+    help='Table name prefix (default: t)')
+parser.add_argument(
+    '-o',
+    '--out-of-order',
+    action='store_true',
+    help='Insert the test data out of timestamp order (default: False)')
+
+args = parser.parse_args()
+ri = RestfulInsert(args.host_name, args.db_name, args.number_of_threads, args.number_of_tables, args.number_of_records, args.batch_size, args.table_name_prefix, args.out_of_order)
 ri.run()
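For reference, a minimal sketch (not part of the diff) of constructing the class directly with the same values the argument parser supplies by default:

ri = RestfulInsert(host='127.0.0.1', dbname='test', threads=10, tables=1000,
                   records=1000, batchSize=1000, tbNamePerfix='t', outOfOrder=False)
ri.run()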
@@ -0,0 +1,68 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################

# -*- coding: utf-8 -*-

import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)

        self.ts = 1537146000000

    def run(self):
        tdSql.prepare()

        print("==============step1")
        tdSql.execute(
            "create table if not exists stb (ts timestamp, col1 int, col2 int, col3 int) tags(loc nchar(20), id int)")

        currTs = self.ts

        for i in range(100):
            sql = "create table tb%d using stb tags('city%d', 1)" % (i, i)
            tdSql.execute(sql)

            sql = "insert into tb%d values" % i
            for j in range(5):
                val = 1 + j
                sql += "(%d, %d, %d, %d)" % (currTs, val, val, val)
                currTs += 1000000
            tdSql.execute(sql)

        tdSql.query("select first(col1) - avg(col1) from stb where ts > '2018-09-17 08:00:00.000' and ts < '2018-09-23 04:36:40.000' interval(1h)")
        tdSql.checkRows(139)

        tdSql.query("select first(col1) - avg(col1) from stb where ts > '2018-09-17 08:00:00.000' and ts < '2018-09-23 04:36:40.000' interval(1h) fill(null)")
        tdSql.checkRows(141)
        tdSql.checkData(0, 1, None)
        tdSql.checkData(140, 1, None)

        tdSql.query("select max(col1) - min(col1) from stb where ts > '2018-09-17 08:00:00.000' and ts < '2018-09-23 04:36:40.000' and id = 1 group by loc, id")
        rows = tdSql.queryRows

        tdSql.query("select spread(col1) from stb where ts > '2018-09-17 08:00:00.000' and ts < '2018-09-23 04:36:40.000' and id = 1 group by loc, id")
        tdSql.checkRows(rows)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
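The expected 139 and 141 row counts can be reproduced from the data layout above; a rough stand-alone check, assuming the timestamps are interpreted in UTC+8 so that the 500 rows start exactly at 2018-09-17 09:00:00 and end at 2018-09-23 03:36:40:

total_rows = 100 * 5                           # 500 rows spaced 1,000 s apart
data_span_h = (total_rows - 1) * 1000 / 3600   # about 138.6 hours of data
assert int(data_span_h) + 1 == 139             # hourly windows that actually contain rows
assert 139 + 2 == 141                          # fill(null) also returns the empty 08:00 and 04:00
                                               # edge windows, hence rows 0 and 140 are checked for None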