Merge branch 'develop' of https://github.com/taosdata/TDengine into develop
This commit is contained in:
commit
d16c70d032
|
@ -4,6 +4,9 @@
|
|||
[submodule "src/connector/grafanaplugin"]
|
||||
path = src/connector/grafanaplugin
|
||||
url = https://github.com/taosdata/grafanaplugin
|
||||
[submodule "tests/examples/rust"]
|
||||
path = tests/examples/rust
|
||||
url = https://github.com/songtianyi/tdengine-rust-bindings.git
|
||||
[submodule "src/connector/hivemq-tdengine-extension"]
|
||||
path = src/connector/hivemq-tdengine-extension
|
||||
url = https://github.com/huskar-t/hivemq-tdengine-extension.git
|
||||
url = https://github.com/huskar-t/hivemq-tdengine-extension.git
|
|
@ -4,7 +4,7 @@ PROJECT(TDengine)
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "2.0.6.0")
|
||||
SET(TD_VER_NUMBER "2.0.7.0")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
|
|
@ -48,7 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat
|
|||
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
|
||||
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
|
||||
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
|
||||
#cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
|
||||
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
|
||||
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
|
||||
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
|
||||
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
|
||||
|
|
|
@ -58,7 +58,7 @@ cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/scri
|
|||
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
|
||||
#cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
|
||||
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
|
||||
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
|
||||
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
|
||||
log_dir=$1
|
||||
result_file=$2
|
||||
|
||||
if [ ! -n "$1" ];then
|
||||
echo "Pleas input the director of taosdlog."
|
||||
echo "usage: ./get_client.sh <taosdlog directory> <result file>"
|
||||
exit 1
|
||||
else
|
||||
log_dir=$1
|
||||
fi
|
||||
|
||||
if [ ! -n "$2" ];then
|
||||
result_file=clientInfo.txt
|
||||
else
|
||||
result_file=$2
|
||||
fi
|
||||
|
||||
grep "new TCP connection" ${log_dir}/taosdlog.* | sed -e "s/0x.* from / /"|sed -e "s/,.*$//"|sed -e "s/:[0-9]*$//"|sort -r|uniq -f 2|sort -k 3 -r|uniq -f 2 > ${result_file}
|
|
@ -45,8 +45,7 @@ if [ "$osType" != "Darwin" ]; then
|
|||
strip ${build_dir}/bin/taos
|
||||
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
|
||||
else
|
||||
#bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
|
||||
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh"
|
||||
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
|
||||
fi
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
else
|
||||
|
|
|
@ -76,8 +76,10 @@ if [ "$osType" != "Darwin" ]; then
|
|||
else
|
||||
cp ${build_dir}/bin/taos ${install_dir}/bin/power
|
||||
cp ${script_dir}/remove_power.sh ${install_dir}/bin
|
||||
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
|
||||
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
|
||||
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
|
||||
cp ${script_dir}/set_core.sh ${install_dir}/bin
|
||||
cp ${script_dir}/get_client.sh ${install_dir}/bin
|
||||
fi
|
||||
else
|
||||
cp ${bin_files} ${install_dir}/bin
|
||||
|
|
|
@ -36,8 +36,7 @@ if [ "$pagMode" == "lite" ]; then
|
|||
strip ${build_dir}/bin/taos
|
||||
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
|
||||
else
|
||||
#bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh"
|
||||
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh"
|
||||
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
|
||||
fi
|
||||
|
||||
lib_files="${build_dir}/lib/libtaos.so.${version}"
|
||||
|
|
|
@ -77,8 +77,10 @@ else
|
|||
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
|
||||
cp ${script_dir}/remove_power.sh ${install_dir}/bin
|
||||
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
|
||||
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
|
||||
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
|
||||
cp ${script_dir}/set_core.sh ${install_dir}/bin
|
||||
cp ${script_dir}/get_client.sh ${install_dir}/bin
|
||||
fi
|
||||
chmod a+x ${install_dir}/bin/* || :
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
name: tdengine
|
||||
base: core18
|
||||
version: '2.0.6.0'
|
||||
version: '2.0.7.0'
|
||||
icon: snap/gui/t-dengine.svg
|
||||
summary: an open-source big data platform designed and optimized for IoT.
|
||||
description: |
|
||||
|
|
|
@ -62,6 +62,7 @@ typedef struct SLocalReducer {
|
|||
bool hasUnprocessedRow;
|
||||
tOrderDescriptor * pDesc;
|
||||
SColumnModel * resColModel;
|
||||
SColumnModel* finalModel;
|
||||
tExtMemBuffer ** pExtMemBuffer; // disk-based buffer
|
||||
SFillInfo* pFillInfo; // interpolation support structure
|
||||
char* pFinalRes; // result data after interpo
|
||||
|
@ -74,7 +75,8 @@ typedef struct SLocalReducer {
|
|||
typedef struct SRetrieveSupport {
|
||||
tExtMemBuffer ** pExtMemBuffer; // for build loser tree
|
||||
tOrderDescriptor *pOrderDescriptor;
|
||||
SColumnModel * pFinalColModel; // colModel for final result
|
||||
SColumnModel* pFinalColModel; // colModel for final result
|
||||
SColumnModel* pFFColModel;
|
||||
int32_t subqueryIndex; // index of current vnode in vnode list
|
||||
SSqlObj * pParentSql;
|
||||
tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to
|
||||
|
@ -96,7 +98,7 @@ int32_t tscFlushTmpBuffer(tExtMemBuffer *pMemoryBuf, tOrderDescriptor *pDesc, tF
|
|||
* create local reducer to launch the second-stage reduce process at client site
|
||||
*/
|
||||
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
|
||||
SColumnModel *finalModel, SSqlObj* pSql);
|
||||
SColumnModel *finalModel, SColumnModel *pFFModel, SSqlObj* pSql);
|
||||
|
||||
void tscDestroyLocalReducer(SSqlObj *pSql);
|
||||
|
||||
|
|
|
@ -41,6 +41,8 @@ int32_t tscHandleInsertRetry(SSqlObj* pSql);
|
|||
void tscBuildResFromSubqueries(SSqlObj *pSql);
|
||||
TAOS_ROW doSetResultRowData(SSqlObj *pSql, bool finalResult);
|
||||
|
||||
char *getArithemicInputSrc(void *param, const char *name, int32_t colId);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -125,6 +125,7 @@ int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t
|
|||
*/
|
||||
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
|
||||
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
|
||||
|
||||
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
|
||||
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
|
||||
|
@ -158,7 +159,7 @@ SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t ind
|
|||
TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index);
|
||||
|
||||
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
|
||||
void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo);
|
||||
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
|
||||
|
||||
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
|
||||
void tscFieldInfoClear(SFieldInfo* pFieldInfo);
|
||||
|
@ -167,15 +168,15 @@ static FORCE_INLINE int32_t tscNumOfFields(SQueryInfo* pQueryInfo) { return pQue
|
|||
|
||||
int32_t tscFieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2);
|
||||
|
||||
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex);
|
||||
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes);
|
||||
|
||||
int32_t tscGetResRowLength(SArray* pExprList);
|
||||
|
||||
SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
|
||||
int16_t size, int16_t interSize, bool isTagCol);
|
||||
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
|
||||
|
||||
SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
|
||||
int16_t size, int16_t interSize, bool isTagCol);
|
||||
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol);
|
||||
|
||||
SSqlExpr* tscSqlExprUpdate(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, int16_t srcColumnIndex, int16_t type,
|
||||
int16_t size);
|
||||
|
|
|
@ -136,6 +136,7 @@ typedef struct SSqlExpr {
|
|||
int16_t numOfParams; // argument value of each function
|
||||
tVariant param[3]; // parameters are not more than 3
|
||||
int32_t offset; // sub result column value of arithmetic expression.
|
||||
int16_t resColId; // result column id
|
||||
} SSqlExpr;
|
||||
|
||||
typedef struct SColumnIndex {
|
||||
|
@ -251,6 +252,7 @@ typedef struct SQueryInfo {
|
|||
int64_t clauseLimit; // limit for current sub clause
|
||||
int64_t prjOffset; // offset value in the original sql expression, only applied at client side
|
||||
int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX
|
||||
int16_t resColumnId; // result column id
|
||||
} SQueryInfo;
|
||||
|
||||
typedef struct {
|
||||
|
@ -462,17 +464,16 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
|
|||
|
||||
int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
|
||||
|
||||
static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) {
|
||||
static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex, int32_t offset) {
|
||||
SInternalField* pInfo = (SInternalField*) TARRAY_GET_ELEM(pFieldInfo->internalField, columnIndex);
|
||||
assert(pInfo->pSqlExpr != NULL);
|
||||
|
||||
int32_t type = pInfo->pSqlExpr->resType;
|
||||
int32_t bytes = pInfo->pSqlExpr->resBytes;
|
||||
int32_t type = pInfo->field.type;
|
||||
int32_t bytes = pInfo->field.bytes;
|
||||
|
||||
char* pData = pRes->data + (int32_t)(pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row);
|
||||
char* pData = pRes->data + (int32_t)(offset * pRes->numOfRows + bytes * pRes->row);
|
||||
|
||||
// user defined constant value output columns
|
||||
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
|
||||
if (pInfo->pSqlExpr != NULL && TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
|
||||
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
|
||||
pData = pInfo->pSqlExpr->param[1].pz;
|
||||
pRes->length[columnIndex] = pInfo->pSqlExpr->param[1].nLen;
|
||||
|
@ -517,6 +518,7 @@ extern SRpcCorEpSet tscMgmtEpSet;
|
|||
extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);
|
||||
|
||||
void tscBuildVgroupTableInfo(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SArray* tables);
|
||||
int16_t getNewResColId(SQueryInfo* pQueryInfo);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -351,7 +351,7 @@ void tscProcessFetchRow(SSchedMsg *pMsg) {
|
|||
SInternalField* pSup = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
|
||||
|
||||
if (pSup->pSqlExpr != NULL) {
|
||||
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);
|
||||
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0);
|
||||
} else {
|
||||
// todo add
|
||||
}
|
||||
|
|
|
@ -2695,17 +2695,18 @@ static void apercentile_func_second_merge(SQLFunctionCtx *pCtx) {
|
|||
}
|
||||
|
||||
SAPercentileInfo *pOutput = getAPerctInfo(pCtx);
|
||||
SHistogramInfo * pHisto = pOutput->pHisto;
|
||||
SHistogramInfo *pHisto = pOutput->pHisto;
|
||||
|
||||
if (pHisto->numOfElems <= 0) {
|
||||
memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1));
|
||||
pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));
|
||||
} else {
|
||||
//TODO(dengyihao): avoid memcpy
|
||||
pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));
|
||||
|
||||
SHistogramInfo *pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN);
|
||||
tHistogramDestroy(&pOutput->pHisto);
|
||||
pOutput->pHisto = pRes;
|
||||
memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN);
|
||||
pHisto->elems = (SHistBin*) ((char *)pHisto + sizeof(SHistogramInfo));
|
||||
tHistogramDestroy(&pRes);
|
||||
}
|
||||
|
||||
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
|
||||
|
|
|
@ -162,7 +162,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
|
|||
|
||||
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
|
||||
(TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, (TSDB_COL_NAME_LEN - 1), false);
|
||||
(TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE, -1000, (TSDB_COL_NAME_LEN - 1), false);
|
||||
|
||||
rowLen += ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE);
|
||||
|
||||
|
@ -172,7 +172,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
|
|||
|
||||
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(typeColLength + VARSTR_HEADER_SIZE),
|
||||
typeColLength, false);
|
||||
-1000, typeColLength, false);
|
||||
|
||||
rowLen += typeColLength + VARSTR_HEADER_SIZE;
|
||||
|
||||
|
@ -182,7 +182,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
|
|||
|
||||
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_INT, sizeof(int32_t),
|
||||
sizeof(int32_t), false);
|
||||
-1000, sizeof(int32_t), false);
|
||||
|
||||
rowLen += sizeof(int32_t);
|
||||
|
||||
|
@ -192,7 +192,7 @@ static int32_t tscBuildTableSchemaResultFields(SSqlObj *pSql, int32_t numOfCols,
|
|||
|
||||
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, (int16_t)(noteColLength + VARSTR_HEADER_SIZE),
|
||||
noteColLength, false);
|
||||
-1000, noteColLength, false);
|
||||
|
||||
rowLen += noteColLength + VARSTR_HEADER_SIZE;
|
||||
return rowLen;
|
||||
|
@ -407,8 +407,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
|
|||
}
|
||||
|
||||
SInternalField* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
|
||||
f.bytes, f.bytes - VARSTR_HEADER_SIZE, false);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY, f.bytes, -1000, f.bytes - VARSTR_HEADER_SIZE, false);
|
||||
|
||||
rowLen += f.bytes;
|
||||
|
||||
|
@ -422,7 +421,7 @@ static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const
|
|||
|
||||
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
|
||||
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), ddlLen, false);
|
||||
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), -1000, ddlLen, false);
|
||||
|
||||
rowLen += ddlLen + VARSTR_HEADER_SIZE;
|
||||
|
||||
|
@ -619,7 +618,11 @@ static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName,
|
|||
for (int32_t i = 0; i < numOfRows; ++i) {
|
||||
uint8_t type = pSchema[i].type;
|
||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
|
||||
int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
|
||||
if (type == TSDB_DATA_TYPE_NCHAR) {
|
||||
bytes = bytes/TSDB_NCHAR_SIZE;
|
||||
}
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName, bytes);
|
||||
} else {
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName);
|
||||
}
|
||||
|
@ -642,7 +645,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
|
|||
for (int32_t i = 0; i < numOfRows; ++i) {
|
||||
uint8_t type = pSchema[i].type;
|
||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
|
||||
int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
|
||||
if (type == TSDB_DATA_TYPE_NCHAR) {
|
||||
bytes = bytes/TSDB_NCHAR_SIZE;
|
||||
}
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
|
||||
} else {
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
|
||||
}
|
||||
|
@ -652,7 +659,11 @@ static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName,
|
|||
for (int32_t i = numOfRows; i < totalRows; i++) {
|
||||
uint8_t type = pSchema[i].type;
|
||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
|
||||
int32_t bytes = pSchema[i].bytes - VARSTR_HEADER_SIZE;
|
||||
if (type == TSDB_DATA_TYPE_NCHAR) {
|
||||
bytes = bytes/TSDB_NCHAR_SIZE;
|
||||
}
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName, bytes);
|
||||
} else {
|
||||
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
|
||||
}
|
||||
|
|
|
@ -13,14 +13,15 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "tscLocalMerge.h"
|
||||
#include "tscSubquery.h"
|
||||
#include "os.h"
|
||||
#include "qAst.h"
|
||||
#include "tlosertree.h"
|
||||
#include "tscLog.h"
|
||||
#include "tscUtil.h"
|
||||
#include "tschemautil.h"
|
||||
#include "tsclient.h"
|
||||
#include "tutil.h"
|
||||
#include "tscLog.h"
|
||||
#include "tscLocalMerge.h"
|
||||
|
||||
typedef struct SCompareParam {
|
||||
SLocalDataSource **pLocalData;
|
||||
|
@ -29,6 +30,8 @@ typedef struct SCompareParam {
|
|||
int32_t groupOrderType;
|
||||
} SCompareParam;
|
||||
|
||||
static void doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize);
|
||||
|
||||
int32_t treeComparator(const void *pLeft, const void *pRight, void *param) {
|
||||
int32_t pLeftIdx = *(int32_t *)pLeft;
|
||||
int32_t pRightIdx = *(int32_t *)pRight;
|
||||
|
@ -132,28 +135,41 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
|
|||
}
|
||||
|
||||
static SFillColInfo* createFillColInfo(SQueryInfo* pQueryInfo) {
|
||||
int32_t numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
|
||||
int32_t numOfCols = (int32_t)tscNumOfFields(pQueryInfo);
|
||||
int32_t offset = 0;
|
||||
|
||||
SFillColInfo* pFillCol = calloc(numOfCols, sizeof(SFillColInfo));
|
||||
for(int32_t i = 0; i < numOfCols; ++i) {
|
||||
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
|
||||
|
||||
pFillCol[i].col.bytes = pExpr->resBytes;
|
||||
pFillCol[i].col.type = (int8_t)pExpr->resType;
|
||||
pFillCol[i].col.colId = pExpr->colInfo.colId;
|
||||
pFillCol[i].flag = pExpr->colInfo.flag;
|
||||
pFillCol[i].col.offset = offset;
|
||||
pFillCol[i].functionId = pExpr->functionId;
|
||||
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
|
||||
offset += pExpr->resBytes;
|
||||
SInternalField* pIField = taosArrayGet(pQueryInfo->fieldsInfo.internalField, i);
|
||||
|
||||
if (pIField->pArithExprInfo == NULL) {
|
||||
SSqlExpr* pExpr = pIField->pSqlExpr;
|
||||
|
||||
pFillCol[i].col.bytes = pExpr->resBytes;
|
||||
pFillCol[i].col.type = (int8_t)pExpr->resType;
|
||||
pFillCol[i].col.colId = pExpr->colInfo.colId;
|
||||
pFillCol[i].flag = pExpr->colInfo.flag;
|
||||
pFillCol[i].col.offset = offset;
|
||||
pFillCol[i].functionId = pExpr->functionId;
|
||||
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
|
||||
} else {
|
||||
pFillCol[i].col.bytes = pIField->field.bytes;
|
||||
pFillCol[i].col.type = (int8_t)pIField->field.type;
|
||||
pFillCol[i].col.colId = -100;
|
||||
pFillCol[i].flag = TSDB_COL_NORMAL;
|
||||
pFillCol[i].col.offset = offset;
|
||||
pFillCol[i].functionId = -1;
|
||||
pFillCol[i].fillVal.i = pQueryInfo->fillVal[i];
|
||||
}
|
||||
|
||||
offset += pFillCol[i].col.bytes;
|
||||
}
|
||||
|
||||
return pFillCol;
|
||||
}
|
||||
|
||||
void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrderDescriptor *pDesc,
|
||||
SColumnModel *finalmodel, SSqlObj* pSql) {
|
||||
SColumnModel *finalmodel, SColumnModel *pFFModel, SSqlObj* pSql) {
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
SSqlRes* pRes = &pSql->res;
|
||||
|
||||
|
@ -342,8 +358,6 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
|
|||
return;
|
||||
}
|
||||
|
||||
size_t numOfCols = tscSqlExprNumOfExprs(pQueryInfo);
|
||||
|
||||
pReducer->pTempBuffer->num = 0;
|
||||
|
||||
tscCreateResPointerInfo(pRes, pQueryInfo);
|
||||
|
@ -372,7 +386,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
|
|||
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
|
||||
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
|
||||
pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
|
||||
4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
|
||||
4096, (int32_t)pQueryInfo->fieldsInfo.numOfOutput, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
|
||||
tinfo.precision, pQueryInfo->fillType, pFillCol, pSql);
|
||||
}
|
||||
}
|
||||
|
@ -491,7 +505,8 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
|
|||
pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo);
|
||||
|
||||
if (pLocalReducer->pCtx != NULL) {
|
||||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
|
||||
for (int32_t i = 0; i < numOfExprs; ++i) {
|
||||
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[i];
|
||||
|
||||
tVariantDestroy(&pCtx->tag);
|
||||
|
@ -555,7 +570,8 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
|
|||
if (numOfGroupByCols > 0) {
|
||||
|
||||
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
|
||||
int32_t startCols = pQueryInfo->fieldsInfo.numOfOutput - pQueryInfo->groupbyExpr.numOfGroupCols;
|
||||
int32_t numOfInternalOutput = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
|
||||
int32_t startCols = numOfInternalOutput - pQueryInfo->groupbyExpr.numOfGroupCols;
|
||||
|
||||
// the last "pQueryInfo->groupbyExpr.numOfGroupCols" columns are order-by columns
|
||||
for (int32_t i = 0; i < pQueryInfo->groupbyExpr.numOfGroupCols; ++i) {
|
||||
|
@ -674,6 +690,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
|
|||
|
||||
pSchema[i].bytes = pExpr->resBytes;
|
||||
pSchema[i].type = (int8_t)pExpr->resType;
|
||||
tstrncpy(pSchema[i].name, pExpr->aliasName, tListLen(pSchema[i].name));
|
||||
|
||||
rlen += pExpr->resBytes;
|
||||
}
|
||||
|
||||
|
@ -736,8 +754,8 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr
|
|||
}
|
||||
|
||||
*pFinalModel = createColumnModel(pSchema, (int32_t)size, capacity);
|
||||
tfree(pSchema);
|
||||
|
||||
tfree(pSchema);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -966,10 +984,11 @@ static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneO
|
|||
savePrevRecordAndSetupFillInfo(pLocalReducer, pQueryInfo, pFillInfo);
|
||||
}
|
||||
|
||||
int32_t offset = 0;
|
||||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
int16_t offset = getColumnModelOffset(pLocalReducer->resColModel, i);
|
||||
memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows));
|
||||
offset += pField->bytes;
|
||||
}
|
||||
|
||||
pRes->numOfRowsGroup += pRes->numOfRows;
|
||||
|
@ -1222,6 +1241,10 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
|
|||
|
||||
tColModelCompact(pModel, pResBuf, pModel->capacity);
|
||||
|
||||
if (tscIsSecondStageQuery(pQueryInfo)) {
|
||||
doArithmeticCalculate(pQueryInfo, pResBuf, pModel->rowSize, pLocalReducer->finalRowSize);
|
||||
}
|
||||
|
||||
#ifdef _DEBUG_VIEW
|
||||
printf("final result before interpo:\n");
|
||||
// tColModelDisplay(pLocalReducer->resColModel, pLocalReducer->pBufForInterpo, pResBuf->num, pResBuf->num);
|
||||
|
@ -1588,3 +1611,44 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
|
|||
pRes->pLocalReducer->pResultBuf->num = numOfRes;
|
||||
pRes->data = pRes->pLocalReducer->pResultBuf->data;
|
||||
}
|
||||
|
||||
void doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize) {
|
||||
char* pbuf = calloc(1, pOutput->num * rowSize);
|
||||
|
||||
size_t size = tscNumOfFields(pQueryInfo);
|
||||
SArithmeticSupport arithSup = {0};
|
||||
|
||||
// todo refactor
|
||||
arithSup.offset = 0;
|
||||
arithSup.numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
|
||||
arithSup.exprList = pQueryInfo->exprList;
|
||||
arithSup.data = calloc(arithSup.numOfCols, POINTER_BYTES);
|
||||
|
||||
for(int32_t k = 0; k < arithSup.numOfCols; ++k) {
|
||||
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k);
|
||||
arithSup.data[k] = (pOutput->data + pOutput->num* pExpr->offset);
|
||||
}
|
||||
|
||||
int32_t offset = 0;
|
||||
|
||||
for (int i = 0; i < size; ++i) {
|
||||
SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
|
||||
|
||||
// calculate the result from several other columns
|
||||
if (pSup->pArithExprInfo != NULL) {
|
||||
arithSup.pArithExpr = pSup->pArithExprInfo;
|
||||
tExprTreeCalcTraverse(arithSup.pArithExpr->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithemicInputSrc);
|
||||
} else {
|
||||
SSqlExpr* pExpr = pSup->pSqlExpr;
|
||||
memcpy(pbuf + pOutput->num * offset, pExpr->offset * pOutput->num + pOutput->data, pExpr->resBytes * pOutput->num);
|
||||
}
|
||||
|
||||
offset += pSup->field.bytes;
|
||||
}
|
||||
|
||||
assert(finalRowSize <= rowSize);
|
||||
memcpy(pOutput->data, pbuf, pOutput->num * finalRowSize);
|
||||
|
||||
tfree(pbuf);
|
||||
tfree(arithSup.data);
|
||||
}
|
|
@ -1148,6 +1148,10 @@ int tsParseInsertSql(SSqlObj *pSql) {
|
|||
|
||||
index = 0;
|
||||
sToken = tStrGetToken(str, &index, false, 0, NULL);
|
||||
if (sToken.type != TK_STRING && sToken.type != TK_ID) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
|
||||
goto _error;
|
||||
}
|
||||
str += index;
|
||||
if (sToken.n == 0) {
|
||||
code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z);
|
||||
|
|
|
@ -52,7 +52,8 @@ typedef struct SConvertFunc {
|
|||
int32_t originFuncId;
|
||||
int32_t execFuncId;
|
||||
} SConvertFunc;
|
||||
static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIndex, int32_t tableIndex);
|
||||
|
||||
static SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex);
|
||||
|
||||
static int32_t setShowInfo(SSqlObj* pSql, SSqlInfo* pInfo);
|
||||
static char* getAccountId(SSqlObj* pSql);
|
||||
|
@ -127,6 +128,10 @@ static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo);
|
|||
static int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index);
|
||||
static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SQueryInfo* pQueryInfo, SArray* pCols, int64_t *uid);
|
||||
|
||||
int16_t getNewResColId(SQueryInfo* pQueryInfo) {
|
||||
return pQueryInfo->resColumnId--;
|
||||
}
|
||||
|
||||
static uint8_t convertOptr(SStrToken *pToken) {
|
||||
switch (pToken->type) {
|
||||
case TK_LT:
|
||||
|
@ -1274,6 +1279,7 @@ static void tscInsertPrimaryTSSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex*
|
|||
SColumnIndex tsCol = {.tableIndex = pIndex->tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX};
|
||||
tscColumnListInsert(pQueryInfo->colList, &tsCol);
|
||||
}
|
||||
|
||||
static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t exprIndex, tSQLExprItem* pItem) {
|
||||
const char* msg1 = "invalid column name, illegal column type, or columns in arithmetic expression from two tables";
|
||||
const char* msg2 = "invalid arithmetic expression in select clause";
|
||||
|
@ -1305,7 +1311,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
|
|||
SColumnIndex index = {.tableIndex = tableIndex};
|
||||
|
||||
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double),
|
||||
sizeof(double), false);
|
||||
-1000, sizeof(double), false);
|
||||
|
||||
char* name = (pItem->aliasName != NULL)? pItem->aliasName:pItem->pNode->token.z;
|
||||
size_t len = MIN(sizeof(pExpr->aliasName), pItem->pNode->token.n + 1);
|
||||
|
@ -1321,6 +1327,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
|
|||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
}
|
||||
|
||||
// check for if there is a tag in the arithmetic express
|
||||
size_t numOfNode = taosArrayGetSize(colList);
|
||||
for(int32_t k = 0; k < numOfNode; ++k) {
|
||||
SColIndex* pIndex = taosArrayGet(colList, k);
|
||||
|
@ -1346,9 +1353,9 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
|
|||
char* c = tbufGetData(&bw, false);
|
||||
|
||||
// set the serialized binary string as the parameter of arithmetic expression
|
||||
addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len, index.tableIndex);
|
||||
|
||||
addExprParams(pExpr, c, TSDB_DATA_TYPE_BINARY, (int32_t)len);
|
||||
insertResultField(pQueryInfo, exprIndex, &columnList, sizeof(double), TSDB_DATA_TYPE_DOUBLE, pExpr->aliasName, pExpr);
|
||||
|
||||
// add ts column
|
||||
tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
|
||||
|
||||
|
@ -1380,6 +1387,10 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
|
|||
pArithExprInfo->interBytes = sizeof(double);
|
||||
pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE;
|
||||
|
||||
pArithExprInfo->base.functionId = TSDB_FUNC_ARITHM;
|
||||
pArithExprInfo->base.numOfParams = 1;
|
||||
pArithExprInfo->base.resColId = getNewResColId(pQueryInfo);
|
||||
|
||||
int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo, NULL, &pArithExprInfo->uid);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
tExprTreeDestroy(&pArithExprInfo->pExpr, NULL);
|
||||
|
@ -1388,14 +1399,30 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t
|
|||
|
||||
pInfo->pArithExprInfo = pArithExprInfo;
|
||||
}
|
||||
|
||||
SBufferWriter bw = tbufInitWriter(NULL, false);
|
||||
|
||||
TRY(0) {
|
||||
exprTreeToBinary(&bw, pInfo->pArithExprInfo->pExpr);
|
||||
} CATCH(code) {
|
||||
tbufCloseWriter(&bw);
|
||||
UNUSED(code);
|
||||
// TODO: other error handling
|
||||
} END_TRY
|
||||
|
||||
SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base;
|
||||
pFuncMsg->arg[0].argBytes = (int16_t) tbufTell(&bw);
|
||||
pFuncMsg->arg[0].argValue.pz = tbufGetData(&bw, true);
|
||||
pFuncMsg->arg[0].argType = TSDB_DATA_TYPE_BINARY;
|
||||
|
||||
// tbufCloseWriter(&bw); // TODO there is a memory leak
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) {
|
||||
SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos, pIndex->columnIndex, pIndex->tableIndex);
|
||||
SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, pIndex->columnIndex, pIndex->tableIndex);
|
||||
|
||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex);
|
||||
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
|
@ -1540,7 +1567,7 @@ int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnLi
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t colIndex, int32_t tableIndex) {
|
||||
SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t colIndex, int32_t tableIndex) {
|
||||
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex);
|
||||
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
|
||||
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
|
||||
|
@ -1552,20 +1579,22 @@ SSqlExpr* doAddProjectCol(SQueryInfo* pQueryInfo, int32_t outputIndex, int32_t c
|
|||
|
||||
if (functionId == TSDB_FUNC_TAGPRJ) {
|
||||
index.columnIndex = colIndex - tscGetNumOfColumns(pTableMeta);
|
||||
|
||||
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
|
||||
} else {
|
||||
index.columnIndex = colIndex;
|
||||
}
|
||||
|
||||
return tscSqlExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes,
|
||||
pSchema->bytes, functionId == TSDB_FUNC_TAGPRJ);
|
||||
|
||||
int16_t colId = getNewResColId(pQueryInfo);
|
||||
return tscSqlExprAppend(pQueryInfo, functionId, &index, pSchema->type, pSchema->bytes, colId, pSchema->bytes,
|
||||
(functionId == TSDB_FUNC_TAGPRJ));
|
||||
}
|
||||
|
||||
SSqlExpr* tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
|
||||
SColumnIndex* pIndex, SSchema* pColSchema, int16_t flag) {
|
||||
int16_t colId = getNewResColId(pQueryInfo);
|
||||
|
||||
SSqlExpr* pExpr = tscSqlExprInsert(pQueryInfo, outputColIndex, functionId, pIndex, pColSchema->type,
|
||||
pColSchema->bytes, pColSchema->bytes, TSDB_COL_IS_TAG(flag));
|
||||
pColSchema->bytes, colId, pColSchema->bytes, TSDB_COL_IS_TAG(flag));
|
||||
tstrncpy(pExpr->aliasName, pColSchema->name, sizeof(pExpr->aliasName));
|
||||
|
||||
SColumnList ids = getColumnList(1, pIndex->tableIndex, pIndex->columnIndex);
|
||||
|
@ -1601,7 +1630,7 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum
|
|||
}
|
||||
|
||||
for (int32_t j = 0; j < numOfTotalColumns; ++j) {
|
||||
SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, startPos + j, j, pIndex->tableIndex);
|
||||
SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, j, pIndex->tableIndex);
|
||||
tstrncpy(pExpr->aliasName, pSchema[j].name, sizeof(pExpr->aliasName));
|
||||
|
||||
pIndex->columnIndex = j;
|
||||
|
@ -1710,7 +1739,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS
|
|||
bytes = pSchema->bytes;
|
||||
}
|
||||
|
||||
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, bytes, false);
|
||||
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, getNewResColId(pQueryInfo), bytes, false);
|
||||
tstrncpy(pExpr->aliasName, name, tListLen(pExpr->aliasName));
|
||||
|
||||
if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != functionID) {
|
||||
|
@ -1804,7 +1833,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
|
||||
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
|
||||
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
|
||||
} else if (sqlOptr == TK_INTEGER) { // select count(1) from table1
|
||||
char buf[8] = {0};
|
||||
int64_t val = -1;
|
||||
|
@ -1816,7 +1845,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
if (val == 1) {
|
||||
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
|
||||
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
|
||||
} else {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
|
||||
}
|
||||
|
@ -1836,12 +1865,12 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
}
|
||||
|
||||
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, isTag);
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, isTag);
|
||||
}
|
||||
} else { // count(*) is equalled to count(primary_timestamp_key)
|
||||
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
|
||||
int32_t size = tDataTypeDesc[TSDB_DATA_TYPE_BIGINT].nSize;
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, getNewResColId(pQueryInfo), size, false);
|
||||
}
|
||||
|
||||
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
|
||||
|
@ -1928,7 +1957,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
colIndex += 1;
|
||||
SColumnIndex indexTS = {.tableIndex = index.tableIndex, .columnIndex = 0};
|
||||
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &indexTS, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE,
|
||||
TSDB_KEYSIZE, false);
|
||||
getNewResColId(pQueryInfo), TSDB_KEYSIZE, false);
|
||||
|
||||
SColumnList ids = getColumnList(1, 0, 0);
|
||||
insertResultField(pQueryInfo, 0, &ids, TSDB_KEYSIZE, TSDB_DATA_TYPE_TIMESTAMP, aAggs[TSDB_FUNC_TS_DUMMY].aName, pExpr);
|
||||
|
@ -1939,7 +1968,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
|
||||
}
|
||||
|
||||
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, resultType, resultSize, resultSize, false);
|
||||
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false);
|
||||
|
||||
if (optr == TK_LEASTSQUARES) {
|
||||
/* set the leastsquares parameters */
|
||||
|
@ -1948,14 +1977,14 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES, 0);
|
||||
addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, DOUBLE_BYTES);
|
||||
|
||||
memset(val, 0, tListLen(val));
|
||||
if (tVariantDump(&pParamElem[2].pNode->val, val, TSDB_DATA_TYPE_DOUBLE, true) < 0) {
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0);
|
||||
addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
|
||||
}
|
||||
|
||||
SColumnList ids = {0};
|
||||
|
@ -2180,8 +2209,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
tscInsertPrimaryTSSourceColumn(pQueryInfo, &index);
|
||||
colIndex += 1; // the first column is ts
|
||||
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, resultSize, false);
|
||||
addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0);
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false);
|
||||
addExprParams(pExpr, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double));
|
||||
} else {
|
||||
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
|
||||
|
||||
|
@ -2198,8 +2227,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
// todo REFACTOR
|
||||
// set the first column ts for top/bottom query
|
||||
SColumnIndex index1 = {0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE,
|
||||
TSDB_KEYSIZE, false);
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS, &index1, TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(pQueryInfo),
|
||||
TSDB_KEYSIZE, false);
|
||||
tstrncpy(pExpr->aliasName, aAggs[TSDB_FUNC_TS].aName, sizeof(pExpr->aliasName));
|
||||
|
||||
const int32_t TS_COLUMN_INDEX = PRIMARYKEY_TIMESTAMP_COL_INDEX;
|
||||
|
@ -2209,8 +2238,8 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
|
|||
|
||||
colIndex += 1; // the first column is ts
|
||||
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, resultSize, false);
|
||||
addExprParams(pExpr, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t), 0);
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pQueryInfo), resultSize, false);
|
||||
addExprParams(pExpr, val, TSDB_DATA_TYPE_BIGINT, sizeof(int64_t));
|
||||
}
|
||||
|
||||
memset(pExpr->aliasName, 0, tListLen(pExpr->aliasName));
|
||||
|
@ -2694,7 +2723,7 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
|
|||
}
|
||||
}
|
||||
|
||||
tscFieldInfoUpdateOffsetForInterResult(pQueryInfo);
|
||||
tscFieldInfoUpdateOffset(pQueryInfo);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -2922,7 +2951,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd)
|
|||
|
||||
void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo) {
|
||||
if (QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
|
||||
tscFieldInfoUpdateOffsetForInterResult(pQueryInfo);
|
||||
tscFieldInfoUpdateOffset(pQueryInfo);
|
||||
} else {
|
||||
tscFieldInfoUpdateOffset(pQueryInfo);
|
||||
}
|
||||
|
@ -4437,7 +4466,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
|
|||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
|
||||
}
|
||||
|
||||
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
|
||||
size_t size = tscNumOfFields(pQueryInfo);
|
||||
|
||||
if (pQueryInfo->fillVal == NULL) {
|
||||
pQueryInfo->fillVal = calloc(size, sizeof(int64_t));
|
||||
|
@ -4451,12 +4480,8 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
|
|||
} else if (strncasecmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4) {
|
||||
pQueryInfo->fillType = TSDB_FILL_NULL;
|
||||
for (int32_t i = START_INTERPO_COL_IDX; i < size; ++i) {
|
||||
TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type);
|
||||
} else {
|
||||
setNull((char*)&pQueryInfo->fillVal[i], pFields->type, pFields->bytes);
|
||||
};
|
||||
TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
setNull((char*)&pQueryInfo->fillVal[i], pField->type, pField->bytes);
|
||||
}
|
||||
} else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) {
|
||||
pQueryInfo->fillType = TSDB_FILL_PREV;
|
||||
|
@ -4487,15 +4512,15 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
|
|||
int32_t j = 1;
|
||||
|
||||
for (int32_t i = startPos; i < numOfFillVal; ++i, ++j) {
|
||||
TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
|
||||
if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type);
|
||||
if (pField->type == TSDB_DATA_TYPE_BINARY || pField->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull((char*) &pQueryInfo->fillVal[i], pField->type);
|
||||
continue;
|
||||
}
|
||||
|
||||
tVariant* p = taosArrayGet(pFillToken, j);
|
||||
int32_t ret = tVariantDump(p, (char*)&pQueryInfo->fillVal[i], pFields->type, true);
|
||||
int32_t ret = tVariantDump(p, (char*)&pQueryInfo->fillVal[i], pField->type, true);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
|
||||
}
|
||||
|
@ -4505,12 +4530,12 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
|
|||
tVariantListItem* lastItem = taosArrayGetLast(pFillToken);
|
||||
|
||||
for (int32_t i = numOfFillVal; i < size; ++i) {
|
||||
TAOS_FIELD* pFields = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
TAOS_FIELD* pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
|
||||
|
||||
if (pFields->type == TSDB_DATA_TYPE_BINARY || pFields->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull((char*) &pQueryInfo->fillVal[i], pFields->type);
|
||||
if (pField->type == TSDB_DATA_TYPE_BINARY || pField->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
setVardataNull((char*) &pQueryInfo->fillVal[i], pField->type);
|
||||
} else {
|
||||
tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->fillVal[i], pFields->type, true);
|
||||
tVariantDump(&lastItem->pVar, (char*)&pQueryInfo->fillVal[i], pField->type, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -5447,7 +5472,7 @@ void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClau
|
|||
int16_t type = pTagSchema->type;
|
||||
int16_t bytes = pTagSchema->bytes;
|
||||
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, bytes, true);
|
||||
pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true);
|
||||
pExpr->colInfo.flag = TSDB_COL_TAG;
|
||||
|
||||
// NOTE: tag column does not add to source column list
|
||||
|
@ -5750,7 +5775,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
|
|||
|
||||
if (TSDB_COL_IS_TAG(pColIndex->flag)) {
|
||||
SColumnIndex index = {.tableIndex = pQueryInfo->groupbyExpr.tableIndex, .columnIndex = colIndex};
|
||||
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, bytes, true);
|
||||
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG, &index, type, bytes, getNewResColId(pQueryInfo), bytes, true);
|
||||
|
||||
memset(pExpr->aliasName, 0, sizeof(pExpr->aliasName));
|
||||
tstrncpy(pExpr->aliasName, name, sizeof(pExpr->aliasName));
|
||||
|
@ -5913,7 +5938,7 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
|
|||
|
||||
SColumnIndex ind = {0};
|
||||
SSqlExpr* pExpr1 = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TAG_DUMMY, &ind, TSDB_DATA_TYPE_INT,
|
||||
tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, false);
|
||||
tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, getNewResColId(pQueryInfo), tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize, false);
|
||||
|
||||
const char* name = (pExprList->a[0].aliasName != NULL)? pExprList->a[0].aliasName:functionsInfo[index].name;
|
||||
tstrncpy(pExpr1->aliasName, name, tListLen(pExpr1->aliasName));
|
||||
|
@ -6585,6 +6610,7 @@ int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pS
|
|||
if (strcmp((*pExpr)->pSchema->name, p1->aliasName) == 0) {
|
||||
(*pExpr)->pSchema->type = (uint8_t)p1->resType;
|
||||
(*pExpr)->pSchema->bytes = p1->resBytes;
|
||||
(*pExpr)->pSchema->colId = p1->resColId;
|
||||
|
||||
if (uid != NULL) {
|
||||
*uid = p1->uid;
|
||||
|
|
|
@ -241,11 +241,7 @@ int tscSendMsgToServer(SSqlObj *pSql) {
|
|||
.code = 0
|
||||
};
|
||||
|
||||
// NOTE: the rpc context should be acquired before sending data to server.
|
||||
// Otherwise, the pSql object may have been released already during the response function, which is
|
||||
// processMsgFromServer function. In the meanwhile, the assignment of the rpc context to sql object will absolutely
|
||||
// cause crash.
|
||||
pSql->rpcRid = rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg);
|
||||
rpcSendRequest(pObj->pDnodeConn, &pSql->epSet, &rpcMsg, &pSql->rpcRid);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -702,7 +698,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
pQueryMsg->queryType = htonl(pQueryInfo->type);
|
||||
|
||||
size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo);
|
||||
pQueryMsg->numOfOutput = htons((int16_t)numOfOutput);
|
||||
pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number
|
||||
|
||||
// set column list ids
|
||||
size_t numOfCols = taosArrayGetSize(pQueryInfo->colList);
|
||||
|
@ -764,12 +760,15 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
assert(pExpr->resColId < 0);
|
||||
|
||||
pSqlFuncExpr->colInfo.colId = htons(pExpr->colInfo.colId);
|
||||
pSqlFuncExpr->colInfo.colIndex = htons(pExpr->colInfo.colIndex);
|
||||
pSqlFuncExpr->colInfo.flag = htons(pExpr->colInfo.flag);
|
||||
|
||||
pSqlFuncExpr->functionId = htons(pExpr->functionId);
|
||||
pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams);
|
||||
pSqlFuncExpr->resColId = htons(pExpr->resColId);
|
||||
pMsg += sizeof(SSqlFuncMsg);
|
||||
|
||||
for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
|
||||
|
@ -787,7 +786,73 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
|
||||
pSqlFuncExpr = (SSqlFuncMsg *)pMsg;
|
||||
}
|
||||
|
||||
|
||||
if(tscIsSecondStageQuery(pQueryInfo)) {
|
||||
size_t output = tscNumOfFields(pQueryInfo);
|
||||
pQueryMsg->secondStageOutput = htonl((int32_t) output);
|
||||
|
||||
SSqlFuncMsg *pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
|
||||
|
||||
for (int32_t i = 0; i < output; ++i) {
|
||||
SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
|
||||
SSqlExpr *pExpr = pField->pSqlExpr;
|
||||
if (pExpr != NULL) {
|
||||
if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
|
||||
tscError("%p table schema is not matched with parsed sql", pSql);
|
||||
return TSDB_CODE_TSC_INVALID_SQL;
|
||||
}
|
||||
|
||||
pSqlFuncExpr1->colInfo.colId = htons(pExpr->colInfo.colId);
|
||||
pSqlFuncExpr1->colInfo.colIndex = htons(pExpr->colInfo.colIndex);
|
||||
pSqlFuncExpr1->colInfo.flag = htons(pExpr->colInfo.flag);
|
||||
|
||||
pSqlFuncExpr1->functionId = htons(pExpr->functionId);
|
||||
pSqlFuncExpr1->numOfParams = htons(pExpr->numOfParams);
|
||||
pMsg += sizeof(SSqlFuncMsg);
|
||||
|
||||
for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
|
||||
// todo add log
|
||||
pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExpr->param[j].nType);
|
||||
pSqlFuncExpr1->arg[j].argBytes = htons(pExpr->param[j].nLen);
|
||||
|
||||
if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) {
|
||||
memcpy(pMsg, pExpr->param[j].pz, pExpr->param[j].nLen);
|
||||
pMsg += pExpr->param[j].nLen;
|
||||
} else {
|
||||
pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64Key);
|
||||
}
|
||||
}
|
||||
|
||||
pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
|
||||
} else {
|
||||
assert(pField->pArithExprInfo != NULL);
|
||||
SExprInfo* pExprInfo = pField->pArithExprInfo;
|
||||
|
||||
pSqlFuncExpr1->colInfo.colId = htons(pExprInfo->base.colInfo.colId);
|
||||
pSqlFuncExpr1->functionId = htons(pExprInfo->base.functionId);
|
||||
pSqlFuncExpr1->numOfParams = htons(pExprInfo->base.numOfParams);
|
||||
pMsg += sizeof(SSqlFuncMsg);
|
||||
|
||||
for (int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) {
|
||||
// todo add log
|
||||
pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExprInfo->base.arg[j].argType);
|
||||
pSqlFuncExpr1->arg[j].argBytes = htons(pExprInfo->base.arg[j].argBytes);
|
||||
|
||||
if (pExprInfo->base.arg[j].argType == TSDB_DATA_TYPE_BINARY) {
|
||||
memcpy(pMsg, pExprInfo->base.arg[j].argValue.pz, pExprInfo->base.arg[j].argBytes);
|
||||
pMsg += pExprInfo->base.arg[j].argBytes;
|
||||
} else {
|
||||
pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExprInfo->base.arg[j].argValue.i64);
|
||||
}
|
||||
}
|
||||
|
||||
pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
pQueryMsg->secondStageOutput = 0;
|
||||
}
|
||||
|
||||
// serialize the table info (sid, uid, tags)
|
||||
pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg);
|
||||
|
||||
|
@ -814,7 +879,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
|
|||
}
|
||||
|
||||
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
|
||||
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
for (int32_t i = 0; i < tscSqlExprNumOfExprs(pQueryInfo); ++i) {
|
||||
*((int64_t *)pMsg) = htobe64(pQueryInfo->fillVal[i]);
|
||||
pMsg += sizeof(pQueryInfo->fillVal[0]);
|
||||
}
|
||||
|
@ -1950,7 +2015,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
|
|||
SInternalField* pInfo = tscFieldInfoAppend(pFieldInfo, &f);
|
||||
|
||||
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index,
|
||||
pTableSchema[i].type, pTableSchema[i].bytes, pTableSchema[i].bytes, false);
|
||||
pTableSchema[i].type, pTableSchema[i].bytes, getNewResColId(pQueryInfo), pTableSchema[i].bytes, false);
|
||||
}
|
||||
|
||||
pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
|
||||
|
|
|
@ -28,7 +28,6 @@
|
|||
#include "tutil.h"
|
||||
#include "ttimer.h"
|
||||
#include "tscProfile.h"
|
||||
#include "ttimer.h"
|
||||
|
||||
static bool validImpl(const char* str, size_t maxsize) {
|
||||
if (str == NULL) {
|
||||
|
@ -482,7 +481,7 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
|
|||
|
||||
assert(0);
|
||||
for (int i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
|
||||
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);
|
||||
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, 0);
|
||||
}
|
||||
|
||||
*rows = pRes->tsrow;
|
||||
|
|
|
@@ -1642,9 +1642,9 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
}

tExtMemBuffer ** pMemoryBuf = NULL;
tOrderDescriptor *pDesc = NULL;
SColumnModel * pModel = NULL;

tOrderDescriptor *pDesc = NULL;
SColumnModel *pModel = NULL;

pRes->qhandle = 0x1; // hack the qhandle check

const uint32_t nBufferSize = (1u << 16); // 64KB

@@ -1707,7 +1707,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
trs->subqueryIndex = i;
trs->pParentSql = pSql;
trs->pFinalColModel = pModel;

SSqlObj *pNew = tscCreateSTableSubquery(pSql, trs, NULL);
if (pNew == NULL) {
tscError("%p failed to malloc buffer for subObj, orderOfSub:%d, reason:%s", pSql, i, strerror(errno));

@@ -1762,10 +1762,6 @@ static void tscFreeRetrieveSup(SSqlObj *pSql) {
}

tscDebug("%p start to free subquery supp obj:%p", pSql, trsupport);
// int32_t index = trsupport->subqueryIndex;
// SSqlObj *pParentSql = trsupport->pParentSql;

// assert(pSql == pParentSql->pSubs[index]);
tfree(trsupport->localBuffer);
tfree(trsupport);
}

@@ -1956,7 +1952,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
SQueryInfo *pPQueryInfo = tscGetQueryInfoDetail(&pParentSql->cmd, 0);
tscClearInterpInfo(pPQueryInfo);

tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, pParentSql);
tscCreateLocalReducer(trsupport->pExtMemBuffer, pState->numOfSub, pDesc, trsupport->pFinalColModel, trsupport->pFFColModel, pParentSql);
tscDebug("%p build loser tree completed", pParentSql);

pParentSql->res.precision = pSql->res.precision;

@@ -2418,7 +2414,7 @@ static void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pF
}
}

static char *getArithemicInputSrc(void *param, const char *name, int32_t colId) {
char *getArithemicInputSrc(void *param, const char *name, int32_t colId) {
SArithmeticSupport *pSupport = (SArithmeticSupport *) param;

int32_t index = -1;

@@ -2449,48 +2445,22 @@ TAOS_ROW doSetResultRowData(SSqlObj *pSql, bool finalResult) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);

size_t size = tscNumOfFields(pQueryInfo);
int32_t offset = 0;

for (int i = 0; i < size; ++i) {
SInternalField* pSup = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
if (pSup->pSqlExpr != NULL) {
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);
}
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i, offset);
TAOS_FIELD *pField = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);

offset += pField->bytes;

// primary key column cannot be null in interval query, no need to check
if (i == 0 && pQueryInfo->interval.interval > 0) {
continue;
}

TAOS_FIELD *pField = TARRAY_GET_ELEM(pQueryInfo->fieldsInfo.internalField, i);
if (pRes->tsrow[i] != NULL && pField->type == TSDB_DATA_TYPE_NCHAR) {
transferNcharData(pSql, i, pField);
}

// calculate the result from several other columns
if (pSup->pArithExprInfo != NULL) {
if (pRes->pArithSup == NULL) {
pRes->pArithSup = (SArithmeticSupport*)calloc(1, sizeof(SArithmeticSupport));
}

pRes->pArithSup->offset = 0;
pRes->pArithSup->pArithExpr = pSup->pArithExprInfo;
pRes->pArithSup->numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
pRes->pArithSup->exprList = pQueryInfo->exprList;
pRes->pArithSup->data = calloc(pRes->pArithSup->numOfCols, POINTER_BYTES);

if (pRes->buffer[i] == NULL) {
TAOS_FIELD* field = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
pRes->buffer[i] = malloc(field->bytes);
}

for(int32_t k = 0; k < pRes->pArithSup->numOfCols; ++k) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, k);
pRes->pArithSup->data[k] = (pRes->data + pRes->numOfRows* pExpr->offset) + pRes->row*pExpr->resBytes;
}

tExprTreeCalcTraverse(pRes->pArithSup->pArithExpr->pExpr, 1, pRes->buffer[i], pRes->pArithSup,
TSDB_ORDER_ASC, getArithemicInputSrc);
pRes->tsrow[i] = (unsigned char*)pRes->buffer[i];
}
}

pRes->row++; // index increase one-step
@@ -105,6 +105,7 @@ void taos_init_imp(void) {
taosReadGlobalCfg();
taosCheckGlobalCfg();

rpcInit();
tscDebug("starting to initialize TAOS client ...");
tscDebug("Local End Point is:%s", tsLocalEp);
}

@@ -179,6 +180,7 @@ void taos_cleanup(void) {
taosCloseRef(tscRefId);
taosCleanupKeywordsTable();
taosCloseLog();
if (tscEmbedded == 0) rpcCleanup();

m = tscTmr;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscTmr, m, 0) == m) {
@@ -219,6 +219,24 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) {
return true;
}

bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo) {
size_t numOfOutput = tscNumOfFields(pQueryInfo);
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);

if (numOfOutput == numOfExprs) {
return false;
}

for(int32_t i = 0; i < numOfOutput; ++i) {
SExprInfo* pExprInfo = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i)->pArithExprInfo;
if (pExprInfo != NULL) {
return true;
}
}

return false;
}

bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {

@@ -855,28 +873,11 @@ void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo) {

SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
pExpr->offset = 0;

for (int32_t i = 1; i < numOfExprs; ++i) {
SSqlExpr* prev = taosArrayGetP(pQueryInfo->exprList, i - 1);
SSqlExpr* p = taosArrayGetP(pQueryInfo->exprList, i);

p->offset = prev->offset + prev->resBytes;
}
}

void tscFieldInfoUpdateOffsetForInterResult(SQueryInfo* pQueryInfo) {
if (tscSqlExprNumOfExprs(pQueryInfo) == 0) {
return;
}

SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
pExpr->offset = 0;

size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 1; i < numOfExprs; ++i) {
SSqlExpr* prev = taosArrayGetP(pQueryInfo->exprList, i - 1);
SSqlExpr* p = taosArrayGetP(pQueryInfo->exprList, i);

p->offset = prev->offset + prev->resBytes;
}
}

@@ -944,6 +945,14 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {

if (pInfo->pArithExprInfo != NULL) {
tExprTreeDestroy(&pInfo->pArithExprInfo->pExpr, NULL);

SSqlFuncMsg* pFuncMsg = &pInfo->pArithExprInfo->base;
for(int32_t j = 0; j < pFuncMsg->numOfParams; ++j) {
if (pFuncMsg->arg[j].argType == TSDB_DATA_TYPE_BINARY) {
tfree(pFuncMsg->arg[j].argValue.pz);
}
}

tfree(pInfo->pArithExprInfo);
}
}

@@ -955,7 +964,7 @@ void tscFieldInfoClear(SFieldInfo* pFieldInfo) {
}

static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, int32_t colType) {
int16_t size, int16_t resColId, int16_t interSize, int32_t colType) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex);

SSqlExpr* pExpr = calloc(1, sizeof(SSqlExpr));

@@ -988,8 +997,9 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol

pExpr->resType = type;
pExpr->resBytes = size;
pExpr->resColId = resColId;
pExpr->interBytes = interSize;

if (pTableMetaInfo->pTableMeta) {
pExpr->uid = pTableMetaInfo->pTableMeta->id.uid;
}

@@ -998,20 +1008,20 @@ static SSqlExpr* doBuildSqlExpr(SQueryInfo* pQueryInfo, int16_t functionId, SCol
}

SSqlExpr* tscSqlExprInsert(SQueryInfo* pQueryInfo, int32_t index, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, bool isTagCol) {
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) {
int32_t num = (int32_t)taosArrayGetSize(pQueryInfo->exprList);
if (index == num) {
return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol);
return tscSqlExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
}

SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol);
SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
taosArrayInsert(pQueryInfo->exprList, index, &pExpr);
return pExpr;
}

SSqlExpr* tscSqlExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type,
int16_t size, int16_t interSize, bool isTagCol) {
SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, interSize, isTagCol);
int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) {
SSqlExpr* pExpr = doBuildSqlExpr(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol);
taosArrayPush(pQueryInfo->exprList, &pExpr);
return pExpr;
}

@@ -1039,16 +1049,14 @@ size_t tscSqlExprNumOfExprs(SQueryInfo* pQueryInfo) {
return taosArrayGetSize(pQueryInfo->exprList);
}

void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex) {
if (pExpr == NULL || argument == NULL || bytes == 0) {
return;
}
void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes) {
assert (pExpr != NULL || argument != NULL || bytes != 0);

// set parameter value
// transfer to tVariant from byte data/no ascii data
tVariantCreateFromBinary(&pExpr->param[pExpr->numOfParams], argument, bytes, type);

pExpr->numOfParams += 1;

assert(pExpr->numOfParams <= 3);
}

@@ -1601,6 +1609,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
pQueryInfo->resColumnId= -1000;
}

int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
@@ -197,7 +197,8 @@ public class TSDBConnection implements Connection {

public SQLWarning getWarnings() throws SQLException {
//todo: implement getWarnings according to the warning messages returned from TDengine
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
return null;
// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}

public void clearWarnings() throws SQLException {
@ -24,8 +24,8 @@ int32_t dnodeInitVWrite();
|
|||
void dnodeCleanupVWrite();
|
||||
void dnodeDispatchToVWriteQueue(SRpcMsg *pMsg);
|
||||
void * dnodeAllocVWriteQueue(void *pVnode);
|
||||
void dnodeFreeVWriteQueue(void *wqueue);
|
||||
void dnodeSendRpcVWriteRsp(void *pVnode, void *param, int32_t code);
|
||||
void dnodeFreeVWriteQueue(void *pWqueue);
|
||||
void dnodeSendRpcVWriteRsp(void *pVnode, void *pWrite, int32_t code);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
#include "tconfig.h"
|
||||
#include "tglobal.h"
|
||||
#include "twal.h"
|
||||
#include "trpc.h"
|
||||
#include "dnode.h"
|
||||
#include "dnodeInt.h"
|
||||
#include "dnodeMgmt.h"
|
||||
|
@ -54,6 +55,7 @@ typedef struct {
|
|||
} SDnodeComponent;
|
||||
|
||||
static const SDnodeComponent tsDnodeComponents[] = {
|
||||
{"rpc", rpcInit, rpcCleanup},
|
||||
{"storage", dnodeInitStorage, dnodeCleanupStorage},
|
||||
{"dnodecfg", dnodeInitCfg, dnodeCleanupCfg},
|
||||
{"dnodeeps", dnodeInitEps, dnodeCleanupEps},
|
||||
|
|
|
@ -151,6 +151,13 @@ void dnodeCleanupClient() {
|
|||
}
|
||||
|
||||
static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) {
|
||||
if (dnodeGetRunStatus() != TSDB_RUN_STATUS_RUNING) {
|
||||
if (pMsg == NULL || pMsg->pCont == NULL) return;
|
||||
dDebug("msg:%p is ignored since dnode not running", pMsg);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
return;
|
||||
}
|
||||
|
||||
if (pMsg->msgType == TSDB_MSG_TYPE_DM_STATUS_RSP && pEpSet) {
|
||||
dnodeUpdateEpSetForPeer(pEpSet);
|
||||
}
|
||||
|
@ -169,7 +176,7 @@ void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg)) {
|
|||
}
|
||||
|
||||
void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg) {
|
||||
rpcSendRequest(tsClientRpc, epSet, rpcMsg);
|
||||
rpcSendRequest(tsClientRpc, epSet, rpcMsg, NULL);
|
||||
}
|
||||
|
||||
void dnodeSendMsgToMnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp) {
|
||||
|
@ -180,4 +187,4 @@ void dnodeSendMsgToMnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp) {
|
|||
|
||||
void dnodeSendMsgToDnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp, SRpcEpSet *epSet) {
|
||||
rpcSendRecv(tsClientRpc, epSet, rpcMsg, rpcRsp);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -38,11 +38,11 @@ typedef struct {
|
|||
} SVWriteWorkerPool;
|
||||
|
||||
static SVWriteWorkerPool tsVWriteWP;
|
||||
static void *dnodeProcessVWriteQueue(void *param);
|
||||
static void *dnodeProcessVWriteQueue(void *pWorker);
|
||||
|
||||
int32_t dnodeInitVWrite() {
|
||||
tsVWriteWP.max = tsNumOfCores;
|
||||
tsVWriteWP.worker = (SVWriteWorker *)tcalloc(sizeof(SVWriteWorker), tsVWriteWP.max);
|
||||
tsVWriteWP.worker = tcalloc(sizeof(SVWriteWorker), tsVWriteWP.max);
|
||||
if (tsVWriteWP.worker == NULL) return -1;
|
||||
pthread_mutex_init(&tsVWriteWP.mutex, NULL);
|
||||
|
||||
|
@ -162,13 +162,13 @@ void *dnodeAllocVWriteQueue(void *pVnode) {
|
|||
return queue;
|
||||
}
|
||||
|
||||
void dnodeFreeVWriteQueue(void *wqueue) {
|
||||
taosCloseQueue(wqueue);
|
||||
void dnodeFreeVWriteQueue(void *pWqueue) {
|
||||
taosCloseQueue(pWqueue);
|
||||
}
|
||||
|
||||
void dnodeSendRpcVWriteRsp(void *pVnode, void *param, int32_t code) {
|
||||
if (param == NULL) return;
|
||||
SVWriteMsg *pWrite = param;
|
||||
void dnodeSendRpcVWriteRsp(void *pVnode, void *wparam, int32_t code) {
|
||||
if (wparam == NULL) return;
|
||||
SVWriteMsg *pWrite = wparam;
|
||||
|
||||
if (code < 0) pWrite->code = code;
|
||||
int32_t count = atomic_add_fetch_32(&pWrite->processedCount, 1);
|
||||
|
@ -183,13 +183,11 @@ void dnodeSendRpcVWriteRsp(void *pVnode, void *param, int32_t code) {
|
|||
};
|
||||
|
||||
rpcSendResponse(&rpcRsp);
|
||||
taosFreeQitem(pWrite);
|
||||
|
||||
vnodeRelease(pVnode);
|
||||
vnodeFreeFromWQueue(pVnode, pWrite);
|
||||
}
|
||||
|
||||
static void *dnodeProcessVWriteQueue(void *param) {
|
||||
SVWriteWorker *pWorker = param;
|
||||
static void *dnodeProcessVWriteQueue(void *wparam) {
|
||||
SVWriteWorker *pWorker = wparam;
|
||||
SVWriteMsg * pWrite;
|
||||
void * pVnode;
|
||||
int32_t numOfMsgs;
|
||||
|
@ -232,8 +230,7 @@ static void *dnodeProcessVWriteQueue(void *param) {
|
|||
if (pWrite->rspRet.rsp) {
|
||||
rpcFreeCont(pWrite->rspRet.rsp);
|
||||
}
|
||||
taosFreeQitem(pWrite);
|
||||
vnodeRelease(pVnode);
|
||||
vnodeFreeFromWQueue(pVnode, pWrite);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -54,8 +54,8 @@ void dnodeSendMsgToDnodeRecv(SRpcMsg *rpcMsg, SRpcMsg *rpcRsp, SRpcEpSet *epSet
|
|||
void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid);
|
||||
|
||||
void *dnodeAllocVWriteQueue(void *pVnode);
|
||||
void dnodeFreeVWriteQueue(void *wqueue);
|
||||
void dnodeSendRpcVWriteRsp(void *pVnode, void *param, int32_t code);
|
||||
void dnodeFreeVWriteQueue(void *pWqueue);
|
||||
void dnodeSendRpcVWriteRsp(void *pVnode, void *pWrite, int32_t code);
|
||||
void *dnodeAllocVReadQueue(void *pVnode);
|
||||
void dnodeFreeVReadQueue(void *rqueue);
|
||||
|
||||
|
|
|
@ -201,6 +201,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "Missing da
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "Out of memory")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "Unexpected generic error in vnode")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, 0, 0x050A, "Invalid version file")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, 0, 0x050B, "Vnode memory is full because commit failed")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "Database suspended")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Write operation denied")
|
||||
|
||||
|
|
|
@ -392,6 +392,7 @@ typedef struct SColIndex {
|
|||
typedef struct SSqlFuncMsg {
|
||||
int16_t functionId;
|
||||
int16_t numOfParams;
|
||||
int16_t resColId; // result column id, id of the current output column
|
||||
|
||||
SColIndex colInfo;
|
||||
struct ArgElem {
|
||||
|
@ -461,11 +462,6 @@ typedef struct STimeWindow {
|
|||
TSKEY ekey;
|
||||
} STimeWindow;
|
||||
|
||||
/*
|
||||
* the outputCols is equalled to or larger than numOfCols
|
||||
* e.g., select min(colName), max(colName), avg(colName) from table
|
||||
* the outputCols will be 3 while the numOfCols is 1.
|
||||
*/
|
||||
typedef struct {
|
||||
SMsgHead head;
|
||||
STimeWindow window;
|
||||
|
@ -483,13 +479,14 @@ typedef struct {
|
|||
uint32_t queryType; // denote another query process
|
||||
int16_t numOfOutput; // final output columns numbers
|
||||
int16_t tagNameRelType; // relation of tag criteria and tbname criteria
|
||||
int16_t fillType; // interpolate type
|
||||
uint64_t fillVal; // default value array list
|
||||
int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed
|
||||
int32_t tsLen; // total length of ts comp block
|
||||
int32_t tsNumOfBlocks; // ts comp block numbers
|
||||
int32_t tsOrder; // ts comp block order
|
||||
int32_t numOfTags; // number of tags columns involved
|
||||
int16_t fillType; // interpolate type
|
||||
uint64_t fillVal; // default value array list
|
||||
int32_t secondStageOutput;
|
||||
int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed
|
||||
int32_t tsLen; // total length of ts comp block
|
||||
int32_t tsNumOfBlocks; // ts comp block numbers
|
||||
int32_t tsOrder; // ts comp block order
|
||||
int32_t numOfTags; // number of tags columns involved
|
||||
SColumnInfo colList[];
|
||||
} SQueryTableMsg;
|
||||
|
||||
|
|
|
@@ -78,12 +78,14 @@ typedef struct SRpcInit {
int (*afp)(char *tableId, char *spi, char *encrypt, char *secret, char *ckey);
} SRpcInit;

int32_t rpcInit();
void rpcCleanup();
void *rpcOpen(const SRpcInit *pRpc);
void rpcClose(void *);
void *rpcMallocCont(int contLen);
void rpcFreeCont(void *pCont);
void *rpcReallocCont(void *ptr, int contLen);
int64_t rpcSendRequest(void *thandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg);
void rpcSendRequest(void *thandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid);
void rpcSendResponse(const SRpcMsg *pMsg);
void rpcSendRedirectRsp(void *pConn, const SRpcEpSet *pEpSet);
int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
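The header change above turns rpcSendRequest() into a void function that reports the request id through an int64_t out-parameter; callers elsewhere in this commit pass NULL when they do not need it. Below is a minimal caller sketch under that new signature; the handle and endpoint set are assumed to come from rpcOpen() and configuration, and the header name is assumed to be trpc.h.

```c
/* Hedged sketch of a caller using the new rpcSendRequest() signature.
 * pRpc is a handle returned by rpcOpen(); epSet describes the target endpoints. */
#include "trpc.h"   /* assumed location of the declarations shown above */

void demoSendRequest(void *pRpc, SRpcEpSet *epSet) {
  SRpcMsg rpcMsg = {0};
  rpcMsg.pCont   = rpcMallocCont(128);  /* content buffer owned by the rpc layer */
  rpcMsg.contLen = 128;
  rpcMsg.msgType = 1;                   /* arbitrary type, mirroring the test clients in this commit */

  int64_t rid = 0;                      /* receives the request id; pass NULL to ignore it */
  rpcSendRequest(pRpc, epSet, &rpcMsg, &rid);
}
```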
@ -46,7 +46,7 @@ extern "C" {
|
|||
typedef struct {
|
||||
void *appH;
|
||||
void *cqH;
|
||||
int (*notifyStatus)(void *, int status);
|
||||
int (*notifyStatus)(void *, int status, int eno);
|
||||
int (*eventCallBack)(void *);
|
||||
void *(*cqCreateFunc)(void *handle, uint64_t uid, int sid, char *sqlStr, STSchema *pSchema);
|
||||
void (*cqDropFunc)(void *handle);
|
||||
|
@ -83,7 +83,7 @@ STsdbCfg *tsdbGetCfg(const TSDB_REPO_T *repo);
|
|||
int tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg);
|
||||
int32_t tsdbDropRepo(char *rootDir);
|
||||
TSDB_REPO_T *tsdbOpenRepo(char *rootDir, STsdbAppH *pAppH);
|
||||
void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit);
|
||||
int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit);
|
||||
int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg);
|
||||
int tsdbGetState(TSDB_REPO_T *repo);
|
||||
|
||||
|
|
|
@ -70,11 +70,12 @@ void* vnodeAcquire(int32_t vgId); // add refcount
|
|||
void vnodeRelease(void *pVnode); // dec refCount
|
||||
void* vnodeGetWal(void *pVnode);
|
||||
|
||||
int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam);
|
||||
int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rparam);
|
||||
int32_t vnodeWriteToWQueue(void *pVnode, void *pHead, int32_t qtype, void *pRpcMsg);
|
||||
void vnodeFreeFromWQueue(void *pVnode, SVWriteMsg *pWrite);
|
||||
int32_t vnodeProcessWrite(void *pVnode, void *pHead, int32_t qtype, void *pRspRet);
|
||||
int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes);
|
||||
void vnodeBuildStatusMsg(void *param);
|
||||
void vnodeConfirmForward(void *param, uint64_t version, int32_t code);
|
||||
void vnodeBuildStatusMsg(void *pStatus);
|
||||
void vnodeConfirmForward(void *pVnode, uint64_t version, int32_t code);
|
||||
void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes);
|
||||
|
||||
int32_t vnodeInitResources();
|
||||
|
|
|
@ -3,3 +3,4 @@ PROJECT(TDengine)
|
|||
|
||||
ADD_SUBDIRECTORY(shell)
|
||||
ADD_SUBDIRECTORY(taosdemo)
|
||||
ADD_SUBDIRECTORY(taosdump)
|
||||
|
|
|
@ -46,7 +46,7 @@ static struct argp_option options[] = {
|
|||
{"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."},
|
||||
{"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."},
|
||||
{"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."},
|
||||
{"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, valid option: client | server."},
|
||||
{"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, options: client|clients|server."},
|
||||
{"endport", 'e', "ENDPORT", 0, "Net test end port, default is 6042."},
|
||||
{"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."},
|
||||
{0}};
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
|
||||
PROJECT(TDengine)
|
||||
|
||||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
|
||||
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
|
||||
INCLUDE_DIRECTORIES(inc)
|
||||
AUX_SOURCE_DIRECTORY(. SRC)
|
||||
|
||||
IF (TD_LINUX)
|
||||
ADD_EXECUTABLE(taosdump ${SRC})
|
||||
IF (TD_SOMODE_STATIC)
|
||||
TARGET_LINK_LIBRARIES(taosdump taos_static)
|
||||
ELSE ()
|
||||
TARGET_LINK_LIBRARIES(taosdump taos)
|
||||
ENDIF ()
|
||||
ENDIF ()
|
File diff suppressed because it is too large
|
@ -0,0 +1,48 @@
|
|||
taos1_6="/root/mnt/work/test/td1.6/build/bin/taos"
|
||||
taosdump1_6="/root/mnt/work/test/td1.6/build/bin/taosdump"
|
||||
taoscfg1_6="/root/mnt/work/test/td1.6/test/cfg"
|
||||
|
||||
taos2_0="/root/mnt/work/test/td2.0/build/bin/taos"
|
||||
taosdump2_0="/root/mnt/work/test/td2.0/build/bin/taosdump"
|
||||
taoscfg2_0="/root/mnt/work/test/td2.0/test/cfg"
|
||||
|
||||
data_dir="/root/mnt/work/test/td1.6/output"
|
||||
table_list="/root/mnt/work/test/td1.6/tables"
|
||||
|
||||
DBNAME="test"
|
||||
NTABLES=$(wc -l ${table_list} | awk '{print $1;}')
|
||||
NTABLES_PER_DUMP=101
|
||||
|
||||
mkdir -p ${data_dir}
|
||||
i=0
|
||||
round=0
|
||||
command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}"
|
||||
while IFS= read -r line
|
||||
do
|
||||
i=$((i+1))
|
||||
|
||||
command="${command} ${line}"
|
||||
|
||||
if [[ "$i" -eq ${NTABLES_PER_DUMP} ]]; then
|
||||
round=$((round+1))
|
||||
echo "Starting round ${round} dump out..."
|
||||
rm -f ${data_dir}/*
|
||||
${command}
|
||||
echo "Starting round ${round} dump in..."
|
||||
${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir}
|
||||
|
||||
# Reset variables
|
||||
# command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 ${DBNAME}"
|
||||
command="${taosdump1_6} -c ${taoscfg1_6} -o ${data_dir} -N 100 -T 20 ${DBNAME}"
|
||||
i=0
|
||||
fi
|
||||
done < "${table_list}"
|
||||
|
||||
if [[ ${i} -ne "0" ]]; then
|
||||
round=$((round+1))
|
||||
echo "Starting round ${round} dump out..."
|
||||
rm -f ${data_dir}/*
|
||||
${command}
|
||||
echo "Starting round ${round} dump in..."
|
||||
${taosdump2_0} -c ${taoscfg2_0} -i ${data_dir}
|
||||
fi
|
|
@@ -37,7 +37,7 @@ extern "C" {
#endif

#ifndef TAOS_OS_DEF_EPOLL
#define TAOS_EPOLL_WAIT_TIME -1
#define TAOS_EPOLL_WAIT_TIME 500
#endif

#ifdef TAOS_RANDOM_NETWORK_FAIL
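TAOS_EPOLL_WAIT_TIME above replaces hard-coded epoll_wait timeouts: 500 ms on platforms without TAOS_OS_DEF_EPOLL, -1 (block indefinitely) otherwise. The standalone sketch below, with hypothetical names, shows why a finite timeout matters: it lets the polling thread notice a stop flag set by another thread even when no socket events arrive.

```c
/* Standalone sketch (not TDengine code): an epoll loop with a finite timeout
 * so a stop flag set elsewhere is observed promptly. */
#include <sys/epoll.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_EPOLL_WAIT_TIME 500   /* milliseconds, mirrors TAOS_EPOLL_WAIT_TIME */
#define DEMO_MAX_EVENTS      10

static volatile bool demoStop = false;  /* set by a controlling thread to request shutdown */

void demoPollLoop(int pollFd) {
  struct epoll_event events[DEMO_MAX_EVENTS];
  while (1) {
    int fdNum = epoll_wait(pollFd, events, DEMO_MAX_EVENTS, DEMO_EPOLL_WAIT_TIME);
    if (demoStop) {                     /* checked at least every 500 ms thanks to the timeout */
      printf("poll thread got stop flag, exiting\n");
      break;
    }
    for (int i = 0; i < fdNum; ++i) {
      /* handle events[i] here */
    }
  }
}
```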
@ -111,6 +111,9 @@ void taosUninitTimer() {
|
|||
pthread_sigmask(SIG_BLOCK, &set, NULL);
|
||||
*/
|
||||
void taosMsleep(int mseconds) {
|
||||
#if 1
|
||||
usleep(mseconds * 1000);
|
||||
#else
|
||||
struct timeval timeout;
|
||||
int seconds, useconds;
|
||||
|
||||
|
@ -126,7 +129,8 @@ void taosMsleep(int mseconds) {
|
|||
|
||||
select(0, NULL, NULL, NULL, &timeout);
|
||||
|
||||
/* pthread_sigmask(SIG_UNBLOCK, &set, NULL); */
|
||||
/* pthread_sigmask(SIG_UNBLOCK, &set, NULL); */
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif
|
|
@@ -85,7 +85,7 @@ static void httpProcessHttpData(void *param) {
while (1) {
struct epoll_event events[HTTP_MAX_EVENTS];
//-1 means uncertainty, 0-nowait, 1-wait 1 ms, set it from -1 to 1
fdNum = epoll_wait(pThread->pollFd, events, HTTP_MAX_EVENTS, 1);
fdNum = epoll_wait(pThread->pollFd, events, HTTP_MAX_EVENTS, TAOS_EPOLL_WAIT_TIME);
if (pThread->stop) {
httpDebug("%p, http thread get stop event, exiting...", pThread);
break;
@ -152,7 +152,10 @@ typedef struct SQuery {
|
|||
SLimitVal limit;
|
||||
int32_t rowSize;
|
||||
SSqlGroupbyExpr* pGroupbyExpr;
|
||||
SExprInfo* pSelectExpr;
|
||||
SExprInfo* pExpr1;
|
||||
SExprInfo* pExpr2;
|
||||
int32_t numOfExpr2;
|
||||
|
||||
SColumnInfo* colList;
|
||||
SColumnInfo* tagColList;
|
||||
int32_t numOfFilterCols;
|
||||
|
|
|
@ -43,7 +43,8 @@ typedef struct SHistogramInfo {
|
|||
int32_t numOfElems;
|
||||
int32_t numOfEntries;
|
||||
int32_t maxEntries;
|
||||
|
||||
double min;
|
||||
double max;
|
||||
#if defined(USE_ARRAYLIST)
|
||||
SHistBin* elems;
|
||||
#else
|
||||
|
@ -52,9 +53,6 @@ typedef struct SHistogramInfo {
|
|||
int32_t maxIndex;
|
||||
bool ordered;
|
||||
#endif
|
||||
|
||||
double min;
|
||||
double max;
|
||||
} SHistogramInfo;
|
||||
|
||||
SHistogramInfo* tHistogramCreate(int32_t numOfBins);
|
||||
|
|
|
@ -48,7 +48,7 @@ static FORCE_INLINE SResultRow *getResultRow(SWindowResInfo *pWindowResInfo, int
|
|||
}
|
||||
|
||||
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
|
||||
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pSelectExpr[1].base.arg->argValue.i64:1)
|
||||
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pExpr1[1].base.arg->argValue.i64:1)
|
||||
|
||||
bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot);
|
||||
|
||||
|
@ -62,7 +62,7 @@ static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int3
|
|||
|
||||
int32_t realRowId = (int32_t)(pResult->rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery));
|
||||
return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage +
|
||||
pQuery->pSelectExpr[columnIndex].bytes * realRowId;
|
||||
pQuery->pExpr1[columnIndex].bytes * realRowId;
|
||||
}
|
||||
|
||||
bool isNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval);
|
||||
|
|
|
@ -128,7 +128,7 @@ typedef struct SArithmeticSupport {
|
|||
SExprInfo *pArithExpr;
|
||||
int32_t numOfCols;
|
||||
SColumnInfo *colList;
|
||||
SArray* exprList; // client side used
|
||||
void *exprList; // client side used
|
||||
int32_t offset;
|
||||
char** data;
|
||||
} SArithmeticSupport;
|
||||
|
|
File diff suppressed because it is too large
|
@ -225,7 +225,7 @@ tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) {
|
|||
tSQLExprDestroy(pLeft);
|
||||
tSQLExprDestroy(pRight);
|
||||
|
||||
} else if (pLeft->nSQLOptr == TK_FLOAT || pRight->nSQLOptr == TK_FLOAT) {
|
||||
} else if ((pLeft->nSQLOptr == TK_FLOAT && pRight->nSQLOptr == TK_INTEGER) || (pLeft->nSQLOptr == TK_INTEGER && pRight->nSQLOptr == TK_FLOAT)) {
|
||||
pExpr->val.nType = TSDB_DATA_TYPE_DOUBLE;
|
||||
pExpr->nSQLOptr = TK_FLOAT;
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ int32_t getOutputInterResultBufSize(SQuery* pQuery) {
|
|||
int32_t size = 0;
|
||||
|
||||
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
|
||||
size += pQuery->pSelectExpr[i].interBytes;
|
||||
size += pQuery->pExpr1[i].interBytes;
|
||||
}
|
||||
|
||||
assert(size >= 0);
|
||||
|
@ -237,7 +237,7 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pWindowRes) {
|
|||
SResultRowCellInfo *pResultInfo = &pWindowRes->pCellInfo[i];
|
||||
|
||||
char * s = getPosInResultPage(pRuntimeEnv, i, pWindowRes, page);
|
||||
size_t size = pRuntimeEnv->pQuery->pSelectExpr[i].bytes;
|
||||
size_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes;
|
||||
memset(s, 0, size);
|
||||
|
||||
RESET_RESULT_INFO(pResultInfo);
|
||||
|
@ -280,7 +280,7 @@ void copyResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *dst, const SResult
|
|||
|
||||
tFilePage *srcpage = getResBufPage(pRuntimeEnv->pResultBuf, src->pageId);
|
||||
char * srcBuf = getPosInResultPage(pRuntimeEnv, i, (SResultRow *)src, srcpage);
|
||||
size_t s = pRuntimeEnv->pQuery->pSelectExpr[i].bytes;
|
||||
size_t s = pRuntimeEnv->pQuery->pExpr1[i].bytes;
|
||||
|
||||
memcpy(dstBuf, srcBuf, s);
|
||||
}
|
||||
|
|
|
@ -135,7 +135,7 @@ int tsRpcOverhead;
|
|||
|
||||
static int tsRpcRefId = -1;
|
||||
static int32_t tsRpcNum = 0;
|
||||
static pthread_once_t tsRpcInit = PTHREAD_ONCE_INIT;
|
||||
//static pthread_once_t tsRpcInit = PTHREAD_ONCE_INIT;
|
||||
|
||||
// server:0 client:1 tcp:2 udp:0
|
||||
#define RPC_CONN_UDPS 0
|
||||
|
@ -221,13 +221,15 @@ static void rpcFree(void *p) {
|
|||
free(p);
|
||||
}
|
||||
|
||||
void rpcInit(void) {
|
||||
int32_t rpcInit(void) {
|
||||
tsProgressTimer = tsRpcTimer/2;
|
||||
tsRpcMaxRetry = tsRpcMaxTime * 1000/tsProgressTimer;
|
||||
tsRpcHeadSize = RPC_MSG_OVERHEAD;
|
||||
tsRpcOverhead = sizeof(SRpcReqContext);
|
||||
|
||||
tsRpcRefId = taosOpenRef(200, rpcFree);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void rpcCleanup(void) {
|
||||
|
@ -238,7 +240,7 @@ void rpcCleanup(void) {
|
|||
void *rpcOpen(const SRpcInit *pInit) {
|
||||
SRpcInfo *pRpc;
|
||||
|
||||
pthread_once(&tsRpcInit, rpcInit);
|
||||
//pthread_once(&tsRpcInit, rpcInit);
|
||||
|
||||
pRpc = (SRpcInfo *)calloc(1, sizeof(SRpcInfo));
|
||||
if (pRpc == NULL) return NULL;
|
||||
|
@@ -379,7 +381,7 @@ void *rpcReallocCont(void *ptr, int contLen) {
return start + sizeof(SRpcReqContext) + sizeof(SRpcHead);
}

int64_t rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg) {
void rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg, int64_t *pRid) {
SRpcInfo *pRpc = (SRpcInfo *)shandle;
SRpcReqContext *pContext;

@@ -405,14 +407,10 @@ int64_t rpcSendRequest(void *shandle, const SRpcEpSet *pEpSet, SRpcMsg *pMsg) {
|| type == TSDB_MSG_TYPE_CM_SHOW )
pContext->connType = RPC_CONN_TCPC;

// set the handle to pContext, so app can cancel the request
if (pMsg->handle) *((void **)pMsg->handle) = pContext;

pContext->rid = taosAddRef(tsRpcRefId, pContext);
if (pRid) *pRid = pContext->rid;

rpcSendReqToServer(pRpc, pContext);

return pContext->rid;
}

void rpcSendResponse(const SRpcMsg *pRsp) {
@ -528,7 +526,7 @@ void rpcSendRecv(void *shandle, SRpcEpSet *pEpSet, SRpcMsg *pMsg, SRpcMsg *pRsp)
|
|||
pContext->pRsp = pRsp;
|
||||
pContext->pSet = pEpSet;
|
||||
|
||||
rpcSendRequest(shandle, pEpSet, pMsg);
|
||||
rpcSendRequest(shandle, pEpSet, pMsg, NULL);
|
||||
|
||||
tsem_wait(&sem);
|
||||
tsem_destroy(&sem);
|
||||
|
|
|
@@ -171,40 +171,17 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
}

static void taosStopTcpThread(SThreadObj* pThreadObj) {
pThreadObj->stop = true;
eventfd_t fd = -1;

// save thread into local variable since pThreadObj is freed when thread exits
// save thread into local variable and signal thread to stop
pthread_t thread = pThreadObj->thread;

if (taosComparePthread(pThreadObj->thread, pthread_self())) {
if (!taosCheckPthreadValid(thread)) {
return;
}
pThreadObj->stop = true;
if (taosComparePthread(thread, pthread_self())) {
pthread_detach(pthread_self());
return;
}

if (taosCheckPthreadValid(pThreadObj->thread)) {
// signal the thread to stop, try graceful method first,
// and use pthread_cancel when failed
struct epoll_event event = { .events = EPOLLIN };
fd = eventfd(1, 0);
if (fd == -1) {
// failed to create eventfd, call pthread_cancel instead, which may result in data corruption:
tError("%s, failed to create eventfd(%s)", pThreadObj->label, strerror(errno));
pThreadObj->stop = true;
pthread_cancel(pThreadObj->thread);
} else if (epoll_ctl(pThreadObj->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) {
// failed to call epoll_ctl, call pthread_cancel instead, which may result in data corruption:
tError("%s, failed to call epoll_ctl(%s)", pThreadObj->label, strerror(errno));
pthread_cancel(pThreadObj->thread);
}
}

// at this step, pThreadObj has already been released
if (taosCheckPthreadValid(thread)) {
pthread_join(thread, NULL);
}

if (fd != -1) taosCloseSocket(fd);
pthread_join(thread, NULL);
}

void taosStopTcpServer(void *handle) {
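The rewritten taosStopTcpThread() above wakes the poll loop by registering an already-readable eventfd in the epoll set, then joins the thread, falling back to pthread_cancel only when the wakeup cannot be armed. Below is a condensed standalone sketch of that pattern; it is not TDengine code and all names are hypothetical.

```c
/* Standalone sketch: stop an epoll-based worker gracefully with an eventfd wakeup. */
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <pthread.h>
#include <unistd.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

typedef struct {
  pthread_t     thread;
  int           pollFd;
  volatile bool stop;
} DemoThreadObj;

void demoStopPollThread(DemoThreadObj *pObj) {
  pthread_t thread = pObj->thread;   /* local copy, in case pObj is freed when the worker exits */
  pObj->stop = true;

  struct epoll_event event = { .events = EPOLLIN };
  int fd = eventfd(1, 0);            /* counter starts at 1, so it is readable immediately */
  if (fd == -1 || epoll_ctl(pObj->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) {
    fprintf(stderr, "wakeup failed (%s), cancelling thread\n", strerror(errno));
    pthread_cancel(thread);          /* last resort, as in the code above */
  }

  pthread_join(thread, NULL);
  if (fd >= 0) close(fd);
}
```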
@ -57,7 +57,7 @@ static void *sendRequest(void *param) {
|
|||
rpcMsg.ahandle = pInfo;
|
||||
rpcMsg.msgType = 1;
|
||||
tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num);
|
||||
rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg);
|
||||
rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL);
|
||||
if ( pInfo->num % 20000 == 0 )
|
||||
tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num);
|
||||
tsem_wait(&pInfo->rspSem);
|
||||
|
|
|
@ -36,6 +36,7 @@ extern "C" {
|
|||
#define TAOS_SMSG_STATUS 7
|
||||
|
||||
#define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16)
|
||||
#define SYNC_RECV_BUFFER_SIZE (5*1024*1024)
|
||||
|
||||
#define nodeRole pNode->peerInfo[pNode->selfIndex]->role
|
||||
#define nodeVersion pNode->peerInfo[pNode->selfIndex]->version
|
||||
|
|
|
@ -179,6 +179,13 @@ int64_t syncStart(const SSyncInfo *pInfo) {
|
|||
for (int32_t i = 0; i < pCfg->replica; ++i) {
|
||||
const SNodeInfo *pNodeInfo = pCfg->nodeInfo + i;
|
||||
pNode->peerInfo[i] = syncAddPeer(pNode, pNodeInfo);
|
||||
if (pNode->peerInfo[i] == NULL) {
|
||||
sError("vgId:%d, node:%d fqdn:%s port:%u is not configured, stop taosd", pNode->vgId, pNodeInfo->nodeId, pNodeInfo->nodeFqdn,
|
||||
pNodeInfo->nodePort);
|
||||
syncStop(pNode->rid);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if ((strcmp(pNodeInfo->nodeFqdn, tsNodeFqdn) == 0) && (pNodeInfo->nodePort == tsSyncPort)) {
|
||||
pNode->selfIndex = i;
|
||||
}
|
||||
|
@ -476,7 +483,11 @@ static void syncRemovePeer(SSyncPeer *pPeer) {
|
|||
|
||||
static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
|
||||
uint32_t ip = taosGetIpFromFqdn(pInfo->nodeFqdn);
|
||||
if (ip == -1) return NULL;
|
||||
if (ip == 0xFFFFFFFF) {
|
||||
sError("failed to add peer, can resolve fqdn:%s since %s", pInfo->nodeFqdn, strerror(errno));
|
||||
terrno = TSDB_CODE_RPC_FQDN_ERROR;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SSyncPeer *pPeer = calloc(1, sizeof(SSyncPeer));
|
||||
if (pPeer == NULL) return NULL;
|
||||
|
@ -578,6 +589,7 @@ static void syncChooseMaster(SSyncNode *pNode) {
|
|||
|
||||
#if 0
|
||||
for (int32_t i = 0; i < pNode->replica; ++i) {
|
||||
if (i == index) continue;
|
||||
pPeer = pNode->peerInfo[i];
|
||||
if (pPeer->version == nodeVersion) {
|
||||
pPeer->role = TAOS_SYNC_ROLE_SLAVE;
|
||||
|
|
|
@ -136,7 +136,7 @@ static int32_t syncRestoreWal(SSyncPeer *pPeer) {
|
|||
SSyncNode *pNode = pPeer->pSyncNode;
|
||||
int32_t ret, code = -1;
|
||||
|
||||
void *buffer = calloc(1024000, 1); // size for one record
|
||||
void *buffer = calloc(SYNC_MAX_SIZE, 1); // size for one record
|
||||
if (buffer == NULL) return -1;
|
||||
|
||||
SWalHead *pHead = (SWalHead *)buffer;
|
||||
|
@ -237,7 +237,7 @@ static int32_t syncOpenRecvBuffer(SSyncNode *pNode) {
|
|||
SRecvBuffer *pRecv = calloc(sizeof(SRecvBuffer), 1);
|
||||
if (pRecv == NULL) return -1;
|
||||
|
||||
pRecv->bufferSize = 5000000;
|
||||
pRecv->bufferSize = SYNC_RECV_BUFFER_SIZE;
|
||||
pRecv->buffer = malloc(pRecv->bufferSize);
|
||||
if (pRecv->buffer == NULL) {
|
||||
free(pRecv);
|
||||
|
|
|
@ -182,6 +182,8 @@ static int32_t syncReadOneWalRecord(int32_t sfd, SWalHead *pHead, uint32_t *pEve
|
|||
return 0;
|
||||
}
|
||||
|
||||
assert(pHead->len <= TSDB_MAX_WAL_SIZE);
|
||||
|
||||
ret = read(sfd, pHead->cont, pHead->len);
|
||||
if (ret < 0) return -1;
|
||||
|
||||
|
|
|
@ -301,31 +301,14 @@ static SThreadObj *taosGetTcpThread(SPoolObj *pPool) {
|
|||
}
|
||||
|
||||
static void taosStopPoolThread(SThreadObj *pThread) {
|
||||
pthread_t thread = pThread->thread;
|
||||
if (!taosCheckPthreadValid(thread)) {
|
||||
return;
|
||||
}
|
||||
pThread->stop = true;
|
||||
|
||||
if (pThread->thread == pthread_self()) {
|
||||
if (taosComparePthread(thread, pthread_self())) {
|
||||
pthread_detach(pthread_self());
|
||||
return;
|
||||
}
|
||||
|
||||
// save thread ID into a local variable, since pThread is freed when the thread exits
|
||||
pthread_t thread = pThread->thread;
|
||||
|
||||
// signal the thread to stop, try graceful method first,
|
||||
// and use pthread_cancel when failed
|
||||
struct epoll_event event = {.events = EPOLLIN};
|
||||
eventfd_t fd = eventfd(1, 0);
|
||||
if (fd == -1) {
|
||||
// failed to create eventfd, call pthread_cancel instead, which may result in data corruption
|
||||
sError("failed to create eventfd since %s", strerror(errno));
|
||||
pthread_cancel(pThread->thread);
|
||||
pThread->stop = true;
|
||||
} else if (epoll_ctl(pThread->pollFd, EPOLL_CTL_ADD, fd, &event) < 0) {
|
||||
// failed to call epoll_ctl, call pthread_cancel instead, which may result in data corruption
|
||||
sError("failed to call epoll_ctl since %s", strerror(errno));
|
||||
pthread_cancel(pThread->thread);
|
||||
}
|
||||
|
||||
pthread_join(thread, NULL);
|
||||
if (fd >= 0) taosClose(fd);
|
||||
}
|
||||
|
|
|
@ -57,7 +57,7 @@ void *sendRequest(void *param) {
|
|||
rpcMsg.ahandle = pInfo;
|
||||
rpcMsg.msgType = 1;
|
||||
uDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num);
|
||||
rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg);
|
||||
rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL);
|
||||
if (pInfo->num % 20000 == 0) {
|
||||
uInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num);
|
||||
}
|
||||
|
|
|
@ -208,6 +208,18 @@ typedef struct {
|
|||
} SFileGroupIter;
|
||||
|
||||
// ------------------ tsdbMain.c
|
||||
typedef struct {
|
||||
int32_t totalLen;
|
||||
int32_t len;
|
||||
SDataRow row;
|
||||
} SSubmitBlkIter;
|
||||
|
||||
typedef struct {
|
||||
int32_t totalLen;
|
||||
int32_t len;
|
||||
void * pMsg;
|
||||
} SSubmitMsgIter;
|
||||
|
||||
typedef struct {
|
||||
int8_t state;
|
||||
|
||||
|
@ -223,6 +235,7 @@ typedef struct {
|
|||
sem_t readyToCommit;
|
||||
pthread_mutex_t mutex;
|
||||
bool repoLocked;
|
||||
int32_t code; // Commit code
|
||||
} STsdbRepo;
|
||||
|
||||
// ------------------ tsdbRWHelper.c
|
||||
|
@ -430,7 +443,6 @@ void tsdbCloseBufPool(STsdbRepo* pRepo);
|
|||
SListNode* tsdbAllocBufBlockFromPool(STsdbRepo* pRepo);
|
||||
|
||||
// ------------------ tsdbMemTable.c
|
||||
int tsdbUpdateRowInMem(STsdbRepo* pRepo, SDataRow row, STable* pTable);
|
||||
int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
|
||||
int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
|
||||
int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem);
|
||||
|
|
|
@ -0,0 +1,340 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#include "tsdbMain.h"
|
||||
|
||||
static int tsdbCommitTSData(STsdbRepo *pRepo);
|
||||
static int tsdbCommitMeta(STsdbRepo *pRepo);
|
||||
static void tsdbEndCommit(STsdbRepo *pRepo, int eno);
|
||||
static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey);
|
||||
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols);
|
||||
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo);
|
||||
static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables);
|
||||
|
||||
void *tsdbCommitData(STsdbRepo *pRepo) {
|
||||
SMemTable * pMem = pRepo->imem;
|
||||
|
||||
tsdbInfo("vgId:%d start to commit! keyFirst %" PRId64 " keyLast %" PRId64 " numOfRows %" PRId64 " meta rows: %d",
|
||||
REPO_ID(pRepo), pMem->keyFirst, pMem->keyLast, pMem->numOfRows, listNEles(pMem->actList));
|
||||
|
||||
pRepo->code = TSDB_CODE_SUCCESS;
|
||||
|
||||
// Commit to update meta file
|
||||
if (tsdbCommitMeta(pRepo) < 0) {
|
||||
tsdbError("vgId:%d error occurs while committing META data since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
// Create the iterator to read from cache
|
||||
if (tsdbCommitTSData(pRepo) < 0) {
|
||||
tsdbError("vgId:%d error occurs while committing TS data since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
tsdbFitRetention(pRepo);
|
||||
|
||||
tsdbInfo("vgId:%d commit over, succeed", REPO_ID(pRepo));
|
||||
tsdbEndCommit(pRepo, TSDB_CODE_SUCCESS);
|
||||
|
||||
return NULL;
|
||||
|
||||
_err:
|
||||
ASSERT(terrno != TSDB_CODE_SUCCESS);
|
||||
pRepo->code = terrno;
|
||||
tsdbInfo("vgId:%d commit over, failed", REPO_ID(pRepo));
|
||||
tsdbEndCommit(pRepo, terrno);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int tsdbCommitTSData(STsdbRepo *pRepo) {
|
||||
SMemTable * pMem = pRepo->imem;
|
||||
SDataCols * pDataCols = NULL;
|
||||
STsdbMeta * pMeta = pRepo->tsdbMeta;
|
||||
SCommitIter *iters = NULL;
|
||||
SRWHelper whelper = {0};
|
||||
STsdbCfg * pCfg = &(pRepo->config);
|
||||
|
||||
if (pMem->numOfRows <= 0) return 0;
|
||||
|
||||
iters = tsdbCreateCommitIters(pRepo);
|
||||
if (iters == NULL) {
|
||||
tsdbError("vgId:%d failed to create commit iterator since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
if (tsdbInitWriteHelper(&whelper, pRepo) < 0) {
|
||||
tsdbError("vgId:%d failed to init write helper since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
if ((pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pCfg->maxRowsPerFileBlock)) == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
tsdbError("vgId:%d failed to init data cols with maxRowBytes %d maxCols %d maxRowsPerFileBlock %d since %s",
|
||||
REPO_ID(pRepo), pMeta->maxCols, pMeta->maxRowBytes, pCfg->maxRowsPerFileBlock, tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
int sfid = (int)(TSDB_KEY_FILEID(pMem->keyFirst, pCfg->daysPerFile, pCfg->precision));
|
||||
int efid = (int)(TSDB_KEY_FILEID(pMem->keyLast, pCfg->daysPerFile, pCfg->precision));
|
||||
|
||||
// Loop to commit to each file
|
||||
for (int fid = sfid; fid <= efid; fid++) {
|
||||
if (tsdbCommitToFile(pRepo, fid, iters, &whelper, pDataCols) < 0) {
|
||||
tsdbError("vgId:%d failed to commit to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
tdFreeDataCols(pDataCols);
|
||||
tsdbDestroyCommitIters(iters, pMem->maxTables);
|
||||
tsdbDestroyHelper(&whelper);
|
||||
|
||||
return 0;
|
||||
|
||||
_err:
|
||||
tdFreeDataCols(pDataCols);
|
||||
tsdbDestroyCommitIters(iters, pMem->maxTables);
|
||||
tsdbDestroyHelper(&whelper);
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int tsdbCommitMeta(STsdbRepo *pRepo) {
|
||||
SMemTable *pMem = pRepo->imem;
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
SActObj * pAct = NULL;
|
||||
SActCont * pCont = NULL;
|
||||
|
||||
if (listNEles(pMem->actList) <= 0) return 0;
|
||||
|
||||
if (tdKVStoreStartCommit(pMeta->pStore) < 0) {
|
||||
tsdbError("vgId:%d failed to commit data while start commit meta since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
SListNode *pNode = NULL;
|
||||
|
||||
while ((pNode = tdListPopHead(pMem->actList)) != NULL) {
|
||||
pAct = (SActObj *)pNode->data;
|
||||
if (pAct->act == TSDB_UPDATE_META) {
|
||||
pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj));
|
||||
if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) {
|
||||
tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
|
||||
tstrerror(terrno));
|
||||
tdKVStoreEndCommit(pMeta->pStore);
|
||||
goto _err;
|
||||
}
|
||||
} else if (pAct->act == TSDB_DROP_META) {
|
||||
if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) {
|
||||
tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
|
||||
tstrerror(terrno));
|
||||
tdKVStoreEndCommit(pMeta->pStore);
|
||||
goto _err;
|
||||
}
|
||||
} else {
|
||||
ASSERT(false);
|
||||
}
|
||||
}
|
||||
|
||||
if (tdKVStoreEndCommit(pMeta->pStore) < 0) {
|
||||
tsdbError("vgId:%d failed to commit data while end commit meta since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
_err:
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void tsdbEndCommit(STsdbRepo *pRepo, int eno) {
|
||||
if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER, eno);
|
||||
sem_post(&(pRepo->readyToCommit));
|
||||
}
|
||||
|
||||
static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey) {
|
||||
for (int i = 0; i < nIters; i++) {
|
||||
TSKEY nextKey = tsdbNextIterKey((iters + i)->pIter);
|
||||
if (nextKey != TSDB_DATA_TIMESTAMP_NULL && (nextKey >= minKey && nextKey <= maxKey)) return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols) {
|
||||
char * dataDir = NULL;
|
||||
STsdbCfg * pCfg = &pRepo->config;
|
||||
STsdbFileH *pFileH = pRepo->tsdbFileH;
|
||||
SFileGroup *pGroup = NULL;
|
||||
SMemTable * pMem = pRepo->imem;
|
||||
bool newLast = false;
|
||||
|
||||
TSKEY minKey = 0, maxKey = 0;
|
||||
tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey);
|
||||
|
||||
// Check if there are data to commit to this file
|
||||
int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey);
|
||||
if (!hasDataToCommit) {
|
||||
tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Create and open files for commit
|
||||
dataDir = tsdbGetDataDirName(pRepo->rootDir);
|
||||
if (dataDir == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) {
|
||||
tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
// Open files for write/read
|
||||
if (tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) {
|
||||
tsdbError("vgId:%d failed to set helper file since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
newLast = TSDB_NLAST_FILE_OPENED(pHelper);
|
||||
|
||||
if (tsdbLoadCompIdx(pHelper, NULL) < 0) {
|
||||
tsdbError("vgId:%d failed to load SCompIdx part since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
// Loop to commit data in each table
|
||||
for (int tid = 1; tid < pMem->maxTables; tid++) {
|
||||
SCommitIter *pIter = iters + tid;
|
||||
if (pIter->pTable == NULL) continue;
|
||||
|
||||
taosRLockLatch(&(pIter->pTable->latch));
|
||||
|
||||
if (tsdbSetHelperTable(pHelper, pIter->pTable, pRepo) < 0) goto _err;
|
||||
|
||||
if (pIter->pIter != NULL) {
|
||||
if (tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1)) < 0) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
if (tsdbCommitTableData(pHelper, pIter, pDataCols, maxKey) < 0) {
|
||||
taosRUnLockLatch(&(pIter->pTable->latch));
|
||||
tsdbError("vgId:%d failed to write data of table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo),
|
||||
TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable),
|
||||
tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
taosRUnLockLatch(&(pIter->pTable->latch));
|
||||
|
||||
// Move the last block to the new .l file if neccessary
|
||||
if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) {
|
||||
tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
// Write the SCompBlock part
|
||||
if (tsdbWriteCompInfo(pHelper) < 0) {
|
||||
tsdbError("vgId:%d, failed to write compInfo part since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
if (tsdbWriteCompIdx(pHelper) < 0) {
|
||||
tsdbError("vgId:%d failed to write compIdx part to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
tfree(dataDir);
|
||||
tsdbCloseHelperFile(pHelper, 0, pGroup);
|
||||
|
||||
pthread_rwlock_wrlock(&(pFileH->fhlock));
|
||||
|
||||
(void)rename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname);
|
||||
pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info;
|
||||
|
||||
if (newLast) {
|
||||
(void)rename(helperNewLastF(pHelper)->fname, helperLastF(pHelper)->fname);
|
||||
pGroup->files[TSDB_FILE_TYPE_LAST].info = helperNewLastF(pHelper)->info;
|
||||
} else {
|
||||
pGroup->files[TSDB_FILE_TYPE_LAST].info = helperLastF(pHelper)->info;
|
||||
}
|
||||
|
||||
pGroup->files[TSDB_FILE_TYPE_DATA].info = helperDataF(pHelper)->info;
|
||||
|
||||
pthread_rwlock_unlock(&(pFileH->fhlock));
|
||||
|
||||
return 0;
|
||||
|
||||
_err:
|
||||
tfree(dataDir);
|
||||
tsdbCloseHelperFile(pHelper, 1, pGroup);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
|
||||
SMemTable *pMem = pRepo->imem;
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
|
||||
SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter));
|
||||
if (iters == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (tsdbRLockRepoMeta(pRepo) < 0) goto _err;
|
||||
|
||||
// reference all tables
|
||||
for (int i = 0; i < pMem->maxTables; i++) {
|
||||
if (pMeta->tables[i] != NULL) {
|
||||
tsdbRefTable(pMeta->tables[i]);
|
||||
iters[i].pTable = pMeta->tables[i];
|
||||
}
|
||||
}
|
||||
|
||||
if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err;
|
||||
|
||||
for (int i = 0; i < pMem->maxTables; i++) {
|
||||
if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) {
|
||||
if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
tSkipListIterNext(iters[i].pIter);
|
||||
}
|
||||
}
|
||||
|
||||
return iters;
|
||||
|
||||
_err:
|
||||
tsdbDestroyCommitIters(iters, pMem->maxTables);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) {
|
||||
if (iters == NULL) return;
|
||||
|
||||
for (int i = 1; i < maxTables; i++) {
|
||||
if (iters[i].pTable != NULL) {
|
||||
tsdbUnRefTable(iters[i].pTable);
|
||||
tSkipListDestroyIter(iters[i].pIter);
|
||||
}
|
||||
}
|
||||
|
||||
free(iters);
|
||||
}
|
|
@ -256,7 +256,8 @@ SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) {
|
|||
pFileH->pFGroup[pFileH->nFGroups++] = fGroup;
|
||||
qsort((void *)(pFileH->pFGroup), pFileH->nFGroups, sizeof(SFileGroup), compFGroup);
|
||||
pthread_rwlock_unlock(&pFileH->fhlock);
|
||||
return tsdbSearchFGroup(pFileH, fid, TD_EQ);
|
||||
pGroup = tsdbSearchFGroup(pFileH, fid, TD_EQ);
|
||||
ASSERT(pGroup != NULL);
|
||||
}
|
||||
|
||||
return pGroup;
|
||||
|
@ -516,7 +517,7 @@ void tsdbGetFileInfoImpl(char *fname, uint32_t *magic, int64_t *size) {
|
|||
SFile file;
|
||||
SFile * pFile = &file;
|
||||
|
||||
strncpy(pFile->fname, fname, TSDB_FILENAME_LEN);
|
||||
strncpy(pFile->fname, fname, TSDB_FILENAME_LEN - 1);
|
||||
pFile->fd = -1;
|
||||
|
||||
if (tsdbOpenFile(pFile, O_RDONLY) < 0) goto _err;
|
||||
|
|
|
@ -32,18 +32,6 @@
|
|||
#define TSDB_DEFAULT_COMPRESSION TWO_STAGE_COMP
|
||||
#define IS_VALID_COMPRESSION(compression) (((compression) >= NO_COMPRESSION) && ((compression) <= TWO_STAGE_COMP))
|
||||
|
||||
typedef struct {
|
||||
int32_t totalLen;
|
||||
int32_t len;
|
||||
SDataRow row;
|
||||
} SSubmitBlkIter;
|
||||
|
||||
typedef struct {
|
||||
int32_t totalLen;
|
||||
int32_t len;
|
||||
void * pMsg;
|
||||
} SSubmitMsgIter;
|
||||
|
||||
static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg);
|
||||
static int32_t tsdbSetRepoEnv(char *rootDir, STsdbCfg *pCfg);
|
||||
static int32_t tsdbUnsetRepoEnv(char *rootDir);
|
||||
|
@ -52,20 +40,13 @@ static int tsdbLoadConfig(char *rootDir, STsdbCfg *pCfg);
|
|||
static char * tsdbGetCfgFname(char *rootDir);
|
||||
static STsdbRepo * tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg);
|
||||
static void tsdbFreeRepo(STsdbRepo *pRepo);
|
||||
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter);
|
||||
static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows);
|
||||
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock);
|
||||
static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter);
|
||||
static int tsdbRestoreInfo(STsdbRepo *pRepo);
|
||||
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);
|
||||
static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression);
|
||||
static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep);
|
||||
static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks);
|
||||
static int keyFGroupCompFunc(const void *key, const void *fgroup);
|
||||
static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg);
|
||||
static void * tsdbDecodeCfg(void *buf, STsdbCfg *pCfg);
|
||||
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable);
|
||||
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg);
|
||||
static void tsdbStartStream(STsdbRepo *pRepo);
|
||||
static void tsdbStopStream(STsdbRepo *pRepo);
|
||||
|
||||
|
@ -153,17 +134,20 @@ _err:
}

// Note: all working threads and query threads must be stopped before calling this function
void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) {
  if (repo == NULL) return;
int tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) {
  if (repo == NULL) return 0;

  STsdbRepo *pRepo = (STsdbRepo *)repo;
  int        vgId = REPO_ID(pRepo);

  terrno = TSDB_CODE_SUCCESS;

  tsdbStopStream(pRepo);

  if (toCommit) {
    tsdbAsyncCommit(pRepo);
    sem_wait(&(pRepo->readyToCommit));
    terrno = pRepo->code;
  }
  tsdbUnRefMemTable(pRepo, pRepo->mem);
  tsdbUnRefMemTable(pRepo, pRepo->imem);

@ -175,40 +159,12 @@ void tsdbCloseRepo(TSDB_REPO_T *repo, int toCommit) {
  tsdbCloseMeta(pRepo);
  tsdbFreeRepo(pRepo);
  tsdbDebug("vgId:%d repository is closed", vgId);
}

int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp) {
  STsdbRepo *    pRepo = (STsdbRepo *)repo;
  SSubmitMsgIter msgIter = {0};

  if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) {
    if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) {
      tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno));
    }
    if (terrno != TSDB_CODE_SUCCESS) {
      return -1;
    } else {
      return 0;
    }
  }

  if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) {
    tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno));
    return -1;
  }

  SSubmitBlk *pBlock = NULL;
  int32_t     affectedrows = 0;

  TSKEY now = taosGetTimestamp(pRepo->config.precision);
  while (true) {
    tsdbGetSubmitMsgNext(&msgIter, &pBlock);
    if (pBlock == NULL) break;
    if (tsdbInsertDataToTable(pRepo, pBlock, now, &affectedrows) < 0) {
      return -1;
    }
  }

  if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows);

  if (tsdbCheckCommit(pRepo) < 0) return -1;
  return 0;
}

uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size) {

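// Illustrative sketch (not from this patch; the wrapper name is invented):
// tsdbCloseRepo now returns int and copies the background-commit result into
// terrno, so a caller can surface the error instead of losing it.
static int closeRepoAndReport(TSDB_REPO_T *repo) {
  int ret = tsdbCloseRepo(repo, 1 /* commit remaining rows before closing */);
  if (ret < 0 || terrno != TSDB_CODE_SUCCESS) {
    return terrno;  // error recorded by the commit thread in pRepo->code
  }
  return TSDB_CODE_SUCCESS;
}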
@ -672,6 +628,7 @@ static STsdbRepo *tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg) {
  }

  pRepo->state = TSDB_STATE_OK;
  pRepo->code = TSDB_CODE_SUCCESS;

  int code = pthread_mutex_init(&pRepo->mutex, NULL);
  if (code != 0) {

@ -735,93 +692,6 @@ static void tsdbFreeRepo(STsdbRepo *pRepo) {
|
|||
}
|
||||
}
|
||||
|
||||
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) {
|
||||
if (pMsg == NULL) {
|
||||
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
|
||||
return -1;
|
||||
}
|
||||
|
||||
pIter->totalLen = pMsg->length;
|
||||
pIter->len = 0;
|
||||
pIter->pMsg = pMsg;
|
||||
if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) {
|
||||
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows) {
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
int64_t points = 0;
|
||||
|
||||
ASSERT(pBlock->tid < pMeta->maxTables);
|
||||
STable *pTable = pMeta->tables[pBlock->tid];
|
||||
ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
|
||||
|
||||
SSubmitBlkIter blkIter = {0};
|
||||
SDataRow row = NULL;
|
||||
|
||||
TSKEY minKey = now - tsMsPerDay[pRepo->config.precision] * pRepo->config.keep;
|
||||
TSKEY maxKey = now + tsMsPerDay[pRepo->config.precision] * pRepo->config.daysPerFile;
|
||||
|
||||
tsdbInitSubmitBlkIter(pBlock, &blkIter);
|
||||
while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) {
|
||||
if (dataRowKey(row) < minKey || dataRowKey(row) > maxKey) {
|
||||
tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64
|
||||
" maxKey %" PRId64,
|
||||
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), now, minKey, maxKey);
|
||||
terrno = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tsdbUpdateRowInMem(pRepo, row, pTable) < 0) return -1;
|
||||
|
||||
(*affectedrows)++;
|
||||
points++;
|
||||
}
|
||||
|
||||
STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion);
|
||||
pRepo->stat.pointsWritten += points * schemaNCols(pSchema);
|
||||
pRepo->stat.totalStorage += points * schemaVLen(pSchema);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) {
|
||||
if (pIter->len == 0) {
|
||||
pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE;
|
||||
} else {
|
||||
SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
|
||||
pIter->len += (sizeof(SSubmitBlk) + pSubmitBlk->dataLen + pSubmitBlk->schemaLen);
|
||||
}
|
||||
|
||||
if (pIter->len > pIter->totalLen) {
|
||||
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
|
||||
*pPBlock = NULL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
*pPBlock = (pIter->len == pIter->totalLen) ? NULL : (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) {
|
||||
SDataRow row = pIter->row;
|
||||
if (row == NULL) return NULL;
|
||||
|
||||
pIter->len += dataRowLen(row);
|
||||
if (pIter->len >= pIter->totalLen) {
|
||||
pIter->row = NULL;
|
||||
} else {
|
||||
pIter->row = (char *)row + dataRowLen(row);
|
||||
}
|
||||
|
||||
return row;
|
||||
}
|
||||
|
||||
static int tsdbRestoreInfo(STsdbRepo *pRepo) {
|
||||
STsdbMeta * pMeta = pRepo->tsdbMeta;
|
||||
STsdbFileH *pFileH = pRepo->tsdbFileH;
|
||||
|
@ -855,14 +725,6 @@ _err:
  return -1;
}

static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter) {
  if (pBlock->dataLen <= 0) return -1;
  pIter->totalLen = pBlock->dataLen;
  pIter->len = 0;
  pIter->row = (SDataRow)(pBlock->data + pBlock->schemaLen);
  return 0;
}

static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression) {
  int8_t ocompression = pRepo->config.compression;
  pRepo->config.compression = compression;

@ -959,134 +821,6 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) {
|
|||
return buf;
|
||||
}
|
||||
|
||||
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) {
|
||||
ASSERT(pTable != NULL);
|
||||
|
||||
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
|
||||
int sversion = schemaVersion(pSchema);
|
||||
|
||||
if (pBlock->sversion == sversion) {
|
||||
return 0;
|
||||
} else {
|
||||
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) { // stream table is not allowed to change schema
|
||||
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (pBlock->sversion > sversion) { // may need to update table schema
|
||||
if (pBlock->schemaLen > 0) {
|
||||
tsdbDebug(
|
||||
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, update...",
|
||||
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
|
||||
ASSERT(pBlock->schemaLen % sizeof(STColumn) == 0);
|
||||
int numOfCols = pBlock->schemaLen / sizeof(STColumn);
|
||||
STColumn *pTCol = (STColumn *)pBlock->data;
|
||||
|
||||
STSchemaBuilder schemaBuilder = {0};
|
||||
if (tdInitTSchemaBuilder(&schemaBuilder, pBlock->sversion) < 0) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
|
||||
tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
for (int i = 0; i < numOfCols; i++) {
|
||||
if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, htons(pTCol[i].colId), htons(pTCol[i].bytes)) < 0) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
|
||||
tstrerror(terrno));
|
||||
tdDestroyTSchemaBuilder(&schemaBuilder);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder);
|
||||
if (pNSchema == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
tdDestroyTSchemaBuilder(&schemaBuilder);
|
||||
return -1;
|
||||
}
|
||||
|
||||
tdDestroyTSchemaBuilder(&schemaBuilder);
|
||||
tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true);
|
||||
} else {
|
||||
tsdbDebug(
|
||||
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, reconfigure...",
|
||||
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
|
||||
terrno = TSDB_CODE_TDB_TABLE_RECONFIGURE;
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
ASSERT(pBlock->sversion >= 0);
|
||||
if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) {
|
||||
tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo),
|
||||
pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable));
|
||||
}
|
||||
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
|
||||
ASSERT(pMsg != NULL);
|
||||
STsdbMeta * pMeta = pRepo->tsdbMeta;
|
||||
SSubmitMsgIter msgIter = {0};
|
||||
SSubmitBlk * pBlock = NULL;
|
||||
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
pMsg->length = htonl(pMsg->length);
|
||||
pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
|
||||
|
||||
if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1;
|
||||
while (true) {
|
||||
if (tsdbGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1;
|
||||
if (pBlock == NULL) break;
|
||||
|
||||
pBlock->uid = htobe64(pBlock->uid);
|
||||
pBlock->tid = htonl(pBlock->tid);
|
||||
pBlock->sversion = htonl(pBlock->sversion);
|
||||
pBlock->dataLen = htonl(pBlock->dataLen);
|
||||
pBlock->schemaLen = htonl(pBlock->schemaLen);
|
||||
pBlock->numOfRows = htons(pBlock->numOfRows);
|
||||
|
||||
if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) {
|
||||
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
|
||||
pBlock->tid);
|
||||
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
|
||||
return -1;
|
||||
}
|
||||
|
||||
STable *pTable = pMeta->tables[pBlock->tid];
|
||||
if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) {
|
||||
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
|
||||
pBlock->tid);
|
||||
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
|
||||
tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable));
|
||||
terrno = TSDB_CODE_TDB_INVALID_ACTION;
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Check schema version and update schema if needed
|
||||
if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) {
|
||||
if (terrno == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
|
||||
continue;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (terrno != TSDB_CODE_SUCCESS) return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) {
|
||||
// TODO
|
||||
// STsdbCache *pCache = pRepo->tsdbCache;
|
||||
|
|
|
@ -18,117 +18,56 @@
|
|||
|
||||
#define TSDB_DATA_SKIPLIST_LEVEL 5
|
||||
|
||||
static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes);
|
||||
static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo);
|
||||
static void tsdbFreeMemTable(SMemTable *pMemTable);
|
||||
static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable);
|
||||
static void tsdbFreeTableData(STableData *pTableData);
|
||||
static char * tsdbGetTsTupleKey(const void *data);
|
||||
static int tsdbCommitMeta(STsdbRepo *pRepo);
|
||||
static void tsdbEndCommit(STsdbRepo *pRepo);
|
||||
static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey);
|
||||
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols);
|
||||
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo);
|
||||
static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables);
|
||||
static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables);
|
||||
static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SDataRow row);
|
||||
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);
|
||||
static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter);
|
||||
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg);
|
||||
static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows);
|
||||
static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void **ppRow);
|
||||
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter);
|
||||
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock);
|
||||
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable);
|
||||
static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter);
|
||||
static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter);
|
||||
|
||||
// ---------------- INTERNAL FUNCTIONS ----------------
|
||||
int tsdbUpdateRowInMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
|
||||
STsdbCfg * pCfg = &pRepo->config;
|
||||
STsdbMeta * pMeta = pRepo->tsdbMeta;
|
||||
TKEY tkey = dataRowTKey(row);
|
||||
TSKEY key = dataRowKey(row);
|
||||
SMemTable * pMemTable = pRepo->mem;
|
||||
STableData *pTableData = NULL;
|
||||
bool isRowDelete = TKEY_IS_DELETED(tkey);
|
||||
static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey,
|
||||
TSKEY now);
|
||||
|
||||
if (isRowDelete) {
|
||||
if (!pCfg->update) {
|
||||
tsdbWarn("vgId:%d vnode is not allowed to update but try to delete a data row", REPO_ID(pRepo));
|
||||
terrno = TSDB_CODE_TDB_INVALID_ACTION;
|
||||
return -1;
|
||||
int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp) {
|
||||
STsdbRepo * pRepo = (STsdbRepo *)repo;
|
||||
SSubmitMsgIter msgIter = {0};
|
||||
SSubmitBlk * pBlock = NULL;
|
||||
int32_t affectedrows = 0;
|
||||
|
||||
if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) {
|
||||
if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) {
|
||||
tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
}
|
||||
|
||||
if (key > TABLE_LASTKEY(pTable)) {
|
||||
tsdbTrace("vgId:%d skip to delete row key %" PRId64 " which is larger than table lastKey %" PRId64,
|
||||
REPO_ID(pRepo), key, TABLE_LASTKEY(pTable));
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
void *pRow = tsdbAllocBytes(pRepo, dataRowLen(row));
|
||||
if (pRow == NULL) {
|
||||
tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %d bytes since %s",
|
||||
REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), dataRowLen(row), tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
dataRowCpy(pRow, row);
|
||||
|
||||
// Operations above may change pRepo->mem, retake those values
|
||||
ASSERT(pRepo->mem != NULL);
|
||||
pMemTable = pRepo->mem;
|
||||
|
||||
if (TABLE_TID(pTable) >= pMemTable->maxTables) {
|
||||
if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) {
|
||||
tsdbFreeBytes(pRepo, pRow, dataRowLen(row));
|
||||
tsdbInitSubmitMsgIter(pMsg, &msgIter);
|
||||
while (true) {
|
||||
tsdbGetSubmitMsgNext(&msgIter, &pBlock);
|
||||
if (pBlock == NULL) break;
|
||||
if (tsdbInsertDataToTable(pRepo, pBlock, &affectedrows) < 0) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
pTableData = pMemTable->tData[TABLE_TID(pTable)];
|
||||
|
||||
if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) {
|
||||
if (pTableData != NULL) {
|
||||
taosWLockLatch(&(pMemTable->latch));
|
||||
pMemTable->tData[TABLE_TID(pTable)] = NULL;
|
||||
tsdbFreeTableData(pTableData);
|
||||
taosWUnLockLatch(&(pMemTable->latch));
|
||||
}
|
||||
|
||||
pTableData = tsdbNewTableData(pCfg, pTable);
|
||||
if (pTableData == NULL) {
|
||||
tsdbError("vgId:%d failed to insert row with key %" PRId64
|
||||
" to table %s while create new table data object since %s",
|
||||
REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), tstrerror(terrno));
|
||||
tsdbFreeBytes(pRepo, (void *)pRow, dataRowLen(row));
|
||||
return -1;
|
||||
}
|
||||
|
||||
pRepo->mem->tData[TABLE_TID(pTable)] = pTableData;
|
||||
}
|
||||
|
||||
ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable));
|
||||
|
||||
int64_t oldSize = SL_SIZE(pTableData->pData);
|
||||
if (tSkipListPut(pTableData->pData, pRow) == NULL) {
|
||||
tsdbFreeBytes(pRepo, (void *)pRow, dataRowLen(row));
|
||||
} else {
|
||||
int64_t deltaSize = SL_SIZE(pTableData->pData) - oldSize;
|
||||
if (isRowDelete) {
|
||||
if (TABLE_LASTKEY(pTable) == key) {
|
||||
// TODO: need to update table last key here (may from file)
|
||||
}
|
||||
} else {
|
||||
if (TABLE_LASTKEY(pTable) < key) TABLE_LASTKEY(pTable) = key;
|
||||
}
|
||||
|
||||
if (pMemTable->keyFirst > key) pMemTable->keyFirst = key;
|
||||
if (pMemTable->keyLast < key) pMemTable->keyLast = key;
|
||||
pMemTable->numOfRows += deltaSize;
|
||||
|
||||
if (pTableData->keyFirst > key) pTableData->keyFirst = key;
|
||||
if (pTableData->keyLast < key) pTableData->keyLast = key;
|
||||
pTableData->numOfRows += deltaSize;
|
||||
}
|
||||
|
||||
tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo),
|
||||
isRowDelete ? "deleted from" : "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable),
|
||||
key);
|
||||
if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows);
|
||||
|
||||
if (tsdbCheckCommit(pRepo) < 0) return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ---------------- INTERNAL FUNCTIONS ----------------
|
||||
int tsdbRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
|
||||
if (pMemTable == NULL) return 0;
|
||||
int ref = T_REF_INC(pMemTable);
|
||||
|
@ -152,7 +91,7 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
|
|||
}
|
||||
int code = pthread_cond_signal(&pBufPool->poolNotEmpty);
|
||||
if (code != 0) {
|
||||
tsdbUnlockRepo(pRepo);
|
||||
if (tsdbUnlockRepo(pRepo) < 0) return -1;
|
||||
tsdbError("vgId:%d failed to signal pool not empty since %s", REPO_ID(pRepo), strerror(code));
|
||||
terrno = TAOS_SYSTEM_ERROR(code);
|
||||
return -1;
|
||||
|
@ -189,6 +128,8 @@ int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) {
|
|||
}
|
||||
|
||||
void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem) {
|
||||
tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pMem, pIMem);
|
||||
|
||||
if (pMem != NULL) {
|
||||
taosRUnLockLatch(&(pMem->latch));
|
||||
tsdbUnRefMemTable(pRepo, pMem);
|
||||
|
@ -197,8 +138,6 @@ void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem)
|
|||
if (pIMem != NULL) {
|
||||
tsdbUnRefMemTable(pRepo, pIMem);
|
||||
}
|
||||
|
||||
tsdbDebug("vgId:%d untake memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), pMem, pIMem);
|
||||
}
|
||||
|
||||
void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
|
||||
|
@ -230,6 +169,10 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
|
|||
ASSERT(pRepo->mem->extraBuffList != NULL);
|
||||
SListNode *pNode = (SListNode *)malloc(sizeof(SListNode) + bytes);
|
||||
if (pNode == NULL) {
|
||||
if (listNEles(pRepo->mem->extraBuffList) == 0) {
|
||||
tdListFree(pRepo->mem->extraBuffList);
|
||||
pRepo->mem->extraBuffList = NULL;
|
||||
}
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
@ -260,19 +203,23 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
}

int tsdbAsyncCommit(STsdbRepo *pRepo) {
  if (pRepo->mem == NULL) return 0;

  SMemTable *pIMem = pRepo->imem;

  if (pRepo->mem != NULL) {
    sem_wait(&(pRepo->readyToCommit));
  sem_wait(&(pRepo->readyToCommit));

  if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START);
  if (tsdbLockRepo(pRepo) < 0) return -1;
  pRepo->imem = pRepo->mem;
  pRepo->mem = NULL;
  tsdbScheduleCommit(pRepo);
  if (tsdbUnlockRepo(pRepo) < 0) return -1;
  if (pRepo->code != TSDB_CODE_SUCCESS) {
    tsdbWarn("vgId:%d try to commit when TSDB not in good state: %s", REPO_ID(pRepo), tstrerror(terrno));
  }

  if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_START, TSDB_CODE_SUCCESS);
  if (tsdbLockRepo(pRepo) < 0) return -1;
  pRepo->imem = pRepo->mem;
  pRepo->mem = NULL;
  tsdbScheduleCommit(pRepo);
  if (tsdbUnlockRepo(pRepo) < 0) return -1;

  if (tsdbUnRefMemTable(pRepo, pIMem) < 0) return -1;

  return 0;

@ -280,10 +227,18 @@ int tsdbAsyncCommit(STsdbRepo *pRepo) {

int tsdbSyncCommit(TSDB_REPO_T *repo) {
  STsdbRepo *pRepo = (STsdbRepo *)repo;

  tsdbAsyncCommit(pRepo);
  sem_wait(&(pRepo->readyToCommit));
  sem_post(&(pRepo->readyToCommit));
  return 0;

  if (pRepo->code != TSDB_CODE_SUCCESS) {
    terrno = pRepo->code;
    return -1;
  } else {
    terrno = TSDB_CODE_SUCCESS;
    return 0;
  }
}

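// Illustrative sketch (not from this patch): tsdbSyncCommit blocks until the
// background commit finishes and then reports the recorded result, so callers
// that need the data durably on disk can simply check the return value.
if (tsdbSyncCommit(repo) < 0) {
  // terrno now holds the commit error copied from pRepo->code
  return -1;
}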
/**
|
||||
|
@ -406,88 +361,7 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey
|
|||
return 0;
|
||||
}
|
||||
|
||||
void *tsdbCommitData(STsdbRepo *pRepo) {
|
||||
SMemTable * pMem = pRepo->imem;
|
||||
STsdbCfg * pCfg = &pRepo->config;
|
||||
SDataCols * pDataCols = NULL;
|
||||
STsdbMeta * pMeta = pRepo->tsdbMeta;
|
||||
SCommitIter *iters = NULL;
|
||||
SRWHelper whelper = {0};
|
||||
ASSERT(pMem != NULL);
|
||||
|
||||
tsdbInfo("vgId:%d start to commit! keyFirst %" PRId64 " keyLast %" PRId64 " numOfRows %" PRId64, REPO_ID(pRepo),
|
||||
pMem->keyFirst, pMem->keyLast, pMem->numOfRows);
|
||||
|
||||
// Create the iterator to read from cache
|
||||
if (pMem->numOfRows > 0) {
|
||||
iters = tsdbCreateCommitIters(pRepo);
|
||||
if (iters == NULL) {
|
||||
tsdbError("vgId:%d failed to create commit iterator since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
if (tsdbInitWriteHelper(&whelper, pRepo) < 0) {
|
||||
tsdbError("vgId:%d failed to init write helper since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
if ((pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pCfg->maxRowsPerFileBlock)) == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
tsdbError("vgId:%d failed to init data cols with maxRowBytes %d maxCols %d maxRowsPerFileBlock %d since %s",
|
||||
REPO_ID(pRepo), pMeta->maxCols, pMeta->maxRowBytes, pCfg->maxRowsPerFileBlock, tstrerror(terrno));
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
int sfid = (int)(TSDB_KEY_FILEID(pMem->keyFirst, pCfg->daysPerFile, pCfg->precision));
|
||||
int efid = (int)(TSDB_KEY_FILEID(pMem->keyLast, pCfg->daysPerFile, pCfg->precision));
|
||||
|
||||
// Loop to commit to each file
|
||||
for (int fid = sfid; fid <= efid; fid++) {
|
||||
if (tsdbCommitToFile(pRepo, fid, iters, &whelper, pDataCols) < 0) {
|
||||
tsdbError("vgId:%d failed to commit to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
|
||||
goto _exit;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Commit to update meta file
|
||||
if (tsdbCommitMeta(pRepo) < 0) {
|
||||
tsdbError("vgId:%d failed to commit data while committing meta data since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
tsdbFitRetention(pRepo);
|
||||
|
||||
_exit:
|
||||
tdFreeDataCols(pDataCols);
|
||||
tsdbDestroyCommitIters(iters, pMem->maxTables);
|
||||
tsdbDestroyHelper(&whelper);
|
||||
tsdbInfo("vgId:%d commit over", pRepo->config.tsdbId);
|
||||
tsdbEndCommit(pRepo);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// ---------------- LOCAL FUNCTIONS ----------------
|
||||
static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes) {
|
||||
ASSERT(pRepo->mem != NULL);
|
||||
if (pRepo->mem->extraBuffList == NULL) {
|
||||
STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo);
|
||||
ASSERT(pBufBlock != NULL);
|
||||
pBufBlock->offset -= bytes;
|
||||
pBufBlock->remain += bytes;
|
||||
ASSERT(ptr == POINTER_SHIFT(pBufBlock->data, pBufBlock->offset));
|
||||
tsdbTrace("vgId:%d free %d bytes to TSDB buffer pool, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes,
|
||||
listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain);
|
||||
} else {
|
||||
SListNode *pNode = (SListNode *)POINTER_SHIFT(ptr, -(int)(sizeof(SListNode)));
|
||||
ASSERT(listTail(pRepo->mem->extraBuffList) == pNode);
|
||||
tdListPopNode(pRepo->mem->extraBuffList, pNode);
|
||||
free(pNode);
|
||||
tsdbTrace("vgId:%d free %d bytes to SYSTEM buffer pool", REPO_ID(pRepo), bytes);
|
||||
}
|
||||
}
|
||||
|
||||
static SMemTable* tsdbNewMemTable(STsdbRepo *pRepo) {
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
|
||||
|
@ -578,240 +452,11 @@ static void tsdbFreeTableData(STableData *pTableData) {
|
|||
|
||||
static char *tsdbGetTsTupleKey(const void *data) { return dataRowTuple((SDataRow)data); }
|
||||
|
||||
|
||||
static int tsdbCommitMeta(STsdbRepo *pRepo) {
|
||||
SMemTable *pMem = pRepo->imem;
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
SActObj * pAct = NULL;
|
||||
SActCont * pCont = NULL;
|
||||
|
||||
if (listNEles(pMem->actList) > 0) {
|
||||
if (tdKVStoreStartCommit(pMeta->pStore) < 0) {
|
||||
tsdbError("vgId:%d failed to commit data while start commit meta since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
SListNode *pNode = NULL;
|
||||
|
||||
while ((pNode = tdListPopHead(pMem->actList)) != NULL) {
|
||||
pAct = (SActObj *)pNode->data;
|
||||
if (pAct->act == TSDB_UPDATE_META) {
|
||||
pCont = (SActCont *)POINTER_SHIFT(pAct, sizeof(SActObj));
|
||||
if (tdUpdateKVStoreRecord(pMeta->pStore, pAct->uid, (void *)(pCont->cont), pCont->len) < 0) {
|
||||
tsdbError("vgId:%d failed to update meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
|
||||
tstrerror(terrno));
|
||||
tdKVStoreEndCommit(pMeta->pStore);
|
||||
goto _err;
|
||||
}
|
||||
} else if (pAct->act == TSDB_DROP_META) {
|
||||
if (tdDropKVStoreRecord(pMeta->pStore, pAct->uid) < 0) {
|
||||
tsdbError("vgId:%d failed to drop meta with uid %" PRIu64 " since %s", REPO_ID(pRepo), pAct->uid,
|
||||
tstrerror(terrno));
|
||||
tdKVStoreEndCommit(pMeta->pStore);
|
||||
goto _err;
|
||||
}
|
||||
} else {
|
||||
ASSERT(false);
|
||||
}
|
||||
}
|
||||
|
||||
if (tdKVStoreEndCommit(pMeta->pStore) < 0) {
|
||||
tsdbError("vgId:%d failed to commit data while end commit meta since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
_err:
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void tsdbEndCommit(STsdbRepo *pRepo) {
|
||||
if (pRepo->appH.notifyStatus) pRepo->appH.notifyStatus(pRepo->appH.appH, TSDB_STATUS_COMMIT_OVER);
|
||||
sem_post(&(pRepo->readyToCommit));
|
||||
}
|
||||
|
||||
static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey) {
|
||||
for (int i = 0; i < nIters; i++) {
|
||||
TSKEY nextKey = tsdbNextIterKey((iters + i)->pIter);
|
||||
if (nextKey != TSDB_DATA_TIMESTAMP_NULL && (nextKey >= minKey && nextKey <= maxKey)) return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|

void tsdbGetFidKeyRange(int daysPerFile, int8_t precision, int fileId, TSKEY *minKey, TSKEY *maxKey) {
  *minKey = fileId * daysPerFile * tsMsPerDay[precision];
  *maxKey = *minKey + daysPerFile * tsMsPerDay[precision] - 1;
}

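// Illustrative sketch (values invented, not from this patch): with millisecond
// precision and daysPerFile = 10, one file group spans 864,000,000 ms. File id
// 1860 therefore covers [1,607,040,000,000 .. 1,607,903,999,999], roughly
// 2020-12-04 through 2020-12-13 UTC; dividing a key by the same span gives the
// file id back (the TSDB_KEY_FILEID macro used elsewhere in this commit).
int   daysPerFile = 10;
TSKEY key = 1607040000000LL;  // 2020-12-04 00:00:00 UTC, millisecond precision
int   fid = (int)(key / (daysPerFile * tsMsPerDay[TSDB_TIME_PRECISION_MILLI]));  // 1860
TSKEY minKey, maxKey;
tsdbGetFidKeyRange(daysPerFile, TSDB_TIME_PRECISION_MILLI, fid, &minKey, &maxKey);
// minKey == 1607040000000, maxKey == 1607903999999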
|
||||
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols) {
|
||||
char * dataDir = NULL;
|
||||
STsdbCfg * pCfg = &pRepo->config;
|
||||
STsdbFileH *pFileH = pRepo->tsdbFileH;
|
||||
SFileGroup *pGroup = NULL;
|
||||
SMemTable * pMem = pRepo->imem;
|
||||
bool newLast = false;
|
||||
|
||||
TSKEY minKey = 0, maxKey = 0;
|
||||
tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey);
|
||||
|
||||
// Check if there are data to commit to this file
|
||||
int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey);
|
||||
if (!hasDataToCommit) {
|
||||
tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Create and open files for commit
|
||||
dataDir = tsdbGetDataDirName(pRepo->rootDir);
|
||||
if (dataDir == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) {
|
||||
tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
// Open files for write/read
|
||||
if (tsdbSetAndOpenHelperFile(pHelper, pGroup) < 0) {
|
||||
tsdbError("vgId:%d failed to set helper file since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
newLast = TSDB_NLAST_FILE_OPENED(pHelper);
|
||||
|
||||
if (tsdbLoadCompIdx(pHelper, NULL) < 0) {
|
||||
tsdbError("vgId:%d failed to load SCompIdx part since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
// Loop to commit data in each table
|
||||
for (int tid = 1; tid < pMem->maxTables; tid++) {
|
||||
SCommitIter *pIter = iters + tid;
|
||||
if (pIter->pTable == NULL) continue;
|
||||
|
||||
taosRLockLatch(&(pIter->pTable->latch));
|
||||
|
||||
if (tsdbSetHelperTable(pHelper, pIter->pTable, pRepo) < 0) goto _err;
|
||||
|
||||
if (pIter->pIter != NULL) {
|
||||
if (tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1)) < 0) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
if (tsdbCommitTableData(pHelper, pIter, pDataCols, maxKey) < 0) {
|
||||
taosRUnLockLatch(&(pIter->pTable->latch));
|
||||
tsdbError("vgId:%d failed to write data of table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo),
|
||||
TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable),
|
||||
tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
taosRUnLockLatch(&(pIter->pTable->latch));
|
||||
|
||||
// Move the last block to the new .l file if neccessary
|
||||
if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) {
|
||||
tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
// Write the SCompBlock part
|
||||
if (tsdbWriteCompInfo(pHelper) < 0) {
|
||||
tsdbError("vgId:%d, failed to write compInfo part since %s", REPO_ID(pRepo), tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
if (tsdbWriteCompIdx(pHelper) < 0) {
|
||||
tsdbError("vgId:%d failed to write compIdx part to file %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
|
||||
goto _err;
|
||||
}
|
||||
|
||||
tfree(dataDir);
|
||||
tsdbCloseHelperFile(pHelper, 0, pGroup);
|
||||
|
||||
pthread_rwlock_wrlock(&(pFileH->fhlock));
|
||||
|
||||
(void)rename(helperNewHeadF(pHelper)->fname, helperHeadF(pHelper)->fname);
|
||||
pGroup->files[TSDB_FILE_TYPE_HEAD].info = helperNewHeadF(pHelper)->info;
|
||||
|
||||
if (newLast) {
|
||||
(void)rename(helperNewLastF(pHelper)->fname, helperLastF(pHelper)->fname);
|
||||
pGroup->files[TSDB_FILE_TYPE_LAST].info = helperNewLastF(pHelper)->info;
|
||||
} else {
|
||||
pGroup->files[TSDB_FILE_TYPE_LAST].info = helperLastF(pHelper)->info;
|
||||
}
|
||||
|
||||
pGroup->files[TSDB_FILE_TYPE_DATA].info = helperDataF(pHelper)->info;
|
||||
|
||||
pthread_rwlock_unlock(&(pFileH->fhlock));
|
||||
|
||||
return 0;
|
||||
|
||||
_err:
|
||||
tfree(dataDir);
|
||||
tsdbCloseHelperFile(pHelper, 1, NULL);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
|
||||
SMemTable *pMem = pRepo->imem;
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
|
||||
SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter));
|
||||
if (iters == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (tsdbRLockRepoMeta(pRepo) < 0) goto _err;
|
||||
|
||||
// reference all tables
|
||||
for (int i = 0; i < pMem->maxTables; i++) {
|
||||
if (pMeta->tables[i] != NULL) {
|
||||
tsdbRefTable(pMeta->tables[i]);
|
||||
iters[i].pTable = pMeta->tables[i];
|
||||
}
|
||||
}
|
||||
|
||||
if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err;
|
||||
|
||||
for (int i = 0; i < pMem->maxTables; i++) {
|
||||
if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) {
|
||||
if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
tSkipListIterNext(iters[i].pIter);
|
||||
}
|
||||
}
|
||||
|
||||
return iters;
|
||||
|
||||
_err:
|
||||
tsdbDestroyCommitIters(iters, pMem->maxTables);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) {
|
||||
if (iters == NULL) return;
|
||||
|
||||
for (int i = 1; i < maxTables; i++) {
|
||||
if (iters[i].pTable != NULL) {
|
||||
tsdbUnRefTable(iters[i].pTable);
|
||||
tSkipListDestroyIter(iters[i].pIter);
|
||||
}
|
||||
}
|
||||
|
||||
free(iters);
|
||||
}
|
||||
|
||||
static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) {
|
||||
ASSERT(pMemTable->maxTables < maxTables);
|
||||
|
||||
|
@ -848,4 +493,400 @@ static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema *
|
|||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter) {
  if (pBlock->dataLen <= 0) return -1;
  pIter->totalLen = pBlock->dataLen;
  pIter->len = 0;
  pIter->row = (SDataRow)(pBlock->data + pBlock->schemaLen);
  return 0;
}

static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) {
  SDataRow row = pIter->row;
  if (row == NULL) return NULL;

  pIter->len += dataRowLen(row);
  if (pIter->len >= pIter->totalLen) {
    pIter->row = NULL;
  } else {
    pIter->row = (char *)row + dataRowLen(row);
  }

  return row;
}

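// Illustrative sketch (not from this patch): combined with tsdbInitSubmitMsgIter
// and tsdbGetSubmitMsgNext further down, the two iterators give the usual nested
// walk over a submit message; processRow() is a placeholder.
SSubmitMsgIter msgIter = {0};
SSubmitBlk *   pBlock = NULL;

if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1;
while (tsdbGetSubmitMsgNext(&msgIter, &pBlock) >= 0 && pBlock != NULL) {
  SSubmitBlkIter blkIter = {0};
  SDataRow       row = NULL;

  tsdbInitSubmitBlkIter(pBlock, &blkIter);  // rows start after the inline schema
  while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) {
    processRow(row);  // placeholder for per-row handling
  }
}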
static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey,
                                          TSKEY now) {
  if (dataRowKey(row) < minKey || dataRowKey(row) > maxKey) {
    tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " timestamp is out of range! now %" PRId64 " minKey %" PRId64
              " maxKey %" PRId64 " row key %" PRId64,
              REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), now, minKey, maxKey,
              dataRowKey(row));
    terrno = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE;
    return -1;
  }

  return 0;
}

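// Illustrative sketch (configuration values invented): the minKey/maxKey window
// checked above is derived from the repository configuration in
// tsdbScanAndConvertSubmitMsg below -- rows may lie at most `keep` days in the
// past and at most `daysPerFile` days in the future relative to now.
TSKEY now = taosGetTimestamp(TSDB_TIME_PRECISION_MILLI);
TSKEY minKey = now - tsMsPerDay[TSDB_TIME_PRECISION_MILLI] * 3650;  // keep = 3650 days
TSKEY maxKey = now + tsMsPerDay[TSDB_TIME_PRECISION_MILLI] * 10;    // daysPerFile = 10
// Anything outside [minKey, maxKey] fails with TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE.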
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
|
||||
ASSERT(pMsg != NULL);
|
||||
STsdbMeta * pMeta = pRepo->tsdbMeta;
|
||||
SSubmitMsgIter msgIter = {0};
|
||||
SSubmitBlk * pBlock = NULL;
|
||||
SSubmitBlkIter blkIter = {0};
|
||||
SDataRow row = NULL;
|
||||
TSKEY now = taosGetTimestamp(pRepo->config.precision);
|
||||
TSKEY minKey = now - tsMsPerDay[pRepo->config.precision] * pRepo->config.keep;
|
||||
TSKEY maxKey = now + tsMsPerDay[pRepo->config.precision] * pRepo->config.daysPerFile;
|
||||
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
pMsg->length = htonl(pMsg->length);
|
||||
pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
|
||||
|
||||
if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1;
|
||||
while (true) {
|
||||
if (tsdbGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1;
|
||||
if (pBlock == NULL) break;
|
||||
|
||||
pBlock->uid = htobe64(pBlock->uid);
|
||||
pBlock->tid = htonl(pBlock->tid);
|
||||
pBlock->sversion = htonl(pBlock->sversion);
|
||||
pBlock->dataLen = htonl(pBlock->dataLen);
|
||||
pBlock->schemaLen = htonl(pBlock->schemaLen);
|
||||
pBlock->numOfRows = htons(pBlock->numOfRows);
|
||||
|
||||
if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) {
|
||||
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
|
||||
pBlock->tid);
|
||||
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
|
||||
return -1;
|
||||
}
|
||||
|
||||
STable *pTable = pMeta->tables[pBlock->tid];
|
||||
if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) {
|
||||
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
|
||||
pBlock->tid);
|
||||
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
|
||||
tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable));
|
||||
terrno = TSDB_CODE_TDB_INVALID_ACTION;
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Check schema version and update schema if needed
|
||||
if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) {
|
||||
if (terrno == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
|
||||
continue;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
tsdbInitSubmitBlkIter(pBlock, &blkIter);
|
||||
while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) {
|
||||
if (tsdbCheckRowRange(pRepo, pTable, row, minKey, maxKey, now) < 0) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (terrno != TSDB_CODE_SUCCESS) return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows) {
|
||||
STsdbMeta * pMeta = pRepo->tsdbMeta;
|
||||
int64_t points = 0;
|
||||
STable * pTable = NULL;
|
||||
SSubmitBlkIter blkIter = {0};
|
||||
SDataRow row = NULL;
|
||||
void ** rows = NULL;
|
||||
int rowCounter = 0;
|
||||
|
||||
ASSERT(pBlock->tid < pMeta->maxTables);
|
||||
pTable = pMeta->tables[pBlock->tid];
|
||||
ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
|
||||
|
||||
rows = (void **)calloc(pBlock->numOfRows, sizeof(void *));
|
||||
if (rows == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
tsdbInitSubmitBlkIter(pBlock, &blkIter);
|
||||
while ((row = tsdbGetSubmitBlkNext(&blkIter)) != NULL) {
|
||||
if (tsdbCopyRowToMem(pRepo, row, pTable, &(rows[rowCounter])) < 0) {
|
||||
tsdbFreeRows(pRepo, rows, rowCounter);
|
||||
goto _err;
|
||||
}
|
||||
|
||||
(*affectedrows)++;
|
||||
points++;
|
||||
|
||||
if (rows[rowCounter] != NULL) {
|
||||
rowCounter++;
|
||||
}
|
||||
}
|
||||
|
||||
if (tsdbInsertDataToTableImpl(pRepo, pTable, rows, rowCounter) < 0) {
|
||||
goto _err;
|
||||
}
|
||||
|
||||
STSchema *pSchema = tsdbGetTableSchemaByVersion(pTable, pBlock->sversion);
|
||||
pRepo->stat.pointsWritten += points * schemaNCols(pSchema);
|
||||
pRepo->stat.totalStorage += points * schemaVLen(pSchema);
|
||||
|
||||
free(rows);
|
||||
return 0;
|
||||
|
||||
_err:
|
||||
free(rows);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void **ppRow) {
|
||||
STsdbCfg * pCfg = &pRepo->config;
|
||||
TKEY tkey = dataRowTKey(row);
|
||||
TSKEY key = dataRowKey(row);
|
||||
bool isRowDelete = TKEY_IS_DELETED(tkey);
|
||||
|
||||
if (isRowDelete) {
|
||||
if (!pCfg->update) {
|
||||
tsdbWarn("vgId:%d vnode is not allowed to update but try to delete a data row", REPO_ID(pRepo));
|
||||
terrno = TSDB_CODE_TDB_INVALID_ACTION;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (key > TABLE_LASTKEY(pTable)) {
|
||||
tsdbTrace("vgId:%d skip to delete row key %" PRId64 " which is larger than table lastKey %" PRId64,
|
||||
REPO_ID(pRepo), key, TABLE_LASTKEY(pTable));
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
void *pRow = tsdbAllocBytes(pRepo, dataRowLen(row));
|
||||
if (pRow == NULL) {
|
||||
tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %d bytes since %s",
|
||||
REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), dataRowLen(row), tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
dataRowCpy(pRow, row);
|
||||
ppRow[0] = pRow;
|
||||
|
||||
tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo),
|
||||
isRowDelete ? "deleted from" : "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable),
|
||||
key);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) {
|
||||
if (pMsg == NULL) {
|
||||
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
|
||||
return -1;
|
||||
}
|
||||
|
||||
pIter->totalLen = pMsg->length;
|
||||
pIter->len = 0;
|
||||
pIter->pMsg = pMsg;
|
||||
if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) {
|
||||
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) {
|
||||
if (pIter->len == 0) {
|
||||
pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE;
|
||||
} else {
|
||||
SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
|
||||
pIter->len += (sizeof(SSubmitBlk) + pSubmitBlk->dataLen + pSubmitBlk->schemaLen);
|
||||
}
|
||||
|
||||
if (pIter->len > pIter->totalLen) {
|
||||
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
|
||||
*pPBlock = NULL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
*pPBlock = (pIter->len == pIter->totalLen) ? NULL : (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) {
|
||||
ASSERT(pTable != NULL);
|
||||
|
||||
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
|
||||
int sversion = schemaVersion(pSchema);
|
||||
|
||||
if (pBlock->sversion == sversion) {
|
||||
return 0;
|
||||
} else {
|
||||
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) { // stream table is not allowed to change schema
|
||||
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (pBlock->sversion > sversion) { // may need to update table schema
|
||||
if (pBlock->schemaLen > 0) {
|
||||
tsdbDebug(
|
||||
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, update...",
|
||||
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
|
||||
ASSERT(pBlock->schemaLen % sizeof(STColumn) == 0);
|
||||
int numOfCols = pBlock->schemaLen / sizeof(STColumn);
|
||||
STColumn *pTCol = (STColumn *)pBlock->data;
|
||||
|
||||
STSchemaBuilder schemaBuilder = {0};
|
||||
if (tdInitTSchemaBuilder(&schemaBuilder, pBlock->sversion) < 0) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
|
||||
tstrerror(terrno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
for (int i = 0; i < numOfCols; i++) {
|
||||
if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, htons(pTCol[i].colId), htons(pTCol[i].bytes)) < 0) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
|
||||
tstrerror(terrno));
|
||||
tdDestroyTSchemaBuilder(&schemaBuilder);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder);
|
||||
if (pNSchema == NULL) {
|
||||
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
|
||||
tdDestroyTSchemaBuilder(&schemaBuilder);
|
||||
return -1;
|
||||
}
|
||||
|
||||
tdDestroyTSchemaBuilder(&schemaBuilder);
|
||||
tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true);
|
||||
} else {
|
||||
tsdbDebug(
|
||||
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of data, client version %d, reconfigure...",
|
||||
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
|
||||
terrno = TSDB_CODE_TDB_TABLE_RECONFIGURE;
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
ASSERT(pBlock->sversion >= 0);
|
||||
if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) {
|
||||
tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo),
|
||||
pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable));
|
||||
}
|
||||
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
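// Illustrative layout sketch (field names as used above, not a wire-format spec):
// a submit block carries an optional inline schema ahead of the row data,
//
//   +--------------------+-----------------------------+--------------------------+
//   | SSubmitBlk header  | schemaLen bytes: STColumn[]  | dataLen bytes: SDataRows |
//   +--------------------+-----------------------------+--------------------------+
//
// numOfCols = pBlock->schemaLen / sizeof(STColumn), and the first row sits at
// pBlock->data + pBlock->schemaLen (see tsdbInitSubmitBlkIter). The colId and
// bytes fields of each STColumn arrive in network order, hence the htons() calls.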
|
||||
static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter) {
|
||||
if (rowCounter < 1) return 0;
|
||||
|
||||
SMemTable * pMemTable = NULL;
|
||||
STableData *pTableData = NULL;
|
||||
STsdbMeta * pMeta = pRepo->tsdbMeta;
|
||||
STsdbCfg * pCfg = &(pRepo->config);
|
||||
|
||||
ASSERT(pRepo->mem != NULL);
|
||||
pMemTable = pRepo->mem;
|
||||
|
||||
if (TABLE_TID(pTable) >= pMemTable->maxTables) {
|
||||
if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) {
|
||||
tsdbFreeRows(pRepo, rows, rowCounter);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
pTableData = pMemTable->tData[TABLE_TID(pTable)];
|
||||
|
||||
if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) {
|
||||
if (pTableData != NULL) {
|
||||
taosWLockLatch(&(pMemTable->latch));
|
||||
pMemTable->tData[TABLE_TID(pTable)] = NULL;
|
||||
tsdbFreeTableData(pTableData);
|
||||
taosWUnLockLatch(&(pMemTable->latch));
|
||||
}
|
||||
|
||||
pTableData = tsdbNewTableData(pCfg, pTable);
|
||||
if (pTableData == NULL) {
|
||||
tsdbError("vgId:%d failed to insert data to table %s uid %" PRId64 " tid %d since %s", REPO_ID(pRepo),
|
||||
TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable), tstrerror(terrno));
|
||||
tsdbFreeRows(pRepo, rows, rowCounter);
|
||||
return -1;
|
||||
}
|
||||
|
||||
pRepo->mem->tData[TABLE_TID(pTable)] = pTableData;
|
||||
}
|
||||
|
||||
ASSERT((pTableData != NULL) && pTableData->uid == TABLE_UID(pTable));
|
||||
|
||||
int64_t osize = SL_SIZE(pTableData->pData);
|
||||
tSkipListPutBatch(pTableData->pData, rows, rowCounter);
|
||||
int64_t dsize = SL_SIZE(pTableData->pData) - osize;
|
||||
|
||||
if (pMemTable->keyFirst > dataRowKey(rows[0])) pMemTable->keyFirst = dataRowKey(rows[0]);
|
||||
if (pMemTable->keyLast < dataRowKey(rows[rowCounter - 1])) pMemTable->keyLast = dataRowKey(rows[rowCounter - 1]);
|
||||
pMemTable->numOfRows += dsize;
|
||||
|
||||
if (pTableData->keyFirst > dataRowKey(rows[0])) pTableData->keyFirst = dataRowKey(rows[0]);
|
||||
if (pTableData->keyLast < dataRowKey(rows[rowCounter - 1])) pTableData->keyLast = dataRowKey(rows[rowCounter - 1]);
|
||||
pTableData->numOfRows += dsize;
|
||||
|
||||
// TODO: impl delete row thing
|
||||
if (TABLE_LASTKEY(pTable) < dataRowKey(rows[rowCounter-1])) TABLE_LASTKEY(pTable) = dataRowKey(rows[rowCounter-1]);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) {
|
||||
ASSERT(pRepo->mem != NULL);
|
||||
STsdbBufPool *pBufPool = pRepo->pPool;
|
||||
|
||||
for (int i = rowCounter - 1; i >= 0; --i) {
|
||||
SDataRow row = (SDataRow)rows[i];
|
||||
int bytes = (int)dataRowLen(row);
|
||||
|
||||
if (pRepo->mem->extraBuffList == NULL) {
|
||||
STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo);
|
||||
ASSERT(pBufBlock != NULL && pBufBlock->offset >= bytes);
|
||||
|
||||
pBufBlock->offset -= bytes;
|
||||
pBufBlock->remain += bytes;
|
||||
ASSERT(row == POINTER_SHIFT(pBufBlock->data, pBufBlock->offset));
|
||||
tsdbTrace("vgId:%d free %d bytes to TSDB buffer pool, nBlocks %d offset %d remain %d", REPO_ID(pRepo), bytes,
|
||||
listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain);
|
||||
|
||||
if (pBufBlock->offset == 0) { // return the block to buffer pool
|
||||
tsdbLockRepo(pRepo);
|
||||
SListNode *pNode = tdListPopTail(pRepo->mem->bufBlockList);
|
||||
tdListPrependNode(pBufPool->bufBlockList, pNode);
|
||||
tsdbUnlockRepo(pRepo);
|
||||
}
|
||||
} else {
|
||||
ASSERT(listNEles(pRepo->mem->extraBuffList) > 0);
|
||||
SListNode *pNode = tdListPopTail(pRepo->mem->extraBuffList);
|
||||
ASSERT(row == pNode->data);
|
||||
free(pNode);
|
||||
tsdbTrace("vgId:%d free %d bytes to SYSTEM buffer pool", REPO_ID(pRepo), bytes);
|
||||
|
||||
if (listNEles(pRepo->mem->extraBuffList) == 0) {
|
||||
tdListFree(pRepo->mem->extraBuffList);
|
||||
pRepo->mem->extraBuffList = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1595,7 +1595,7 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
      tblkIdx++;
    } else if (oBlock.numOfRows + pMergeInfo->rowsInserted - pMergeInfo->rowsDeleteSucceed == 0) {
      // Delete the block and do some stuff
      ASSERT(pMergeInfo->keyFirst == INT64_MAX && pMergeInfo->keyFirst == INT64_MIN);
      // ASSERT(pMergeInfo->keyFirst == INT64_MAX && pMergeInfo->keyFirst == INT64_MIN);
      if (tsdbDeleteSuperBlock(pHelper, tblkIdx) < 0) return -1;
      *pCommitIter->pIter = slIter;
      if (oBlock.last && pHelper->hasOldLastBlock) pHelper->hasOldLastBlock = false;

@ -131,6 +131,7 @@ SSkipList *tSkipListCreate(uint8_t maxLevel, uint8_t keyType, uint16_t keyLen, _
                                 __sl_key_fn_t fn);
void               tSkipListDestroy(SSkipList *pSkipList);
SSkipListNode *    tSkipListPut(SSkipList *pSkipList, void *pData);
void               tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata);
SArray *           tSkipListGet(SSkipList *pSkipList, SSkipListKey pKey);
void               tSkipListPrint(SSkipList *pSkipList, int16_t nlevel);
SSkipListIterator *tSkipListCreateIter(SSkipList *pSkipList);

@ -236,6 +236,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe
  rInfo.offset = lseek(pStore->fd, 0, SEEK_CUR);
  if (rInfo.offset < 0) {
    uError("failed to lseek file %s since %s", pStore->fname, strerror(errno));
    terrno = TAOS_SYSTEM_ERROR(errno);
    return -1;
  }

@ -254,6 +255,7 @@ int tdUpdateKVStoreRecord(SKVStore *pStore, uint64_t uid, void *cont, int contLe

  if (taosWrite(pStore->fd, cont, contLen) < contLen) {
    uError("failed to write %d bytes to file %s since %s", contLen, pStore->fname, strerror(errno));
    terrno = TAOS_SYSTEM_ERROR(errno);
    return -1;
  }

@ -287,17 +287,17 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) {
  tsLogObj.fileNum = maxFileNum;
  taosGetLogFileName(fn);

  if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) {
    strcpy(name, fn);
    strcat(name, ".0");
  }
  bool log0Exist = stat(name, &logstat0) >= 0;

  if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) {
    strcpy(name, fn);
    strcat(name, ".1");
  }

  bool log0Exist = stat(name, &logstat0) >= 0;
  bool log1Exist = stat(name, &logstat1) >= 0;

  // if none of the log files exist, open 0, if both exists, open the old one

@ -426,11 +426,11 @@ static int taosDecRefCount(int rsetId, int64_t rid, int remove) {

      (*pSet->fp)(pNode->p);

      uTrace("rsetId:%d p:%p rid:%" PRId64 "is removed, count:%d, free mem: %p", rsetId, pNode->p, rid, pSet->count, pNode);
      uTrace("rsetId:%d p:%p rid:%" PRId64 " is removed, count:%d, free mem: %p", rsetId, pNode->p, rid, pSet->count, pNode);
      free(pNode);
      released = 1;
    } else {
      uTrace("rsetId:%d p:%p rid:%" PRId64 "is released, count:%d", rsetId, pNode->p, rid, pNode->count);
      uTrace("rsetId:%d p:%p rid:%" PRId64 " is released, count:%d", rsetId, pNode->p, rid, pNode->count);
    }
  } else {
    uTrace("rsetId:%d rid:%" PRId64 " is not there, failed to release/remove", rsetId, rid);

@ -24,10 +24,12 @@ static SSkipListNode * getPriorNode(SSkipList *pSkipList, const char *val, in
static void               tSkipListRemoveNodeImpl(SSkipList *pSkipList, SSkipListNode *pNode);
static void               tSkipListCorrectLevel(SSkipList *pSkipList);
static SSkipListIterator *doCreateSkipListIterator(SSkipList *pSkipList, int32_t order);
static void               tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **backward, SSkipListNode *pNode);
static bool               tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, void *pData);
static SSkipListNode *    tSkipListNewNode(uint8_t level);
static void           tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **direction, SSkipListNode *pNode, bool isForward);
static bool           tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward, void *pData);
static SSkipListNode *tSkipListNewNode(uint8_t level);
#define tSkipListFreeNode(n) tfree((n))
static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward,
                                       bool hasDup);

static FORCE_INLINE int tSkipListWLock(SSkipList *pSkipList);
static FORCE_INLINE int tSkipListRLock(SSkipList *pSkipList);

@ -109,32 +111,86 @@ SSkipListNode *tSkipListPut(SSkipList *pSkipList, void *pData) {
  if (pSkipList == NULL || pData == NULL) return NULL;

  SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0};
  uint8_t        dupMode = SL_DUP_MODE(pSkipList);
  SSkipListNode *pNode = NULL;

  tSkipListWLock(pSkipList);

  bool hasDup = tSkipListGetPosToPut(pSkipList, backward, pData);

  if (hasDup && (dupMode == SL_DISCARD_DUP_KEY || dupMode == SL_UPDATE_DUP_KEY)) {
    if (dupMode == SL_UPDATE_DUP_KEY) {
      pNode = SL_NODE_GET_BACKWARD_POINTER(backward[0], 0);
      atomic_store_ptr(&(pNode->pData), pData);
    }
  } else {
    pNode = tSkipListNewNode(getSkipListRandLevel(pSkipList));
    if (pNode != NULL) {
      pNode->pData = pData;

      tSkipListDoInsert(pSkipList, backward, pNode);
    }
  }
  pNode = tSkipListPutImpl(pSkipList, pData, backward, false, hasDup);

  tSkipListUnlock(pSkipList);

  return pNode;
}

||||
// Put a batch of data into skiplist. The batch of data must be in ascending order
|
||||
void tSkipListPutBatch(SSkipList *pSkipList, void **ppData, int ndata) {
|
||||
SSkipListNode *backward[MAX_SKIP_LIST_LEVEL] = {0};
|
||||
SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0};
|
||||
bool hasDup = false;
|
||||
char * pKey = NULL;
|
||||
char * pDataKey = NULL;
|
||||
int compare = 0;
|
||||
|
||||
tSkipListWLock(pSkipList);
|
||||
|
||||
// backward to put the first data
|
||||
hasDup = tSkipListGetPosToPut(pSkipList, backward, ppData[0]);
|
||||
tSkipListPutImpl(pSkipList, ppData[0], backward, false, hasDup);
|
||||
|
||||
for (int level = 0; level < pSkipList->maxLevel; level++) {
|
||||
forward[level] = SL_NODE_GET_BACKWARD_POINTER(backward[level], level);
|
||||
}
|
||||
|
||||
// forward to put the rest of data
|
||||
for (int idata = 1; idata < ndata; idata++) {
|
||||
pDataKey = pSkipList->keyFn(ppData[idata]);
|
||||
hasDup = false;
|
||||
|
||||
// Compare max key
|
||||
pKey = SL_GET_MAX_KEY(pSkipList);
|
||||
compare = pSkipList->comparFn(pDataKey, pKey);
|
||||
if (compare > 0) {
|
||||
for (int i = 0; i < pSkipList->maxLevel; i++) {
|
||||
forward[i] = SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i);
|
||||
}
|
||||
} else {
|
||||
SSkipListNode *px = pSkipList->pHead;
|
||||
for (int i = pSkipList->maxLevel - 1; i >= 0; --i) {
|
||||
if (i < pSkipList->level) {
|
||||
// set new px
|
||||
if (forward[i] != pSkipList->pHead) {
|
||||
if (px == pSkipList->pHead ||
|
||||
pSkipList->comparFn(SL_GET_NODE_KEY(pSkipList, forward[i]), SL_GET_NODE_KEY(pSkipList, px)) > 0) {
|
||||
px = forward[i];
|
||||
}
|
||||
}
|
||||
|
||||
SSkipListNode *p = SL_NODE_GET_FORWARD_POINTER(px, i);
|
||||
while (p != pSkipList->pTail) {
|
||||
pKey = SL_GET_NODE_KEY(pSkipList, p);
|
||||
|
||||
compare = pSkipList->comparFn(pKey, pDataKey);
|
||||
if (compare >= 0) {
|
||||
if (compare == 0 && !hasDup) hasDup = true;
|
||||
break;
|
||||
} else {
|
||||
px = p;
|
||||
p = SL_NODE_GET_FORWARD_POINTER(px, i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
forward[i] = px;
|
||||
}
|
||||
}
|
||||
|
||||
tSkipListPutImpl(pSkipList, ppData[idata], forward, true, hasDup);
|
||||
}
|
||||
|
||||
tSkipListUnlock(pSkipList);
|
||||
}
|
||||
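// Illustrative sketch (row pointers invented): the new tSkipListPutBatch above
// expects the batch to be sorted ascending by key, which is how the TSDB mem
// table calls it from tsdbInsertDataToTableImpl.
void *rows[3] = {row0, row1, row2};        // SDataRow pointers, ascending keys
tSkipListPutBatch(pTableData->pData, rows, 3);
// Duplicate keys are handled per the list's dup mode, so the growth of
// SL_SIZE(pTableData->pData) can be smaller than the number of rows passed in.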
|
||||
uint32_t tSkipListRemove(SSkipList *pSkipList, SSkipListKey key) {
|
||||
uint32_t count = 0;
|
||||
|
||||
|
@@ -310,22 +366,25 @@ void tSkipListPrint(SSkipList *pSkipList, int16_t nlevel) {
    }
  }

static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **backward, SSkipListNode *pNode) {
static void tSkipListDoInsert(SSkipList *pSkipList, SSkipListNode **direction, SSkipListNode *pNode, bool isForward) {
  for (int32_t i = 0; i < pNode->level; ++i) {
    if (i >= pSkipList->level) {
      SL_NODE_GET_FORWARD_POINTER(pNode, i) = pSkipList->pTail;
      SL_NODE_GET_BACKWARD_POINTER(pNode, i) = pSkipList->pHead;
      SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, i) = pNode;
      SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i) = pNode;
    SSkipListNode *x = direction[i];
    if (isForward) {
      SL_NODE_GET_BACKWARD_POINTER(pNode, i) = x;

      SSkipListNode *next = SL_NODE_GET_FORWARD_POINTER(x, i);
      SL_NODE_GET_BACKWARD_POINTER(next, i) = pNode;

      SL_NODE_GET_FORWARD_POINTER(pNode, i) = next;
      SL_NODE_GET_FORWARD_POINTER(x, i) = pNode;
    } else {
      SSkipListNode *x = backward[i];
      SL_NODE_GET_FORWARD_POINTER(pNode, i) = x;

      SSkipListNode *prev = SL_NODE_GET_BACKWARD_POINTER(x, i);
      SL_NODE_GET_FORWARD_POINTER(prev, i) = pNode;

      SL_NODE_GET_BACKWARD_POINTER(x, i) = pNode;
      SL_NODE_GET_BACKWARD_POINTER(pNode, i) = prev;
      SL_NODE_GET_BACKWARD_POINTER(x, i) = pNode;
    }
  }

@@ -377,7 +436,7 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward,
  char * pDataKey = pSkipList->keyFn(pData);

  if (pSkipList->size == 0) {
    for (int i = 0; i < pSkipList->level; i++) {
    for (int i = 0; i < pSkipList->maxLevel; i++) {
      backward[i] = pSkipList->pTail;
    }
  } else {
@@ -387,7 +446,7 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward,
    pKey = SL_GET_MAX_KEY(pSkipList);
    compare = pSkipList->comparFn(pDataKey, pKey);
    if (compare >= 0) {
      for (int i = 0; i < pSkipList->level; i++) {
      for (int i = 0; i < pSkipList->maxLevel; i++) {
        backward[i] = pSkipList->pTail;
      }

@@ -398,7 +457,7 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward,
    pKey = SL_GET_MIN_KEY(pSkipList);
    compare = pSkipList->comparFn(pDataKey, pKey);
    if (compare < 0) {
      for (int i = 0; i < pSkipList->level; i++) {
      for (int i = 0; i < pSkipList->maxLevel; i++) {
        backward[i] = SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, i);
      }

@@ -406,18 +465,20 @@ static bool tSkipListGetPosToPut(SSkipList *pSkipList, SSkipListNode **backward,
    }

    SSkipListNode *px = pSkipList->pTail;
    for (int i = pSkipList->level - 1; i >= 0; --i) {
      SSkipListNode *p = SL_NODE_GET_BACKWARD_POINTER(px, i);
      while (p != pSkipList->pHead) {
        pKey = SL_GET_NODE_KEY(pSkipList, p);
    for (int i = pSkipList->maxLevel - 1; i >= 0; --i) {
      if (i < pSkipList->level) {
        SSkipListNode *p = SL_NODE_GET_BACKWARD_POINTER(px, i);
        while (p != pSkipList->pHead) {
          pKey = SL_GET_NODE_KEY(pSkipList, p);

        compare = pSkipList->comparFn(pKey, pDataKey);
        if (compare <= 0) {
          if (compare == 0 && !hasDupKey) hasDupKey = true;
          break;
        } else {
          px = p;
          p = SL_NODE_GET_BACKWARD_POINTER(px, i);
          compare = pSkipList->comparFn(pKey, pDataKey);
          if (compare <= 0) {
            if (compare == 0 && !hasDupKey) hasDupKey = true;
            break;
          } else {
            px = p;
            p = SL_NODE_GET_BACKWARD_POINTER(px, i);
          }
        }
      }

@@ -579,6 +640,32 @@ static SSkipListNode *tSkipListNewNode(uint8_t level) {
  return pNode;
}

static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipListNode **direction, bool isForward,
                                       bool hasDup) {
  uint8_t        dupMode = SL_DUP_MODE(pSkipList);
  SSkipListNode *pNode = NULL;

  if (hasDup && (dupMode == SL_DISCARD_DUP_KEY || dupMode == SL_UPDATE_DUP_KEY)) {
    if (dupMode == SL_UPDATE_DUP_KEY) {
      if (isForward) {
        pNode = SL_NODE_GET_FORWARD_POINTER(direction[0], 0);
      } else {
        pNode = SL_NODE_GET_BACKWARD_POINTER(direction[0], 0);
      }
      atomic_store_ptr(&(pNode->pData), pData);
    }
  } else {
    pNode = tSkipListNewNode(getSkipListRandLevel(pSkipList));
    if (pNode != NULL) {
      pNode->pData = pData;

      tSkipListDoInsert(pSkipList, direction, pNode, isForward);
    }
  }

  return pNode;
}

// static int32_t tSkipListEndParQuery(SSkipList *pSkipList, SSkipListNode *pStartNode, SSkipListKey *pEndKey,
//                                     int32_t cond, SSkipListNode ***pRes) {
//   pthread_rwlock_rdlock(&pSkipList->lock);
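
tSkipListPutImpl above centralises the duplicate-key policy: on a duplicate, SL_DISCARD_DUP_KEY keeps the existing node and SL_UPDATE_DUP_KEY overwrites its payload pointer instead of allocating a new node. A rough, self-contained sketch of the same policy on a plain list (hypothetical types, enum, and function names; not the TDengine API):

```c
#include <stddef.h>

typedef enum { DUP_DISCARD, DUP_UPDATE, DUP_ALLOW } dupMode;

typedef struct dNode {
  int           key;
  void         *payload;
  struct dNode *next;
} dNode;

/* Apply a duplicate-key policy. `hit` is an existing node with the same key
 * (may be NULL), `fresh` is a preallocated node. Returns the node that ends up
 * holding `payload` (the caller links `fresh` in), or NULL when the new record
 * is discarded. */
static dNode *applyDupPolicy(dNode *hit, dNode *fresh, void *payload, dupMode mode) {
  if (hit != NULL && mode != DUP_ALLOW) {
    if (mode == DUP_UPDATE) {
      hit->payload = payload;   /* overwrite in place, no new node */
      return hit;
    }
    return NULL;                /* DUP_DISCARD: drop the new record */
  }
  fresh->payload = payload;     /* no duplicate (or duplicates allowed) */
  return fresh;
}
```
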
@@ -58,13 +58,13 @@ uint32_t taosGetIpFromFqdn(const char *fqdn) {
  } else {
#ifdef EAI_SYSTEM
    if (ret == EAI_SYSTEM) {
      uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, strerror(errno));
      uError("failed to get the ip address, fqdn:%s, since:%s", fqdn, strerror(errno));
      terrno = TAOS_SYSTEM_ERROR(errno);
    } else {
      uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
      uError("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
    }
#else
    uError("failed to get the ip address, fqdn:%s, code:%d, reason:%s", fqdn, ret, gai_strerror(ret));
    uError("failed to get the ip address, fqdn:%s, since:%s", fqdn, gai_strerror(ret));
#endif
    return 0xFFFFFFFF;
  }

@@ -107,7 +107,7 @@ int32_t taosWriteMsg(SOCKET fd, void *buf, int32_t nbytes) {
  while (nleft > 0) {
    nwritten = (int32_t)taosWriteSocket(fd, (char *)ptr, (size_t)nleft);
    if (nwritten <= 0) {
      if (errno == EINTR)
      if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK)
        continue;
      else
        return -1;

@@ -133,7 +133,7 @@ int32_t taosReadMsg(SOCKET fd, void *buf, int32_t nbytes) {
    if (nread == 0) {
      break;
    } else if (nread < 0) {
      if (errno == EINTR) {
      if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
        continue;
      } else {
        return -1;
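
The two hunks above widen the retry condition from EINTR alone to EINTR/EAGAIN/EWOULDBLOCK, so an interrupted or momentarily non-writable socket no longer aborts the whole message transfer. A minimal sketch of that pattern with plain POSIX write(), illustrative only and not the taosWriteMsg implementation:

```c
#include <errno.h>
#include <unistd.h>

/* Write all `len` bytes, retrying transient failures instead of giving up.
 * Production code would typically poll() instead of spinning on EAGAIN. */
static ssize_t writeFully(int fd, const void *buf, size_t len) {
  const char *p    = buf;
  size_t      left = len;

  while (left > 0) {
    ssize_t n = write(fd, p, left);
    if (n > 0) {
      p += n;
      left -= (size_t)n;
    } else if (n < 0 && (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK)) {
      continue;                 /* transient: interrupted or would block, try again */
    } else {
      return -1;                /* real error or unexpected zero-length write */
    }
  }
  return (ssize_t)len;
}
```
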
@@ -37,10 +37,12 @@ extern int32_t vDebugFlag;
typedef struct {
  int32_t  vgId;      // global vnode group ID
  int32_t  refCount;  // reference count
  int32_t  queuedMsg;
  int32_t  delay;
  int8_t   status;
  int8_t   role;
  int8_t   accessState;
  int8_t   isFull;
  uint64_t version;   // current version
  uint64_t fversion;  // version on saved data file
  void    *wqueue;

@@ -58,7 +60,7 @@ typedef struct {
  char   *rootDir;
  tsem_t  sem;
  int8_t  dropped;
  char    db[TSDB_DB_NAME_LEN];
  char    db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
} SVnodeObj;

void vnodeInitWriteFp(void);

@@ -25,7 +25,7 @@
#include "vnodeCfg.h"

static void vnodeLoadCfg(SVnodeObj *pVnode, SCreateVnodeMsg* vnodeMsg) {
  strcpy(pVnode->db, vnodeMsg->db);
  tstrncpy(pVnode->db, vnodeMsg->db, sizeof(pVnode->db));
  pVnode->cfgVersion = vnodeMsg->cfg.cfgVersion;
  pVnode->tsdbCfg.cacheBlockSize = vnodeMsg->cfg.cacheBlockSize;
  pVnode->tsdbCfg.totalBlocks = vnodeMsg->cfg.totalBlocks;

@@ -97,7 +97,7 @@ int32_t vnodeReadCfg(SVnodeObj *pVnode) {
    vError("vgId:%d, failed to read %s, db not found", pVnode->vgId, file);
    goto PARSE_VCFG_ERROR;
  }
  strcpy(vnodeMsg.db, db->valuestring);
  tstrncpy(vnodeMsg.db, db->valuestring, sizeof(vnodeMsg.db));

  cJSON *cfgVersion = cJSON_GetObjectItem(root, "cfgVersion");
  if (!cfgVersion || cfgVersion->type != cJSON_Number) {
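
Both hunks above swap an unbounded strcpy for tstrncpy with the destination size, so an oversized db name is truncated instead of overflowing the field (which also matters now that the field grew to TSDB_ACCT_LEN + TSDB_DB_NAME_LEN). A sketch of the bounded-copy behaviour such a helper is expected to provide; the real tstrncpy lives in TDengine's util headers, this is only an illustration:

```c
#include <string.h>

/* Copy at most dsize-1 characters and always NUL-terminate the destination. */
static char *boundedCopy(char *dst, const char *src, size_t dsize) {
  if (dsize == 0) return dst;
  strncpy(dst, src, dsize - 1);
  dst[dsize - 1] = '\0';
  return dst;
}
```
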
@@ -30,7 +30,7 @@

static SHashObj*tsVnodesHash;
static void vnodeCleanUp(SVnodeObj *pVnode);
static int vnodeProcessTsdbStatus(void *arg, int status);
static int vnodeProcessTsdbStatus(void *arg, int status, int eno);
static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion);
static int vnodeGetWalInfo(void *ahandle, char *fileName, int64_t *fileId);
static void vnodeNotifyRole(void *ahandle, int8_t role);

@@ -381,6 +381,7 @@ int32_t vnodeClose(int32_t vgId) {
void vnodeRelease(void *pVnodeRaw) {
  if (pVnodeRaw == NULL) return;
  SVnodeObj *pVnode = pVnodeRaw;
  int32_t    code = 0;
  int32_t    vgId = pVnode->vgId;

  int32_t refCount = atomic_sub_fetch_32(&pVnode->refCount, 1);

@@ -406,7 +407,7 @@ void vnodeRelease(void *pVnodeRaw) {
  }

  if (pVnode->tsdb) {
    tsdbCloseRepo(pVnode->tsdb, 1);
    code = tsdbCloseRepo(pVnode->tsdb, 1);
    pVnode->tsdb = NULL;
  }

@@ -418,7 +419,11 @@ void vnodeRelease(void *pVnodeRaw) {
  }

  if (pVnode->wal) {
    walRemoveAllOldFiles(pVnode->wal);
    if (code != 0) {
      vError("vgId:%d, failed to commit while close tsdb repo, keep wal", pVnode->vgId);
    } else {
      walRemoveAllOldFiles(pVnode->wal);
    }
    walClose(pVnode->wal);
    pVnode->wal = NULL;
  }

@@ -590,9 +595,16 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
}

// TODO: this is a simple implement
static int vnodeProcessTsdbStatus(void *arg, int status) {
static int vnodeProcessTsdbStatus(void *arg, int status, int eno) {
  SVnodeObj *pVnode = arg;

  if (eno != TSDB_CODE_SUCCESS) {
    vError("vgId:%d, failed to commit since %s, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, tstrerror(eno),
           pVnode->fversion, pVnode->version);
    pVnode->isFull = 1;
    return 0;
  }

  if (status == TSDB_STATUS_COMMIT_START) {
    pVnode->fversion = pVnode->version;
    vDebug("vgId:%d, start commit, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version);

@@ -605,6 +617,7 @@ static int vnodeProcessTsdbStatus(void *arg, int status) {

  if (status == TSDB_STATUS_COMMIT_OVER) {
    vDebug("vgId:%d, commit over, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version);
    pVnode->isFull = 0;
    walRemoveOneOldFile(pVnode->wal);
    return vnodeSaveVersion(pVnode);
  }
@@ -28,13 +28,15 @@
#include "syncInt.h"
#include "tcq.h"

static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *, SRspRet *);
static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet);
#define MAX_QUEUED_MSG_NUM 10000

static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *pCont, SRspRet *);
static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);
static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);
static int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);
static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);
static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);
static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *);

void vnodeInitWriteFp(void) {
  vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessSubmitMsg;

@@ -100,8 +102,8 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara
  return syncCode;
}

static int32_t vnodeCheckWrite(void *param) {
  SVnodeObj *pVnode = param;
static int32_t vnodeCheckWrite(void *vparam) {
  SVnodeObj *pVnode = vparam;
  if (!(pVnode->accessState & TSDB_VN_WRITE_ACCCESS)) {
    vDebug("vgId:%d, no write auth, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
    return TSDB_CODE_VND_NO_WRITE_AUTH;

@@ -119,11 +121,16 @@ static int32_t vnodeCheckWrite(void *param) {
    return TSDB_CODE_APP_NOT_READY;
  }

  if (pVnode->isFull) {
    vDebug("vgId:%d, vnode is full, refCount:%d", pVnode->vgId, pVnode->refCount);
    return TSDB_CODE_VND_IS_FULL;
  }

  return TSDB_CODE_SUCCESS;
}

void vnodeConfirmForward(void *param, uint64_t version, int32_t code) {
  SVnodeObj *pVnode = (SVnodeObj *)param;
void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code) {
  SVnodeObj *pVnode = vparam;
  syncConfirmForward(pVnode->sync, version, code);
}

@@ -237,8 +244,25 @@ int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rpar
  memcpy(pWrite->pHead, pHead, sizeof(SWalHead) + pHead->len);

  atomic_add_fetch_32(&pVnode->refCount, 1);
  vTrace("vgId:%d, get vnode wqueue, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);

  int32_t queued = atomic_add_fetch_32(&pVnode->queuedMsg, 1);
  if (queued > MAX_QUEUED_MSG_NUM) {
    vDebug("vgId:%d, too many msg:%d in vwqueue, flow control", pVnode->vgId, queued);
    taosMsleep(1);
  }

  vTrace("vgId:%d, write into vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedMsg);

  taosWriteQitem(pVnode->wqueue, qtype, pWrite);
  return TSDB_CODE_SUCCESS;
}

void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) {
  SVnodeObj *pVnode = vparam;

  atomic_sub_fetch_32(&pVnode->queuedMsg, 1);
  vTrace("vgId:%d, free from vwqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedMsg);

  taosFreeQitem(pWrite);
  vnodeRelease(pVnode);
}
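
vnodeWriteToWQueue above counts queued messages per vnode and briefly sleeps the producer once the count passes MAX_QUEUED_MSG_NUM, while vnodeFreeFromWQueue decrements the counter when a message is consumed. A self-contained sketch of that back-pressure pattern with C11 atomics; the real code uses TDengine's atomic_* wrappers, taosMsleep, and its queue API:

```c
#include <stdatomic.h>
#include <time.h>

#define MAX_QUEUED 10000

static atomic_int queued;   /* messages currently sitting in the write queue */

static void producerEnqueue(void) {
  int n = atomic_fetch_add(&queued, 1) + 1;
  if (n > MAX_QUEUED) {
    /* Soft flow control: slow the producer down instead of rejecting the write. */
    struct timespec ts = {0, 1000000};  /* 1 ms */
    nanosleep(&ts, NULL);
  }
  /* ... push the item onto the actual queue here ... */
}

static void consumerDequeue(void) {
  /* ... pop and process the item here ... */
  atomic_fetch_sub(&queued, 1);
}
```
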
@@ -37,7 +37,7 @@ extern int32_t wDebugFlag;
#define WAL_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + 16)
#define WAL_SIGNATURE ((uint32_t)(0xFAFBFDFE))
#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12)
#define WAL_FILE_LEN (TSDB_FILENAME_LEN + 32)
#define WAL_FILE_LEN (WAL_PATH_LEN + 32)
#define WAL_FILE_NUM 3

typedef struct {
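
Deriving WAL_FILE_LEN from WAL_PATH_LEN (rather than from TSDB_FILENAME_LEN directly) keeps the file-name buffer large enough to hold a full WAL directory path plus a file suffix. Roughly, the sizing is used like this; the snprintf, macro values, and helper name here are only illustrative, not the actual wal code:

```c
#include <inttypes.h>
#include <stdio.h>

#define PATH_LEN (128 + 12)        /* stands in for WAL_PATH_LEN */
#define FILE_LEN (PATH_LEN + 32)   /* stands in for WAL_FILE_LEN */

static void buildWalName(char name[FILE_LEN], const char path[PATH_LEN], int64_t fileId) {
  /* The +32 headroom covers the file-name suffix and a 64-bit file id. */
  snprintf(name, FILE_LEN, "%s/wal%" PRId64, path, fileId);
}
```
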
@@ -132,7 +132,7 @@ https://www.taosdata.com/cn/all-downloads/
After the configuration is done, use the taos shell on the command line to connect to the server:

```shell
C:\TDengine>taos
C:\TDengine>taos -h td01
Welcome to the TDengine shell from Linux, Client Version:2.0.1.1
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.

@@ -0,0 +1,33 @@
README.md
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/

### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache

### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr

### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/

### VS Code ###
.vscode/
tests/examples/JDBC/mybatisplus-demo/.mvn/wrapper/MavenWrapperDownloader.java (new vendored file, 118 lines)
@ -0,0 +1,118 @@
|
|||
/*
|
||||
* Copyright 2007-present the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.net.*;
|
||||
import java.io.*;
|
||||
import java.nio.channels.*;
|
||||
import java.util.Properties;
|
||||
|
||||
public class MavenWrapperDownloader {
|
||||
|
||||
private static final String WRAPPER_VERSION = "0.5.6";
|
||||
/**
|
||||
* Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
|
||||
*/
|
||||
private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
|
||||
+ WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
|
||||
|
||||
/**
|
||||
* Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
|
||||
* use instead of the default one.
|
||||
*/
|
||||
private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
|
||||
".mvn/wrapper/maven-wrapper.properties";
|
||||
|
||||
/**
|
||||
* Path where the maven-wrapper.jar will be saved to.
|
||||
*/
|
||||
private static final String MAVEN_WRAPPER_JAR_PATH =
|
||||
".mvn/wrapper/maven-wrapper.jar";
|
||||
|
||||
/**
|
||||
* Name of the property which should be used to override the default download url for the wrapper.
|
||||
*/
|
||||
private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
|
||||
|
||||
public static void main(String args[]) {
|
||||
System.out.println("- Downloader started");
|
||||
File baseDirectory = new File(args[0]);
|
||||
System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
|
||||
|
||||
// If the maven-wrapper.properties exists, read it and check if it contains a custom
|
||||
// wrapperUrl parameter.
|
||||
File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
|
||||
String url = DEFAULT_DOWNLOAD_URL;
|
||||
if (mavenWrapperPropertyFile.exists()) {
|
||||
FileInputStream mavenWrapperPropertyFileInputStream = null;
|
||||
try {
|
||||
mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
|
||||
Properties mavenWrapperProperties = new Properties();
|
||||
mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
|
||||
url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
|
||||
} catch (IOException e) {
|
||||
System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
|
||||
} finally {
|
||||
try {
|
||||
if (mavenWrapperPropertyFileInputStream != null) {
|
||||
mavenWrapperPropertyFileInputStream.close();
|
||||
}
|
||||
} catch (IOException e) {
|
||||
// Ignore ...
|
||||
}
|
||||
}
|
||||
}
|
||||
System.out.println("- Downloading from: " + url);
|
||||
|
||||
File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
|
||||
if (!outputFile.getParentFile().exists()) {
|
||||
if (!outputFile.getParentFile().mkdirs()) {
|
||||
System.out.println(
|
||||
"- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
|
||||
}
|
||||
}
|
||||
System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
|
||||
try {
|
||||
downloadFileFromURL(url, outputFile);
|
||||
System.out.println("Done");
|
||||
System.exit(0);
|
||||
} catch (Throwable e) {
|
||||
System.out.println("- Error downloading");
|
||||
e.printStackTrace();
|
||||
System.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
private static void downloadFileFromURL(String urlString, File destination) throws Exception {
|
||||
if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
|
||||
String username = System.getenv("MVNW_USERNAME");
|
||||
char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
|
||||
Authenticator.setDefault(new Authenticator() {
|
||||
@Override
|
||||
protected PasswordAuthentication getPasswordAuthentication() {
|
||||
return new PasswordAuthentication(username, password);
|
||||
}
|
||||
});
|
||||
}
|
||||
URL website = new URL(urlString);
|
||||
ReadableByteChannel rbc;
|
||||
rbc = Channels.newChannel(website.openStream());
|
||||
FileOutputStream fos = new FileOutputStream(destination);
|
||||
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
|
||||
fos.close();
|
||||
rbc.close();
|
||||
}
|
||||
|
||||
}
|
@@ -0,0 +1,2 @@
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip
wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
@ -0,0 +1,322 @@
|
|||
#!/bin/sh
|
||||
# ----------------------------------------------------------------------------
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Maven Start Up Batch script
|
||||
#
|
||||
# Required ENV vars:
|
||||
# ------------------
|
||||
# JAVA_HOME - location of a JDK home dir
|
||||
#
|
||||
# Optional ENV vars
|
||||
# -----------------
|
||||
# M2_HOME - location of maven2's installed home dir
|
||||
# MAVEN_OPTS - parameters passed to the Java VM when running Maven
|
||||
# e.g. to debug Maven itself, use
|
||||
# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
|
||||
# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
if [ -z "$MAVEN_SKIP_RC" ]; then
|
||||
|
||||
if [ -f /etc/mavenrc ]; then
|
||||
. /etc/mavenrc
|
||||
fi
|
||||
|
||||
if [ -f "$HOME/.mavenrc" ]; then
|
||||
. "$HOME/.mavenrc"
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
# OS specific support. $var _must_ be set to either true or false.
|
||||
cygwin=false
|
||||
darwin=false
|
||||
mingw=false
|
||||
case "$(uname)" in
|
||||
CYGWIN*) cygwin=true ;;
|
||||
MINGW*) mingw=true ;;
|
||||
Darwin*)
|
||||
darwin=true
|
||||
# Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
|
||||
# See https://developer.apple.com/library/mac/qa/qa1170/_index.html
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
if [ -x "/usr/libexec/java_home" ]; then
|
||||
export JAVA_HOME="$(/usr/libexec/java_home)"
|
||||
else
|
||||
export JAVA_HOME="/Library/Java/Home"
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
if [ -r /etc/gentoo-release ]; then
|
||||
JAVA_HOME=$(java-config --jre-home)
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$M2_HOME" ]; then
|
||||
## resolve links - $0 may be a link to maven's home
|
||||
PRG="$0"
|
||||
|
||||
# need this for relative symlinks
|
||||
while [ -h "$PRG" ]; do
|
||||
ls=$(ls -ld "$PRG")
|
||||
link=$(expr "$ls" : '.*-> \(.*\)$')
|
||||
if expr "$link" : '/.*' >/dev/null; then
|
||||
PRG="$link"
|
||||
else
|
||||
PRG="$(dirname "$PRG")/$link"
|
||||
fi
|
||||
done
|
||||
|
||||
saveddir=$(pwd)
|
||||
|
||||
M2_HOME=$(dirname "$PRG")/..
|
||||
|
||||
# make it fully qualified
|
||||
M2_HOME=$(cd "$M2_HOME" && pwd)
|
||||
|
||||
cd "$saveddir"
|
||||
# echo Using m2 at $M2_HOME
|
||||
fi
|
||||
|
||||
# For Cygwin, ensure paths are in UNIX format before anything is touched
|
||||
if $cygwin; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME=$(cygpath --unix "$M2_HOME")
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME=$(cygpath --unix "$JAVA_HOME")
|
||||
[ -n "$CLASSPATH" ] &&
|
||||
CLASSPATH=$(cygpath --path --unix "$CLASSPATH")
|
||||
fi
|
||||
|
||||
# For Mingw, ensure paths are in UNIX format before anything is touched
|
||||
if $mingw; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME="$( (
|
||||
cd "$M2_HOME"
|
||||
pwd
|
||||
))"
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME="$( (
|
||||
cd "$JAVA_HOME"
|
||||
pwd
|
||||
))"
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
javaExecutable="$(which javac)"
|
||||
if [ -n "$javaExecutable" ] && ! [ "$(expr \"$javaExecutable\" : '\([^ ]*\)')" = "no" ]; then
|
||||
# readlink(1) is not available as standard on Solaris 10.
|
||||
readLink=$(which readlink)
|
||||
if [ ! $(expr "$readLink" : '\([^ ]*\)') = "no" ]; then
|
||||
if $darwin; then
|
||||
javaHome="$(dirname \"$javaExecutable\")"
|
||||
javaExecutable="$(cd \"$javaHome\" && pwd -P)/javac"
|
||||
else
|
||||
javaExecutable="$(readlink -f \"$javaExecutable\")"
|
||||
fi
|
||||
javaHome="$(dirname \"$javaExecutable\")"
|
||||
javaHome=$(expr "$javaHome" : '\(.*\)/bin')
|
||||
JAVA_HOME="$javaHome"
|
||||
export JAVA_HOME
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$JAVACMD" ]; then
|
||||
if [ -n "$JAVA_HOME" ]; then
|
||||
if [ -x "$JAVA_HOME/jre/sh/java" ]; then
|
||||
# IBM's JDK on AIX uses strange locations for the executables
|
||||
JAVACMD="$JAVA_HOME/jre/sh/java"
|
||||
else
|
||||
JAVACMD="$JAVA_HOME/bin/java"
|
||||
fi
|
||||
else
|
||||
JAVACMD="$(which java)"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -x "$JAVACMD" ]; then
|
||||
echo "Error: JAVA_HOME is not defined correctly." >&2
|
||||
echo " We cannot execute $JAVACMD" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
echo "Warning: JAVA_HOME environment variable is not set."
|
||||
fi
|
||||
|
||||
CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
|
||||
|
||||
# traverses directory structure from process work directory to filesystem root
|
||||
# first directory with .mvn subdirectory is considered project base directory
|
||||
find_maven_basedir() {
|
||||
|
||||
if [ -z "$1" ]; then
|
||||
echo "Path not specified to find_maven_basedir"
|
||||
return 1
|
||||
fi
|
||||
|
||||
basedir="$1"
|
||||
wdir="$1"
|
||||
while [ "$wdir" != '/' ]; do
|
||||
if [ -d "$wdir"/.mvn ]; then
|
||||
basedir=$wdir
|
||||
break
|
||||
fi
|
||||
# workaround for JBEAP-8937 (on Solaris 10/Sparc)
|
||||
if [ -d "${wdir}" ]; then
|
||||
wdir=$(
|
||||
cd "$wdir/.."
|
||||
pwd
|
||||
)
|
||||
fi
|
||||
# end of workaround
|
||||
done
|
||||
echo "${basedir}"
|
||||
}
|
||||
|
||||
# concatenates all lines of a file
|
||||
concat_lines() {
|
||||
if [ -f "$1" ]; then
|
||||
echo "$(tr -s '\n' ' ' <"$1")"
|
||||
fi
|
||||
}
|
||||
|
||||
BASE_DIR=$(find_maven_basedir "$(pwd)")
|
||||
if [ -z "$BASE_DIR" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
##########################################################################################
|
||||
# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
|
||||
# This allows using the maven wrapper in projects that prohibit checking in binary data.
|
||||
##########################################################################################
|
||||
if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Found .mvn/wrapper/maven-wrapper.jar"
|
||||
fi
|
||||
else
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
|
||||
fi
|
||||
if [ -n "$MVNW_REPOURL" ]; then
|
||||
jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
else
|
||||
jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
fi
|
||||
while IFS="=" read key value; do
|
||||
case "$key" in wrapperUrl)
|
||||
jarUrl="$value"
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done <"$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Downloading from: $jarUrl"
|
||||
fi
|
||||
wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
|
||||
if $cygwin; then
|
||||
wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath")
|
||||
fi
|
||||
|
||||
if command -v wget >/dev/null; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Found wget ... using wget"
|
||||
fi
|
||||
if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
|
||||
wget "$jarUrl" -O "$wrapperJarPath"
|
||||
else
|
||||
wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
|
||||
fi
|
||||
elif command -v curl >/dev/null; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Found curl ... using curl"
|
||||
fi
|
||||
if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
|
||||
curl -o "$wrapperJarPath" "$jarUrl" -f
|
||||
else
|
||||
curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
|
||||
fi
|
||||
|
||||
else
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo "Falling back to using Java to download"
|
||||
fi
|
||||
javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
|
||||
# For Cygwin, switch paths to Windows format before running javac
|
||||
if $cygwin; then
|
||||
javaClass=$(cygpath --path --windows "$javaClass")
|
||||
fi
|
||||
if [ -e "$javaClass" ]; then
|
||||
if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo " - Compiling MavenWrapperDownloader.java ..."
|
||||
fi
|
||||
# Compiling the Java class
|
||||
("$JAVA_HOME/bin/javac" "$javaClass")
|
||||
fi
|
||||
if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
|
||||
# Running the downloader
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo " - Running MavenWrapperDownloader.java ..."
|
||||
fi
|
||||
("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
##########################################################################################
|
||||
# End of extension
|
||||
##########################################################################################
|
||||
|
||||
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
|
||||
if [ "$MVNW_VERBOSE" = true ]; then
|
||||
echo $MAVEN_PROJECTBASEDIR
|
||||
fi
|
||||
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
|
||||
|
||||
# For Cygwin, switch paths to Windows format before running java
|
||||
if $cygwin; then
|
||||
[ -n "$M2_HOME" ] &&
|
||||
M2_HOME=$(cygpath --path --windows "$M2_HOME")
|
||||
[ -n "$JAVA_HOME" ] &&
|
||||
JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME")
|
||||
[ -n "$CLASSPATH" ] &&
|
||||
CLASSPATH=$(cygpath --path --windows "$CLASSPATH")
|
||||
[ -n "$MAVEN_PROJECTBASEDIR" ] &&
|
||||
MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR")
|
||||
fi
|
||||
|
||||
# Provide a "standardized" way to retrieve the CLI args that will
|
||||
# work with both Windows and non-Windows executions.
|
||||
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
|
||||
export MAVEN_CMD_LINE_ARGS
|
||||
|
||||
WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||
|
||||
exec "$JAVACMD" \
|
||||
$MAVEN_OPTS \
|
||||
-classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
|
||||
"-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
|
||||
${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
|
|
@ -0,0 +1,182 @@
|
|||
@REM ----------------------------------------------------------------------------
|
||||
@REM Licensed to the Apache Software Foundation (ASF) under one
|
||||
@REM or more contributor license agreements. See the NOTICE file
|
||||
@REM distributed with this work for additional information
|
||||
@REM regarding copyright ownership. The ASF licenses this file
|
||||
@REM to you under the Apache License, Version 2.0 (the
|
||||
@REM "License"); you may not use this file except in compliance
|
||||
@REM with the License. You may obtain a copy of the License at
|
||||
@REM
|
||||
@REM https://www.apache.org/licenses/LICENSE-2.0
|
||||
@REM
|
||||
@REM Unless required by applicable law or agreed to in writing,
|
||||
@REM software distributed under the License is distributed on an
|
||||
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
@REM KIND, either express or implied. See the License for the
|
||||
@REM specific language governing permissions and limitations
|
||||
@REM under the License.
|
||||
@REM ----------------------------------------------------------------------------
|
||||
|
||||
@REM ----------------------------------------------------------------------------
|
||||
@REM Maven Start Up Batch script
|
||||
@REM
|
||||
@REM Required ENV vars:
|
||||
@REM JAVA_HOME - location of a JDK home dir
|
||||
@REM
|
||||
@REM Optional ENV vars
|
||||
@REM M2_HOME - location of maven2's installed home dir
|
||||
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
|
||||
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending
|
||||
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
|
||||
@REM e.g. to debug Maven itself, use
|
||||
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
|
||||
@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
|
||||
@REM ----------------------------------------------------------------------------
|
||||
|
||||
@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
|
||||
@echo off
|
||||
@REM set title of command window
|
||||
title %0
|
||||
@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
|
||||
@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
|
||||
|
||||
@REM set %HOME% to equivalent of $HOME
|
||||
if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
|
||||
|
||||
@REM Execute a user defined script before this one
|
||||
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
|
||||
@REM check for pre script, once with legacy .bat ending and once with .cmd ending
|
||||
if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
|
||||
if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
|
||||
:skipRcPre
|
||||
|
||||
@setlocal
|
||||
|
||||
set ERROR_CODE=0
|
||||
|
||||
@REM To isolate internal variables from possible post scripts, we use another setlocal
|
||||
@setlocal
|
||||
|
||||
@REM ==== START VALIDATION ====
|
||||
if not "%JAVA_HOME%" == "" goto OkJHome
|
||||
|
||||
echo.
|
||||
echo Error: JAVA_HOME not found in your environment. >&2
|
||||
echo Please set the JAVA_HOME variable in your environment to match the >&2
|
||||
echo location of your Java installation. >&2
|
||||
echo.
|
||||
goto error
|
||||
|
||||
:OkJHome
|
||||
if exist "%JAVA_HOME%\bin\java.exe" goto init
|
||||
|
||||
echo.
|
||||
echo Error: JAVA_HOME is set to an invalid directory. >&2
|
||||
echo JAVA_HOME = "%JAVA_HOME%" >&2
|
||||
echo Please set the JAVA_HOME variable in your environment to match the >&2
|
||||
echo location of your Java installation. >&2
|
||||
echo.
|
||||
goto error
|
||||
|
||||
@REM ==== END VALIDATION ====
|
||||
|
||||
:init
|
||||
|
||||
@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
|
||||
@REM Fallback to current working directory if not found.
|
||||
|
||||
set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
|
||||
IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
|
||||
|
||||
set EXEC_DIR=%CD%
|
||||
set WDIR=%EXEC_DIR%
|
||||
:findBaseDir
|
||||
IF EXIST "%WDIR%"\.mvn goto baseDirFound
|
||||
cd ..
|
||||
IF "%WDIR%"=="%CD%" goto baseDirNotFound
|
||||
set WDIR=%CD%
|
||||
goto findBaseDir
|
||||
|
||||
:baseDirFound
|
||||
set MAVEN_PROJECTBASEDIR=%WDIR%
|
||||
cd "%EXEC_DIR%"
|
||||
goto endDetectBaseDir
|
||||
|
||||
:baseDirNotFound
|
||||
set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
|
||||
cd "%EXEC_DIR%"
|
||||
|
||||
:endDetectBaseDir
|
||||
|
||||
IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
|
||||
|
||||
@setlocal EnableExtensions EnableDelayedExpansion
|
||||
for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
|
||||
@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
|
||||
|
||||
:endReadAdditionalConfig
|
||||
|
||||
SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
|
||||
set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"
|
||||
set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
|
||||
|
||||
set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
|
||||
FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
|
||||
IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
|
||||
)
|
||||
|
||||
@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
|
||||
@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
|
||||
if exist %WRAPPER_JAR% (
|
||||
if "%MVNW_VERBOSE%" == "true" (
|
||||
echo Found %WRAPPER_JAR%
|
||||
)
|
||||
) else (
|
||||
if not "%MVNW_REPOURL%" == "" (
|
||||
SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
|
||||
)
|
||||
if "%MVNW_VERBOSE%" == "true" (
|
||||
echo Couldn't find %WRAPPER_JAR%, downloading it ...
|
||||
echo Downloading from: %DOWNLOAD_URL%
|
||||
)
|
||||
|
||||
powershell -Command "&{"^
|
||||
"$webclient = new-object System.Net.WebClient;"^
|
||||
"if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
|
||||
"$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
|
||||
"}"^
|
||||
"[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
|
||||
"}"
|
||||
if "%MVNW_VERBOSE%" == "true" (
|
||||
echo Finished downloading %WRAPPER_JAR%
|
||||
)
|
||||
)
|
||||
@REM End of extension
|
||||
|
||||
@REM Provide a "standardized" way to retrieve the CLI args that will
|
||||
@REM work with both Windows and non-Windows executions.
|
||||
set MAVEN_CMD_LINE_ARGS=%*
|
||||
|
||||
%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
|
||||
if ERRORLEVEL 1 goto error
|
||||
goto end
|
||||
|
||||
:error
|
||||
set ERROR_CODE=1
|
||||
|
||||
:end
|
||||
@endlocal & set ERROR_CODE=%ERROR_CODE%
|
||||
|
||||
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
|
||||
@REM check for post script, once with legacy .bat ending and once with .cmd ending
|
||||
if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
|
||||
if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
|
||||
:skipRcPost
|
||||
|
||||
@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
|
||||
if "%MAVEN_BATCH_PAUSE%" == "on" pause
|
||||
|
||||
if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
|
||||
|
||||
exit /B %ERROR_CODE%
|
|
@ -0,0 +1,101 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-parent</artifactId>
|
||||
<version>2.4.0</version>
|
||||
<relativePath/> <!-- lookup parent from repository -->
|
||||
</parent>
|
||||
<groupId>com.taosdata.example</groupId>
|
||||
<artifactId>mybatisplus-demo</artifactId>
|
||||
<version>0.0.1-SNAPSHOT</version>
|
||||
<name>mybatisplus-demo</name>
|
||||
<description>Demo project for tdengine</description>
|
||||
|
||||
<properties>
|
||||
<java.version>1.8</java.version>
|
||||
</properties>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.projectlombok</groupId>
|
||||
<artifactId>lombok</artifactId>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.baomidou</groupId>
|
||||
<artifactId>mybatis-plus-boot-starter</artifactId>
|
||||
<version>3.1.2</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.h2database</groupId>
|
||||
<artifactId>h2</artifactId>
|
||||
<scope>runtime</scope>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>2.0.11</version>
|
||||
</dependency>
|
||||
|
||||
<!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
|
||||
<dependency>
|
||||
<groupId>mysql</groupId>
|
||||
<artifactId>mysql-connector-java</artifactId>
|
||||
<version>5.1.47</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-web</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-devtools</artifactId>
|
||||
<scope>runtime</scope>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<version>4.12</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-maven-plugin</artifactId>
|
||||
</plugin>
|
||||
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-surefire-plugin</artifactId>
|
||||
<version>2.17</version>
|
||||
<configuration>
|
||||
<includes>
|
||||
<include>**/*Test.java</include>
|
||||
</includes>
|
||||
<excludes>
|
||||
<exclude>**/Abstract*.java</exclude>
|
||||
</excludes>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
|
||||
</project>
|
|
@ -0,0 +1,15 @@
|
|||
package com.taosdata.example.mybatisplusdemo;
|
||||
|
||||
import org.mybatis.spring.annotation.MapperScan;
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
|
||||
@SpringBootApplication
|
||||
@MapperScan("com.taosdata.example.mybatisplusdemo.mapper")
|
||||
public class MybatisplusDemoApplication {
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(MybatisplusDemoApplication.class, args);
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,34 @@
|
|||
package com.taosdata.example.mybatisplusdemo.config;
|
||||
|
||||
import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
@Configuration
|
||||
public class MybatisPlusConfig {
|
||||
|
||||
|
||||
/** mybatis 3.4.1 pagination config start ***/
|
||||
// @Bean
|
||||
// public MybatisPlusInterceptor mybatisPlusInterceptor() {
|
||||
// MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
|
||||
// interceptor.addInnerInterceptor(new PaginationInnerInterceptor());
|
||||
// return interceptor;
|
||||
// }
|
||||
|
||||
// @Bean
|
||||
// public ConfigurationCustomizer configurationCustomizer() {
|
||||
// return configuration -> configuration.setUseDeprecatedExecutor(false);
|
||||
// }
|
||||
|
||||
@Bean
|
||||
public PaginationInterceptor paginationInterceptor() {
|
||||
// return new PaginationInterceptor();
|
||||
PaginationInterceptor paginationInterceptor = new PaginationInterceptor();
|
||||
//TODO: mybatis-plus do not support TDengine, use postgresql Dialect
|
||||
paginationInterceptor.setDialectType("postgresql");
|
||||
|
||||
return paginationInterceptor;
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
package com.taosdata.example.mybatisplusdemo.domain;
|
||||
|
||||
import lombok.Data;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
|
||||
@Data
|
||||
public class Temperature {
|
||||
|
||||
private Timestamp ts;
|
||||
private float temperature;
|
||||
private String location;
|
||||
private int tbIndex;
|
||||
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
package com.taosdata.example.mybatisplusdemo.domain;
|
||||
|
||||
import lombok.Data;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
|
||||
@Data
|
||||
public class Weather {
|
||||
|
||||
private Timestamp ts;
|
||||
private float temperature;
|
||||
private int humidity;
|
||||
private String location;
|
||||
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
package com.taosdata.example.mybatisplusdemo.mapper;
|
||||
|
||||
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
|
||||
import com.taosdata.example.mybatisplusdemo.domain.Temperature;
|
||||
import org.apache.ibatis.annotations.Insert;
|
||||
import org.apache.ibatis.annotations.Param;
|
||||
import org.apache.ibatis.annotations.Update;
|
||||
|
||||
public interface TemperatureMapper extends BaseMapper<Temperature> {
|
||||
|
||||
@Update("CREATE TABLE if not exists temperature(ts timestamp, temperature float) tags(location nchar(64), tbIndex int)")
|
||||
int createSuperTable();
|
||||
|
||||
@Update("create table #{tbName} using temperature tags( #{location}, #{tbindex})")
|
||||
int createTable(@Param("tbName") String tbName, @Param("location") String location, @Param("tbindex") int tbindex);
|
||||
|
||||
@Update("drop table if exists temperature")
|
||||
void dropSuperTable();
|
||||
|
||||
@Insert("insert into t${tbIndex}(ts, temperature) values(#{ts}, #{temperature})")
|
||||
int insertOne(Temperature one);
|
||||
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
package com.taosdata.example.mybatisplusdemo.mapper;
|
||||
|
||||
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
|
||||
import com.taosdata.example.mybatisplusdemo.domain.Weather;
|
||||
|
||||
public interface WeatherMapper extends BaseMapper<Weather> {
|
||||
|
||||
}
|
|
@ -0,0 +1,34 @@
|
|||
spring:
|
||||
datasource:
|
||||
# driver-class-name: org.h2.Driver
|
||||
# schema: classpath:db/schema-mysql.sql
|
||||
# data: classpath:db/data-mysql.sql
|
||||
# url: jdbc:h2:mem:test
|
||||
# username: root
|
||||
# password: test
|
||||
|
||||
# driver-class-name: com.mysql.jdbc.Driver
|
||||
# url: jdbc:mysql://master:3306/test?useSSL=false
|
||||
# username: root
|
||||
# password: 123456
|
||||
|
||||
driver-class-name: com.taosdata.jdbc.TSDBDriver
|
||||
url: jdbc:TAOS://localhost:6030/mp_test
|
||||
user: root
|
||||
password: taosdata
|
||||
charset: UTF-8
|
||||
locale: en_US.UTF-8
|
||||
timezone: UTC-8
|
||||
|
||||
mybatis-plus:
|
||||
configuration:
|
||||
map-underscore-to-camel-case: false
|
||||
|
||||
logging:
|
||||
level:
|
||||
com:
|
||||
taosdata:
|
||||
example:
|
||||
mybatisplusdemo:
|
||||
mapper: debug
|
||||
|
|
@ -0,0 +1,140 @@
|
|||
package com.taosdata.example.mybatisplusdemo.mapper;
|
||||
|
||||
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
|
||||
import com.baomidou.mybatisplus.core.metadata.IPage;
|
||||
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
|
||||
import com.taosdata.example.mybatisplusdemo.domain.Temperature;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
|
||||
@RunWith(SpringJUnit4ClassRunner.class)
|
||||
@SpringBootTest
|
||||
public class TemperatureMapperTest {
|
||||
|
||||
private static Random random = new Random(System.currentTimeMillis());
|
||||
private static String[] locations = {"北京", "上海", "深圳", "广州", "杭州"};
|
||||
|
||||
@Before
|
||||
public void before() {
|
||||
mapper.dropSuperTable();
|
||||
// create table temperature
|
||||
mapper.createSuperTable();
|
||||
// create table t_X using temperature
|
||||
for (int i = 0; i < 10; i++) {
|
||||
mapper.createTable("t" + i, locations[random.nextInt(locations.length)], i);
|
||||
}
|
||||
// insert into table
|
||||
int affectRows = 0;
|
||||
// insert 10 tables
|
||||
for (int i = 0; i < 10; i++) {
|
||||
// each table insert 5 rows
|
||||
for (int j = 0; j < 5; j++) {
|
||||
Temperature one = new Temperature();
|
||||
one.setTs(new Timestamp(1605024000000l));
|
||||
one.setTemperature(random.nextFloat() * 50);
|
||||
one.setLocation("望京");
|
||||
one.setTbIndex(i);
|
||||
affectRows += mapper.insertOne(one);
|
||||
}
|
||||
}
|
||||
Assert.assertEquals(50, affectRows);
|
||||
}
|
||||
|
||||
@After
|
||||
public void after() {
|
||||
mapper.dropSuperTable();
|
||||
}
|
||||
|
||||
@Autowired
|
||||
private TemperatureMapper mapper;
|
||||
|
||||
/***
|
||||
* test SelectList
|
||||
* **/
|
||||
@Test
|
||||
public void testSelectList() {
|
||||
List<Temperature> temperatureList = mapper.selectList(null);
|
||||
temperatureList.forEach(System.out::println);
|
||||
}
|
||||
|
||||
/***
|
||||
* test InsertOne which is a custom metheod
|
||||
* ***/
|
||||
@Test
|
||||
public void testInsert() {
|
||||
Temperature one = new Temperature();
|
||||
one.setTs(new Timestamp(1605024000000l));
|
||||
one.setTemperature(random.nextFloat() * 50);
|
||||
one.setLocation("望京");
|
||||
int affectRows = mapper.insertOne(one);
|
||||
Assert.assertEquals(1, affectRows);
|
||||
}
|
||||
|
||||
/***
|
||||
* test SelectOne
|
||||
* **/
|
||||
@Test
|
||||
public void testSelectOne() {
|
||||
QueryWrapper<Temperature> wrapper = new QueryWrapper<>();
|
||||
wrapper.eq("location", "beijing");
|
||||
Temperature one = mapper.selectOne(wrapper);
|
||||
System.out.println(one);
|
||||
Assert.assertNotNull(one);
|
||||
}
|
||||
|
||||
/***
|
||||
* test select By map
|
||||
* ***/
|
||||
@Test
|
||||
public void testSelectByMap() {
|
||||
Map<String, Object> map = new HashMap<>();
|
||||
map.put("location", "beijing");
|
||||
List<Temperature> temperatures = mapper.selectByMap(map);
|
||||
Assert.assertEquals(1, temperatures.size());
|
||||
}
|
||||
|
||||
/***
|
||||
* test selectObjs
|
||||
* **/
|
||||
@Test
|
||||
public void testSelectObjs() {
|
||||
List<Object> ts = mapper.selectObjs(null);
|
||||
System.out.println(ts);
|
||||
}
|
||||
|
||||
/**
|
||||
* test selectC ount
|
||||
* **/
|
||||
@Test
|
||||
public void testSelectCount() {
|
||||
int count = mapper.selectCount(null);
|
||||
Assert.assertEquals(5, count);
|
||||
}
|
||||
|
||||
/****
|
||||
* 分页
|
||||
*/
|
||||
@Test
|
||||
public void testSelectPage() {
|
||||
IPage page = new Page(1, 2);
|
||||
IPage<Temperature> temperatureIPage = mapper.selectPage(page, null);
|
||||
System.out.println("total : " + temperatureIPage.getTotal());
|
||||
System.out.println("pages : " + temperatureIPage.getPages());
|
||||
for (Temperature temperature : temperatureIPage.getRecords()) {
|
||||
System.out.println(temperature);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
package com.taosdata.example.mybatisplusdemo.mapper;
|
||||
|
||||
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
|
||||
import com.baomidou.mybatisplus.core.metadata.IPage;
|
||||
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
|
||||
import com.taosdata.example.mybatisplusdemo.domain.Weather;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
|
||||
@RunWith(SpringJUnit4ClassRunner.class)
|
||||
@SpringBootTest
|
||||
public class WeatherMapperTest {
|
||||
|
||||
private static Random random = new Random(System.currentTimeMillis());
|
||||
|
||||
@Autowired
|
||||
private WeatherMapper mapper;
|
||||
|
||||
@Test
|
||||
public void testSelectList() {
|
||||
List<Weather> weathers = mapper.selectList(null);
|
||||
weathers.forEach(System.out::println);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInsert() {
|
||||
Weather one = new Weather();
|
||||
one.setTs(new Timestamp(1605024000000l));
|
||||
one.setTemperature(random.nextFloat() * 50);
|
||||
one.setHumidity(random.nextInt(100));
|
||||
one.setLocation("望京");
|
||||
int affectRows = mapper.insert(one);
|
||||
Assert.assertEquals(1, affectRows);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSelectOne() {
|
||||
QueryWrapper<Weather> wrapper = new QueryWrapper<>();
|
||||
wrapper.eq("location", "beijing");
|
||||
Weather one = mapper.selectOne(wrapper);
|
||||
System.out.println(one);
|
||||
Assert.assertEquals(12.22f, one.getTemperature(), 0.00f);
|
||||
Assert.assertEquals("beijing", one.getLocation());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSelectByMap() {
|
||||
Map<String, Object> map = new HashMap<>();
|
||||
map.put("location", "beijing");
|
||||
List<Weather> weathers = mapper.selectByMap(map);
|
||||
Assert.assertEquals(1, weathers.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSelectObjs() {
|
||||
List<Object> ts = mapper.selectObjs(null);
|
||||
System.out.println(ts);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSelectCount() {
|
||||
int count = mapper.selectCount(null);
|
||||
// Assert.assertEquals(5, count);
|
||||
System.out.println(count);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSelectPage() {
|
||||
IPage page = new Page(1, 2);
|
||||
IPage<Weather> weatherIPage = mapper.selectPage(page, null);
|
||||
System.out.println("total : " + weatherIPage.getTotal());
|
||||
System.out.println("pages : " + weatherIPage.getPages());
|
||||
for (Weather weather : weatherIPage.getRecords()) {
|
||||
System.out.println(weather);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@@ -50,10 +50,10 @@ static void queryDB(TAOS *taos, char *command) {
  taos_free_result(pSql);
}

void Test(char *qstr, const char *input, int i);

int main(int argc, char *argv[]) {
  TAOS *    taos;
  char      qstr[1024];
  TAOS_RES *result;

  // connect to server
  if (argc < 2) {

@@ -63,41 +63,26 @@ int main(int argc, char *argv[]) {

  // init TAOS
  taos_init();

  taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
  for (int i = 0; i < 4000000; i++) {
    Test(qstr, argv[1], i);
  }
  taos_cleanup();
}
void Test(char *qstr, const char *input, int index) {
  TAOS *taos = taos_connect(input, "root", "taosdata", NULL, 0);
  printf("==================test at %d\n================================", index);
  queryDB(taos, "drop database if exists demo");
  queryDB(taos, "create database demo");
  TAOS_RES *result;
  if (taos == NULL) {
    printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/);
    exit(1);
  }
  printf("success to connect to server\n");

  //taos_query(taos, "drop database demo");
  queryDB(taos, "drop database if exists demo");

  //result = taos_query(taos, "create database demo");
  //if (result == NULL) {
  //  printf("failed to create database, reason:%s\n", "null result"/*taos_errstr(taos)*/);
  //  exit(1);
  //}
  queryDB(taos, "create database demo");
  printf("success to create database\n");

  //taos_query(taos, "use demo");
  queryDB(taos, "use demo");

  // create table
  //if (taos_query(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))") == 0) {
  //  printf("failed to create table, reason:%s\n", taos_errstr(result));
  //  exit(1);
  //}
  queryDB(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))");
  printf("success to create table\n");

  // sleep for one second to make sure table is created on data node
  // taosMsleep(1000);

  // insert 10 records
  int i = 0;
  for (i = 0; i < 10; ++i) {
    sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')", 1546300800000 + i * 1000, i, i, i, i*10000000, i*1.0, i*2.0, "hello");

@@ -117,7 +102,6 @@ int main(int argc, char *argv[]) {
    }
    taos_free_result(result);

    //sleep(1);
  }
  printf("success to insert rows, total %d rows\n", i);

@@ -147,5 +131,6 @@ int main(int argc, char *argv[]) {

  taos_free_result(result);
  printf("====demo end====\n\n");
  return getchar();
  taos_close(taos);
}

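
The queryDB helper used throughout this example wraps taos_query so each statement's result is checked and freed before the next one runs. When reusing the pattern, checking the result's error code and message is what turns a silent failure into a visible one. A small sketch of such a helper; it reuses the connector calls already present in this file (taos_query, taos_errstr, taos_free_result, taos_close) and assumes taos_errno and the file's existing includes are available, as in the 2.x C client:

```c
/* Run one statement, report the error and bail out if it fails. */
static void queryDBChecked(TAOS *taos, const char *sql) {
  TAOS_RES *res = taos_query(taos, sql);
  int code = taos_errno(res);   /* assumed available in the 2.x C client */
  if (code != 0) {
    printf("failed to run \"%s\", reason:%s\n", sql, taos_errstr(res));
    taos_free_result(res);
    taos_close(taos);
    exit(EXIT_FAILURE);
  }
  taos_free_result(res);
}
```
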
||||
|
|
|
@ -0,0 +1 @@
|
|||
Subproject commit f2ffd30521b8e8afbc9d25c75f8eeeb6a48bd030
|
|
@ -1,3 +0,0 @@
|
|||
/target
|
||||
**/*.rs.bk
|
||||
Cargo.lock
|
|
@ -1,7 +0,0 @@
|
|||
[package]
|
||||
name = "tdengine"
|
||||
version = "0.1.0"
|
||||
authors = ["Chunhua Jiang <jiangch@3reality.com>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
|
@ -1,20 +0,0 @@
|
|||
# TDengine driver connector for Rust
|
||||
|
||||
It's a rust implementation for [TDengine](https://github.com/taosdata/TDengine), an open-sourced big data platform designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring.
|
||||
|
||||
## Dependencies
|
||||
- Rust:
|
||||
```
|
||||
curl https://sh.rustup.rs -sSf | sh
|
||||
```
|
||||
|
||||
## Run with Sample
|
||||
|
||||
Build and run basic sample:
|
||||
```
|
||||
cargo run --example demo
|
||||
```
|
||||
Build and run subscribe sample:
|
||||
```
|
||||
cargo run --example subscribe
|
||||
```
|
|
@ -1,10 +0,0 @@
|
|||
// build.rs
|
||||
|
||||
use std::env;
|
||||
|
||||
fn main() {
|
||||
let project_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
|
||||
|
||||
println!("cargo:rustc-link-search={}", project_dir); // the "-L" flag
|
||||
println!("cargo:rustc-link-lib=taos"); // the "-l" flag
|
||||
}
|