merge with develop
commit dac3710a68

@@ -132,10 +132,7 @@ bool tscIsSelectivityWithTagQuery(SSqlCmd* pCmd);
void tscAddSpecialColumnForSelect(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId, SColumnIndex* pIndex,
SSchema* pColSchema, int16_t isTag);

//void addRequiredTagColumn(SQueryInfo* pQueryInfo, int32_t tagColIndex, int32_t tableIndex);
void addRequiredTagColumn(STableMetaInfo* pTableMetaInfo, SColumnIndex* index);

int32_t tscSetTableId(STableMetaInfo* pTableMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql);
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql);
void tscClearInterpInfo(SQueryInfo* pQueryInfo);

bool tscIsInsertData(char* sqlstr);

@@ -56,6 +56,7 @@ typedef struct STableMeta {
STableComInfo tableInfo;
uint8_t tableType;
int16_t sversion;
int16_t tversion;
SCMVgroupInfo vgroupInfo;
int32_t sid; // the index of one table in a virtual node
uint64_t uid; // unique id of a table
@@ -294,7 +295,6 @@ typedef struct STscObj {
char writeAuth : 1;
char superAuth : 1;
uint32_t connId;
struct SSqlObj * pSql;
struct SSqlObj * pHb;
struct SSqlObj * sqlList;
struct SSqlStream *streamList;
@@ -411,7 +411,7 @@ void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, void (*fp)(), void *param, const
void tscProcessMultiVnodesInsertFromFile(SSqlObj *pSql);
void tscKillSTableQuery(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(STscObj *pObj);
bool tscIsUpdateQuery(SSqlObj* pSql);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);

char *tscGetErrorMsgPayload(SSqlCmd *pCmd);

@@ -62,7 +62,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp
* Method: executeQueryImp
* Signature: ([BJ)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp
(JNIEnv *, jobject, jbyteArray, jlong);

/*
@@ -71,7 +71,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp
(JNIEnv *, jobject, jlong);
(JNIEnv *, jobject, jlong, jlong);

/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
@@ -87,7 +87,7 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp
* Signature: (J)J
*/
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp
(JNIEnv *, jobject, jlong);
(JNIEnv *env, jobject jobj, jlong con, jlong tres);

/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
@@ -103,7 +103,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp
(JNIEnv *, jobject, jlong);
(JNIEnv *env, jobject jobj, jlong con, jlong res);

/*
* Class: com_taosdata_jdbc_TSDBJNIConnector
@@ -142,8 +142,8 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp
* Method: consumeImp
* Signature: (J)Lcom/taosdata/jdbc/TSDBResultSetRowData;
*/
JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp
(JNIEnv *, jobject, jlong, jint);
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp
(JNIEnv *, jobject, jlong);

/*
* Class: com_taosdata_jdbc_TSDBJNIConnector

@@ -13,19 +13,35 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "com_taosdata_jdbc_TSDBJNIConnector.h"
#include "os.h"
#include "taos.h"
#include "tscSubquery.h"
#include "tlog.h"
#include "tscUtil.h"
#include "tsclient.h"
#include "tlog.h"
#include "ttime.h"

#define jniError(...) { if (jniDebugFlag & DEBUG_ERROR) { taosPrintLog("ERROR JNI ", jniDebugFlag, __VA_ARGS__); }}
#define jniWarn(...) { if (jniDebugFlag & DEBUG_WARN) { taosPrintLog("WARN JNI ", jniDebugFlag, __VA_ARGS__); }}
#define jniTrace(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); }}
#define jniPrint(...) { taosPrintLog("JNI ", tscEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }
#include "com_taosdata_jdbc_TSDBJNIConnector.h"

#define jniError(...) \
{ \
if (jniDebugFlag & DEBUG_ERROR) { \
taosPrintLog("ERROR JNI ", jniDebugFlag, __VA_ARGS__); \
} \
}
#define jniWarn(...) \
{ \
if (jniDebugFlag & DEBUG_WARN) { \
taosPrintLog("WARN JNI ", jniDebugFlag, __VA_ARGS__); \
} \
}
#define jniTrace(...) \
{ \
if (jniDebugFlag & DEBUG_TRACE) { \
taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); \
} \
}
#define jniPrint(...) \
{ taosPrintLog("JNI ", tscEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }

int __init = 0;

@@ -117,7 +133,8 @@ void jniGetGlobalMethod(JNIEnv *env) {
jniTrace("native method register finished");
}

JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setAllocModeImp(JNIEnv *env, jobject jobj, jint jMode, jstring jPath, jboolean jAutoDump) {
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setAllocModeImp(JNIEnv *env, jobject jobj, jint jMode,
jstring jPath, jboolean jAutoDump) {
if (jPath != NULL) {
const char *path = (*env)->GetStringUTFChars(env, jPath, NULL);
taosSetAllocMode(jMode, path, !!jAutoDump);
@@ -172,14 +189,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv
}
(*env)->ReleaseStringUTFChars(env, optionValue, charset);
} else if (optionIndex == TSDB_OPTION_TIMEZONE) {
const char *timezone = (*env)->GetStringUTFChars(env, optionValue, NULL);
if (timezone && strlen(timezone) != 0) {
res = taos_options(TSDB_OPTION_TIMEZONE, timezone);
const char *tz1 = (*env)->GetStringUTFChars(env, optionValue, NULL);
if (tz1 && strlen(tz1) != 0) {
res = taos_options(TSDB_OPTION_TIMEZONE, tz1);
jniTrace("set timezone to %s, result:%d", timezone, res);
} else {
jniTrace("input timezone is empty");
}
(*env)->ReleaseStringUTFChars(env, optionValue, timezone);
(*env)->ReleaseStringUTFChars(env, optionValue, tz1);
} else {
jniError("option index:%d is not found", optionIndex);
}
@@ -219,9 +236,9 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn
}

/*
* set numOfThreadsPerCore = 0
* means only one thread for client side scheduler
*/
* set numOfThreadsPerCore = 0
* means only one thread for client side scheduler
*/
tsNumOfThreadsPerCore = 0.0;

ret = (jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, jport);
@@ -241,8 +258,8 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn
return ret;
}

JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(JNIEnv *env, jobject jobj,
jbyteArray jsql, jlong con) {
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(JNIEnv *env, jobject jobj,
jbyteArray jsql, jlong con) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is already closed", jobj);
@@ -264,65 +281,76 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J

(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)dst);
if ((*env)->ExceptionCheck(env)) {
//todo handle error
// todo handle error
}

jniTrace("jobj:%p, conn:%p, sql:%s", jobj, tscon, dst);

int code = taos_query(tscon, dst);
if (code != 0) {
jniError("jobj:%p, conn:%p, code:%s, msg:%s", jobj, tscon, tstrerror(code), taos_errstr(tscon));
free(dst);
return JNI_TDENGINE_ERROR;
} else {
int32_t affectRows = 0;
SSqlObj *pSql = ((STscObj *)tscon)->pSql;
SSqlObj *pSql = taos_query(tscon, dst);
int32_t code = taos_errno(pSql);

if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s, msg:%s", jobj, tscon, tstrerror(code), taos_errstr(pSql));
} else {
int32_t affectRows = 0;
if (pSql->cmd.command == TSDB_SQL_INSERT) {
affectRows = taos_affected_rows(tscon);
affectRows = taos_affected_rows(pSql);
jniTrace("jobj:%p, conn:%p, code:%s, affect rows:%d", jobj, tscon, tstrerror(code), affectRows);
} else {
jniTrace("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
}

free(dst);
return affectRows;
}

free(dst);
return (jlong)pSql;
}

JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con) {
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrCodeImp(JNIEnv *env, jobject jobj, jlong con, jlong tres) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return (jint)-TSDB_CODE_INVALID_CONNECTION;
return (jint)TSDB_CODE_INVALID_CONNECTION;
}

return (jint)-taos_errno(tscon);
if ((void *)tres == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
}

TAOS_RES *pSql = (TAOS_RES *)tres;

return (jint)taos_errno(pSql);
}

JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(JNIEnv *env, jobject jobj, jlong con) {
TAOS *tscon = (TAOS *)con;
return (*env)->NewStringUTF(env, (const char *)taos_errstr(tscon));
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(JNIEnv *env, jobject jobj, jlong tres) {
TAOS_RES *pSql = (TAOS_RES *)tres;
return (*env)->NewStringUTF(env, (const char *)taos_errstr(pSql));
}

JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(JNIEnv *env, jobject jobj, jlong con) {
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(JNIEnv *env, jobject jobj, jlong con,
jlong tres) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}

jlong ret = 0;

if (tscIsUpdateQuery(tscon)) {
ret = 0; // for update query, no result pointer
jniTrace("jobj:%p, conn:%p, no resultset", jobj, tscon);
} else {
ret = (jlong) taos_use_result(tscon);
jniTrace("jobj:%p, conn:%p, get resultset:%p", jobj, tscon, (void *) ret);
if ((void *)tres == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
}

return ret;
SSqlObj *pSql = (TAOS_RES *)tres;
STscObj *pObj = pSql->pTscObj;

if (tscIsUpdateQuery(pSql)) {
taos_free_result(pSql); // free result here
jniTrace("jobj:%p, conn:%p, no resultset, %p", jobj, pObj, (void *)tres);
return 0;
} else {
jniTrace("jobj:%p, conn:%p, get resultset, %p", jobj, pObj, (void *)tres);
return tres;
}
}

JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(JNIEnv *env, jobject jobj, jlong con,
@@ -343,17 +371,21 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(
return JNI_SUCCESS;
}

JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp(JNIEnv *env, jobject jobj,
jlong con) {
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp(JNIEnv *env, jobject jobj, jlong con,
jlong res) {
TAOS *tscon = (TAOS *)con;
if (tscon == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
}

jint ret = taos_affected_rows(tscon);
if ((void *)res == NULL) {
jniError("jobj:%p, conn:%p, resultset is null", jobj, tscon);
return JNI_RESULT_SET_NULL;
}

jniTrace("jobj:%p, conn:%p, affect rows:%d", jobj, tscon, (void *)con, ret);
jint ret = taos_affected_rows((SSqlObj *)res);
jniTrace("jobj:%p, conn:%p, sql:%p, affect rows:%d", jobj, tscon, (void *)con, res, ret);

return ret;
}
@@ -405,7 +437,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaData
* @return
*/
jstring jniFromNCharToByteArray(JNIEnv *env, char *nchar, int32_t maxBytes) {
int len = (int)strlen(nchar);
int len = (int)strlen(nchar);
if (len > maxBytes) { // no terminated symbol exists '\0'
len = maxBytes;
}
@@ -439,7 +471,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn

TAOS_ROW row = taos_fetch_row(result);
if (row == NULL) {
int tserrno = taos_errno(tscon);
int tserrno = taos_errno(result);
if (tserrno == 0) {
jniTrace("jobj:%p, conn:%p, resultset:%p, fields size is %d, fetch row to the end", jobj, tscon, res, num_fields);
return JNI_FETCH_END;
@@ -476,24 +508,22 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
float fv = 0;
fv = GET_FLOAT_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat)fv);
}
break;
} break;
case TSDB_DATA_TYPE_DOUBLE: {
double dv = 0;
dv = GET_DOUBLE_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
}
break;
} break;
case TSDB_DATA_TYPE_BINARY: {
strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case that terminated does not exist
strncpy(tmp, row[i], (size_t)fields[i].bytes); // handle the case that terminated does not exist
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, tmp));

memset(tmp, 0, (size_t) fields[i].bytes);
memset(tmp, 0, (size_t)fields[i].bytes);
break;
}
case TSDB_DATA_TYPE_NCHAR: {
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteArrayFp, i,
jniFromNCharToByteArray(env, (char*)row[i], fields[i].bytes));
jniFromNCharToByteArray(env, (char *)row[i], fields[i].bytes));
break;
}
case TSDB_DATA_TYPE_TIMESTAMP:
@@ -552,110 +582,24 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_subscribeImp(JNI
return sub;
}

static jobject convert_one_row(JNIEnv *env, TAOS_ROW row, TAOS_FIELD* fields, int num_fields) {
jobject rowobj = (*env)->NewObject(env, g_rowdataClass, g_rowdataConstructor, num_fields);
jniTrace("created a rowdata object, rowobj:%p", rowobj);

for (int i = 0; i < num_fields; i++) {
if (row[i] == NULL) {
continue;
}

switch (fields[i].type) {
case TSDB_DATA_TYPE_BOOL:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetBooleanFp, i, (jboolean)(*((char *)row[i]) == 1));
break;
case TSDB_DATA_TYPE_TINYINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteFp, i, (jbyte) * ((char *)row[i]));
break;
case TSDB_DATA_TYPE_SMALLINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetShortFp, i, (jshort) * ((short *)row[i]));
break;
case TSDB_DATA_TYPE_INT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetIntFp, i, (jint) * (int *)row[i]);
break;
case TSDB_DATA_TYPE_BIGINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT: {
float fv = 0;
fv = GET_FLOAT_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetFloatFp, i, (jfloat)fv);
}
break;
case TSDB_DATA_TYPE_DOUBLE:{
double dv = 0;
dv = GET_DOUBLE_VAL(row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
}
break;
case TSDB_DATA_TYPE_BINARY: {
char tmp[TSDB_MAX_BYTES_PER_ROW] = {0};
strncpy(tmp, row[i], (size_t) fields[i].bytes); // handle the case that terminated does not exist
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, tmp));

memset(tmp, 0, (size_t) fields[i].bytes);
break;
}
case TSDB_DATA_TYPE_NCHAR:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteArrayFp, i,
jniFromNCharToByteArray(env, (char*)row[i], fields[i].bytes));
break;
case TSDB_DATA_TYPE_TIMESTAMP:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetTimestampFp, i, (jlong) * ((int64_t *)row[i]));
break;
default:
break;
}
}
return rowobj;
}

JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub, jint timeout) {
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_consumeImp(JNIEnv *env, jobject jobj, jlong sub) {
jniTrace("jobj:%p, in TSDBJNIConnector_consumeImp, sub:%ld", jobj, sub);
jniGetGlobalMethod(env);

TAOS_SUB *tsub = (TAOS_SUB *)sub;
jobject rows = (*env)->NewObject(env, g_arrayListClass, g_arrayListConstructFp);

int64_t start = taosGetTimestampMs();
int count = 0;
TAOS_RES *res = taos_consume(tsub);

while (true) {
TAOS_RES * res = taos_consume(tsub);
if (res == NULL) {
jniError("jobj:%p, tsub:%p, taos_consume returns NULL", jobj, tsub);
return NULL;
}

TAOS_FIELD *fields = taos_fetch_fields(res);
int num_fields = taos_num_fields(res);
while (true) {
TAOS_ROW row = taos_fetch_row(res);
if (row == NULL) {
break;
}
jobject rowobj = convert_one_row(env, row, fields, num_fields);
(*env)->CallBooleanMethod(env, rows, g_arrayListAddFp, rowobj);
count++;
}

if (count > 0) {
break;
}
if (timeout == -1) {
continue;
}
if (((int)(taosGetTimestampMs() - start)) >= timeout) {
jniTrace("jobj:%p, sub:%ld, timeout", jobj, sub);
break;
}
if (res == NULL) {
jniError("jobj:%p, tsub:%p, taos_consume returns NULL", jobj, tsub);
return 0l;
}

return rows;
return (long)res;
}

JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub, jboolean keepProgress) {
JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_unsubscribeImp(JNIEnv *env, jobject jobj, jlong sub,
jboolean keepProgress) {
TAOS_SUB *tsub = (TAOS_SUB *)sub;
taos_unsubscribe(tsub, keepProgress);
}
@@ -678,7 +622,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab
char *dst = (char *)calloc(1, sizeof(char) * (len + 1));
(*env)->GetByteArrayRegion(env, jsql, 0, len, (jbyte *)dst);
if ((*env)->ExceptionCheck(env)) {
//todo handle error
// todo handle error
}

int code = taos_validate_sql(tscon, dst);

@@ -50,14 +50,15 @@ void doAsyncQuery(SSqlObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
pSql->maxRetry = TSDB_MAX_REPLICA_NUM;
pSql->fp = fp;

sem_init(&pSql->rspSem, 0, 0);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
tscError("failed to malloc payload");
tscQueueAsyncError(fp, param, TSDB_CODE_CLI_OUT_OF_MEMORY);
return;
}

pSql->sqlstr = realloc(pSql->sqlstr, sqlLen + 1);

// todo check for OOM problem
pSql->sqlstr = calloc(1, sqlLen + 1);
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
tscQueueAsyncError(fp, param, TSDB_CODE_CLI_OUT_OF_MEMORY);
@@ -95,7 +96,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa

int32_t sqlLen = strlen(sqlstr);
if (sqlLen > tsMaxSQLStringLen) {
tscError("sql string too long");
tscError("sql string exceeds max length:%d", tsMaxSQLStringLen);
terrno = TSDB_CODE_INVALID_SQL;
tscQueueAsyncError(fp, param, TSDB_CODE_INVALID_SQL);
return;
@@ -486,7 +487,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
assert(code == TSDB_CODE_SUCCESS && pTableMetaInfo->pTableMeta != NULL);
(*pSql->fp)(pSql->param, NULL, code);
(*pSql->fp)(pSql->param, pSql, code);
return;
}

@@ -793,7 +793,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
}

STableMetaInfo *pSTableMeterMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX);
tscSetTableId(pSTableMeterMetaInfo, &sToken, pSql);
tscSetTableFullName(pSTableMeterMetaInfo, &sToken, pSql);

strncpy(pTag->name, pSTableMeterMetaInfo->name, TSDB_TABLE_ID_LEN);
code = tscGetTableMeta(pSql, pSTableMeterMetaInfo);
@@ -834,9 +834,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
sql += index;

if (TK_STRING == sToken.type) {
sToken.n = strdequote(sToken.z);
strtrim(sToken.z);
sToken.n = (uint32_t)strlen(sToken.z);
strdequote(sToken.z);
sToken.n = strtrim(sToken.z);
}

if (sToken.type == TK_RP) {
@@ -925,7 +924,11 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {

for (int32_t i = 0; i < spd.numOfCols; ++i) {
if (!spd.hasVal[i]) { // current tag column do not have any value to insert, set it to null
setNull(ptr, pTagSchema[i].type, pTagSchema[i].bytes);
if (pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(ptr, pTagSchema[i].type);
} else {
setNull(ptr, pTagSchema[i].type, pTagSchema[i].bytes);
}
}

ptr += pTagSchema[i].bytes;
@@ -944,7 +947,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}

int32_t ret = tscSetTableId(pTableMetaInfo, &tableToken, pSql);
int32_t ret = tscSetTableFullName(pTableMetaInfo, &tableToken, pSql);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -1087,7 +1090,7 @@ int doParseInsertSql(SSqlObj *pSql, char *str) {
goto _error_clean;
}

if ((code = tscSetTableId(pTableMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) {
if ((code = tscSetTableFullName(pTableMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) {
goto _error_clean;
}

@@ -1205,9 +1208,8 @@ int doParseInsertSql(SSqlObj *pSql, char *str) {
str += index;

if (TK_STRING == sToken.type) {
sToken.n = strdequote(sToken.z);
strtrim(sToken.z);
sToken.n = (uint32_t)strlen(sToken.z);
strdequote(sToken.z);
sToken.n = strtrim(sToken.z);
}

if (sToken.type == TK_RP) {

@@ -494,7 +494,7 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
tsem_init(&pSql->rspSem, 0, 0);
pSql->signature = pSql;
pSql->pTscObj = pObj;
pSql->pTscObj->pSql = pSql;
//pSql->pTscObj->pSql = pSql;
pSql->maxRetry = TSDB_MAX_REPLICA_NUM;

pStmt->pSql = pSql;
@@ -515,7 +515,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
//doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
pSql->param = (void*)pStmt->taos;
pSql->param = (void*)pSql;
pSql->fp = waitForQueryRsp;
pSql->insertType = TSDB_QUERY_TYPE_STMT_INSERT;

@@ -613,7 +613,9 @@ int taos_stmt_execute(TAOS_STMT* stmt) {
} else {
tfree(pStmt->pSql->sqlstr);
pStmt->pSql->sqlstr = sql;
ret = taos_query(pStmt->taos, pStmt->pSql->sqlstr);
SSqlObj* pSql = taos_query((TAOS*)pStmt->taos, pStmt->pSql->sqlstr);
ret = taos_errno(pSql);
taos_free_result(pSql);
}
}
return ret;

@@ -89,13 +89,23 @@ void tscSaveSlowQueryFp(void *handle, void *tmrId) {
}

void tscSaveSlowQuery(SSqlObj *pSql) {
const static int64_t SLOW_QUERY_INTERVAL = 3000000L;
if (pSql->res.useconds < SLOW_QUERY_INTERVAL) return;
const static int64_t SLOW_QUERY_INTERVAL = 3000000L; // todo configurable
size_t size = 200; // other part of sql string, expect the main sql str

if (pSql->res.useconds < SLOW_QUERY_INTERVAL) {
return;
}

tscTrace("%p query time:%" PRId64 " sql:%s", pSql, pSql->res.useconds, pSql->sqlstr);
int32_t sqlSize = TSDB_SLOW_QUERY_SQL_LEN + size;

char *sql = malloc(200);
int len = snprintf(sql, 200, "insert into %s.slowquery values(now, '%s', %" PRId64 ", %" PRId64 ", '", tsMonitorDbName,
char *sql = malloc(sqlSize);
if (sql == NULL) {
tscError("%p failed to allocate memory to sent slow query to dnode", pSql);
return;
}

int len = snprintf(sql, size, "insert into %s.slowquery values(now, '%s', %" PRId64 ", %" PRId64 ", '", tsMonitorDbName,
pSql->pTscObj->user, pSql->stime, pSql->res.useconds);
int sqlLen = snprintf(sql + len, TSDB_SLOW_QUERY_SQL_LEN, "%s", pSql->sqlstr);
if (sqlLen > TSDB_SLOW_QUERY_SQL_LEN - 1) {
@@ -103,8 +113,8 @@ void tscSaveSlowQuery(SSqlObj *pSql) {
} else {
sqlLen += len;
}
strcpy(sql + sqlLen, "')");

strcpy(sql + sqlLen, "')");
taosTmrStart(tscSaveSlowQueryFp, 200, sql, tscTmr);
}

@@ -163,8 +163,7 @@ static int32_t handlePassword(SSqlCmd* pCmd, SSQLToken* pPwd) {
}

strdequote(pPwd->z);
strtrim(pPwd->z); // trim space before and after passwords
pPwd->n = strlen(pPwd->z);
pPwd->n = strtrim(pPwd->z); // trim space before and after passwords

if (pPwd->n <= 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
@@ -226,7 +225,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
} else if (pInfo->type == TSDB_SQL_DROP_TABLE) {
assert(pInfo->pDCLInfo->nTokens == 1);

if (tscSetTableId(pTableMetaInfo, pzName, pSql) != TSDB_CODE_SUCCESS) {
if (tscSetTableFullName(pTableMetaInfo, pzName, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
} else if (pInfo->type == TSDB_SQL_DROP_DNODE) {
@@ -353,7 +352,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}

if (tscSetTableId(pTableMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) {
if (tscSetTableFullName(pTableMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}

@@ -686,7 +685,7 @@ int32_t parseSlidingClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
return TSDB_CODE_SUCCESS;
}

int32_t tscSetTableId(STableMetaInfo* pTableMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql) {
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SSQLToken* pzTableName, SSqlObj* pSql) {
const char* msg = "name too long";

SSqlCmd* pCmd = &pSql->cmd;
@@ -1465,7 +1464,6 @@ static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema,

int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult) {
STableMetaInfo* pTableMetaInfo = NULL;

int32_t optr = pItem->pNode->nSQLOptr;

const char* msg1 = "not support column types";
@@ -4302,7 +4300,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
}

if (tscSetTableId(pTableMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) {
if (tscSetTableFullName(pTableMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(pQueryInfo->msg, msg2);
}

@@ -4410,7 +4408,9 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) {
// Note: update can only be applied to table not super table.
// the following is handle display tags value for meters created according to super table
// the following is used to handle tags value for table created according to super table
pCmd->command = TSDB_SQL_UPDATE_TAGS_VAL;

tVariantList* pVarList = pAlterSQL->varList;
tVariant* pTagName = &pVarList->a[0].pVar;

@@ -4433,15 +4433,38 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {

// validate the length of binary
if ((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) &&
pVarList->a[1].pVar.nLen > pTagsSchema->bytes) {
(pVarList->a[1].pVar.nLen + VARSTR_HEADER_SIZE) > pTagsSchema->bytes) {
return invalidSqlErrMsg(pQueryInfo->msg, msg14);
}

char name1[128] = {0};
strncpy(name1, pTagName->pz, pTagName->nLen);
int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + TSDB_EXTRA_PAYLOAD_SIZE;
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for alter table msg", pSql);
return TSDB_CODE_CLI_OUT_OF_MEMORY;
}

TAOS_FIELD f = tscCreateField(TSDB_DATA_TYPE_INT, name1, tDataTypeDesc[TSDB_DATA_TYPE_INT].nSize);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) (pCmd->payload + tsRpcHeadSize);
pUpdateMsg->head.vgId = htonl(pTableMeta->vgroupInfo.vgId);
pUpdateMsg->tid = htonl(pTableMeta->sid);
pUpdateMsg->uid = htobe64(pTableMeta->uid);
pUpdateMsg->colId = htons(pTagsSchema->colId);
pUpdateMsg->type = htons(pTagsSchema->type);
pUpdateMsg->bytes = htons(pTagsSchema->bytes);
pUpdateMsg->tversion = htons(pTableMeta->tversion);

tVariantDump(&pVarList->a[1].pVar, pUpdateMsg->data, pTagsSchema->type, true);

int32_t len = 0;
if (pTagsSchema->type != TSDB_DATA_TYPE_BINARY && pTagsSchema->type != TSDB_DATA_TYPE_NCHAR) {
len = tDataTypeDesc[pTagsSchema->type].nSize;
} else {
len = varDataLen(pUpdateMsg->data);
}

pUpdateMsg->tagValLen = htonl(len); // length may be changed after dump data

int32_t total = sizeof(SUpdateTableTagValMsg) + len;
pUpdateMsg->head.contLen = htonl(total);

} else if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN) {
tFieldList* pFieldList = pAlterSQL->pAddColumns;
@@ -4654,8 +4677,7 @@ int32_t validateColumnName(char* name) {

if (token.type == TK_STRING) {
strdequote(token.z);
strtrim(token.z);
token.n = (uint32_t)strlen(token.z);
token.n = strtrim(token.z);

int32_t k = tSQLGetToken(token.z, &token.type);
if (k != token.n) {
@@ -5499,7 +5521,7 @@ int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSqlInfo* p
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}

if (tscSetTableId(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) {
if (tscSetTableFullName(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}

@@ -5554,7 +5576,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}

if (tscSetTableId(pStableMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) {
if (tscSetTableFullName(pStableMeterMetaInfo, pToken, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}

@@ -5600,7 +5622,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
}

STableMetaInfo* pTableMeterMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX);
ret = tscSetTableId(pTableMeterMetaInfo, &pInfo->pCreateTableInfo->name, pSql);
ret = tscSetTableFullName(pTableMeterMetaInfo, &pInfo->pCreateTableInfo->name, pSql);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -5643,7 +5665,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
}

if (tscSetTableId(pTableMetaInfo, &srcToken, pSql) != TSDB_CODE_SUCCESS) {
if (tscSetTableFullName(pTableMetaInfo, &srcToken, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(pQueryInfo->msg, msg2);
}

@@ -5674,7 +5696,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
}

// set the created table[stream] name
if (tscSetTableId(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) {
if (tscSetTableFullName(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(pQueryInfo->msg, msg1);
}

@@ -5783,7 +5805,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
STableMetaInfo* pMeterInfo1 = tscGetMetaInfo(pQueryInfo, i);

SSQLToken t = {.type = TSDB_DATA_TYPE_BINARY, .n = pTableItem->nLen, .z = pTableItem->pz};
if (tscSetTableId(pMeterInfo1, &t, pSql) != TSDB_CODE_SUCCESS) {
if (tscSetTableFullName(pMeterInfo1, &t, pSql) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}

@@ -168,6 +168,8 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
pTableMeta->sid = pTableMetaMsg->sid;
pTableMeta->uid = pTableMetaMsg->uid;
pTableMeta->vgroupInfo = pTableMetaMsg->vgroup;
pTableMeta->sversion = pTableMetaMsg->sversion;
pTableMeta->tversion = pTableMetaMsg->tversion;

memcpy(pTableMeta->schema, pTableMetaMsg->schema, schemaSize);

@@ -222,9 +222,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
if (pObj->signature != pObj) {
tscTrace("%p sql is already released or DB connection is closed, freed:%d pObj:%p signature:%p", pSql, pSql->freed,
pObj, pObj->signature);
if (pSql != pObj->pSql) {
tscFreeSqlObj(pSql);
}
tscFreeSqlObj(pSql);
rpcFreeCont(rpcMsg->pCont);
return;
}
@@ -259,6 +257,9 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
rpcMsg->code = TSDB_CODE_NOT_READY;
rpcFreeCont(rpcMsg->pCont);
return;
} else if (pCmd->command == TSDB_SQL_META) {
// rpcFreeCont(rpcMsg->pCont);
// return;
} else {
tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry);

@@ -333,21 +334,11 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);

if (rpcMsg->code != TSDB_CODE_ACTION_IN_PROGRESS) {
void *taosres = tscKeepConn[pCmd->command] ? pSql : NULL;
rpcMsg->code = pRes->code ? pRes->code : pRes->numOfRows;

tscTrace("%p SQL result:%s res:%p", pSql, tstrerror(pRes->code), pSql);

/*
* Whether to free sqlObj or not should be decided before call the user defined function, since this SqlObj
* may be freed in UDF, and reused by other threads before tscShouldBeFreed called, in which case
* tscShouldBeFreed checks an object which is actually allocated by other threads.
*
* If this block of memory is re-allocated for an insert thread, in which tscKeepConn[command] equals to 0,
* the tscShouldBeFreed will success and tscFreeSqlObj free it immediately.
*/
bool shouldFree = tscShouldBeFreed(pSql);
(*pSql->fp)(pSql->param, taosres, rpcMsg->code);
(*pSql->fp)(pSql->param, pSql, rpcMsg->code);

if (shouldFree) {
tscTrace("%p sqlObj is automatically freed", pSql);
@@ -1255,28 +1246,24 @@ int tscEstimateAlterTableMsgLength(SSqlCmd *pCmd) {
}

int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCMAlterTableMsg *pAlterTableMsg;
char * pMsg;
int msgLen = 0;
int size = 0;
char *pMsg;
int msgLen = 0;

SSqlCmd * pCmd = &pSql->cmd;
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);

STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);

size = tscEstimateAlterTableMsgLength(pCmd);
SAlterTableSQL *pAlterInfo = pInfo->pAlterInfo;
int size = tscEstimateAlterTableMsgLength(pCmd);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for alter table msg", pSql);
return -1;
}

pAlterTableMsg = (SCMAlterTableMsg *)pCmd->payload;

SCMAlterTableMsg *pAlterTableMsg = (SCMAlterTableMsg *)pCmd->payload;
tscGetDBInfoFromMeterId(pTableMetaInfo->name, pAlterTableMsg->db);

SAlterTableSQL *pAlterInfo = pInfo->pAlterInfo;

strcpy(pAlterTableMsg->tableId, pTableMetaInfo->name);
pAlterTableMsg->type = htons(pAlterInfo->type);

@@ -1297,6 +1284,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += pAlterInfo->tagData.dataLen;

msgLen = pMsg - (char*)pAlterTableMsg;

pCmd->payloadLen = msgLen;
pCmd->msgType = TSDB_MSG_TYPE_CM_ALTER_TABLE;

@@ -1305,6 +1293,16 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}

int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
SSqlCmd* pCmd = &pSql->cmd;
pCmd->msgType = TSDB_MSG_TYPE_UPDATE_TAG_VAL;

SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) (pCmd->payload + tsRpcHeadSize);
pCmd->payloadLen = htonl(pUpdateMsg->head.contLen);

return TSDB_CODE_SUCCESS;
}

int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SCMAlterDbMsg);
@@ -1785,7 +1783,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {

pMetaMsg->sid = htonl(pMetaMsg->sid);
pMetaMsg->sversion = htons(pMetaMsg->sversion);

pMetaMsg->tversion = htons(pMetaMsg->tversion);
pMetaMsg->vgroup.vgId = htonl(pMetaMsg->vgroup.vgId);

pMetaMsg->uid = htobe64(pMetaMsg->uid);
@@ -2534,6 +2532,7 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_DROP_DNODE] = tscBuildDropDnodeMsg;
tscBuildMsg[TSDB_SQL_CFG_DNODE] = tscBuildCfgDnodeMsg;
tscBuildMsg[TSDB_SQL_ALTER_TABLE] = tscBuildAlterTableMsg;
tscBuildMsg[TSDB_SQL_UPDATE_TAGS_VAL] = tscBuildUpdateTagMsg;
tscBuildMsg[TSDB_SQL_ALTER_DB] = tscAlterDbMsg;

tscBuildMsg[TSDB_SQL_CONNECT] = tscBuildConnectMsg;

@@ -52,7 +52,7 @@ static bool validPassword(const char* passwd) {
return validImpl(passwd, TSDB_PASSWORD_LEN);
}

STscObj *taosConnectImpl(const char *ip, const char *user, const char *pass, const char *db, uint16_t port,
SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, const char *db, uint16_t port,
void (*fp)(void *, TAOS_RES *, int), void *param, void **taos) {
taos_init();

@@ -120,10 +120,8 @@ STscObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
pSql->pTscObj = pObj;
pSql->signature = pSql;
pSql->maxRetry = TSDB_MAX_REPLICA_NUM;

tsem_init(&pSql->rspSem, 0, 0);

pObj->pSql = pSql;
pObj->pDnodeConn = pDnodeConn;

pSql->fp = fp;
@@ -143,41 +141,37 @@ STscObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con

// tsRpcHeaderSize will be updated during RPC initialization, so only after it initialization, this value is valid
tsInsertHeadSize = tsRpcHeadSize + sizeof(SMsgDesc) + sizeof(SSubmitMsg);
return pObj;
return pSql;
}

static void syncConnCallback(void *param, TAOS_RES *tres, int code) {
STscObj *pObj = (STscObj *)param;
assert(pObj != NULL && pObj->pSql != NULL);
SSqlObj *pSql = (SSqlObj *) tres;
assert(pSql != NULL);

if (code < 0) {
pObj->pSql->res.code = code;
}

sem_post(&pObj->pSql->rspSem);
sem_post(&pSql->rspSem);
}

TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) {
tscTrace("try to create a connection to %s:%u, user:%s db:%s", ip, port, user, db);

STscObj *pObj = taosConnectImpl(ip, user, pass, db, port, NULL, NULL, NULL);
if (pObj != NULL) {
SSqlObj* pSql = pObj->pSql;
assert(pSql != NULL);

STscObj* pObj = NULL;
SSqlObj *pSql = taosConnectImpl(ip, user, pass, db, port, syncConnCallback, NULL, (void**) &pObj);
if (pSql != NULL) {
pSql->fp = syncConnCallback;
pSql->param = pObj;
pSql->param = pSql;

tscProcessSql(pSql);
sem_wait(&pSql->rspSem);

if (pSql->res.code != TSDB_CODE_SUCCESS) {
terrno = pSql->res.code;
taos_free_result(pSql);
taos_close(pObj);
return NULL;
}

tscTrace("%p DB connection is opening, dnodeConn:%p", pObj, pObj->pDnodeConn);
taos_free_result(pSql);

// version compare only requires the first 3 segments of the version string
int code = taosCheckVersion(version, taos_get_server_info(pObj), 3);
@@ -195,17 +189,14 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha

TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
void *param, void **taos) {
STscObj* pObj = taosConnectImpl(ip, user, pass, db, port, fp, param, taos);
if (pObj == NULL) {
SSqlObj* pSql = taosConnectImpl(ip, user, pass, db, port, fp, param, taos);
if (pSql == NULL) {
return NULL;
}

SSqlObj* pSql = pObj->pSql;

pSql->res.code = tscProcessSql(pSql);
tscTrace("%p DB async connection is opening", pObj);

return pObj;
tscTrace("%p DB async connection is opening", taos);
return taos;
}

void taos_close(TAOS *taos) {
@@ -265,41 +256,40 @@ int taos_query_imp(STscObj *pObj, SSqlObj *pSql) {
}

void waitForQueryRsp(void *param, TAOS_RES *tres, int code) {
assert(param != NULL);
SSqlObj *pSql = ((STscObj *)param)->pSql;

// valid error code is less than 0
if (code < 0) {
pSql->res.code = code;
}
assert(tres != NULL);

SSqlObj *pSql = (SSqlObj *) tres;
sem_post(&pSql->rspSem);
}

int taos_query(TAOS *taos, const char *sqlstr) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) {
terrno = TSDB_CODE_DISCONNECTED;
return TSDB_CODE_DISCONNECTED;
}

SSqlObj* pSql = pObj->pSql;
size_t sqlLen = strlen(sqlstr);
doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);

// wait for the callback function to post the semaphore
tsem_wait(&pSql->rspSem);
return pSql->res.code;
}

TAOS_RES *taos_use_result(TAOS *taos) {
TAOS_RES* taos_query(TAOS *taos, const char *sqlstr) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) {
terrno = TSDB_CODE_DISCONNECTED;
return NULL;
}

return pObj->pSql;
int32_t sqlLen = strlen(sqlstr);
if (sqlLen > tsMaxSQLStringLen) {
tscError("sql string exceeds max length:%d", tsMaxSQLStringLen);
terrno = TSDB_CODE_INVALID_SQL;
return NULL;
}

taosNotePrintTsc(sqlstr);

SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
if (pSql == NULL) {
tscError("failed to malloc sqlObj");
terrno = TSDB_CODE_CLI_OUT_OF_MEMORY;
return NULL;
}

doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);

// wait for the callback function to post the semaphore
tsem_wait(&pSql->rspSem);
return pSql;
}

int taos_result_precision(TAOS_RES *res) {
@@ -332,18 +322,18 @@ int taos_num_fields(TAOS_RES *res) {
return num;
}

int taos_field_count(TAOS *taos) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) return 0;
int taos_field_count(TAOS_RES *tres) {
SSqlObj* pSql = (SSqlObj*) tres;
if (pSql == NULL || pSql->signature != pSql) return 0;

return taos_num_fields(pObj->pSql);
return taos_num_fields(pSql);
}

int taos_affected_rows(TAOS *taos) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) return 0;
int taos_affected_rows(TAOS_RES *tres) {
SSqlObj* pSql = (SSqlObj*) tres;
if (pSql == NULL || pSql->signature != pSql) return 0;

return (pObj->pSql->res.numOfRows);
return (pSql->res.numOfRows);
}

TAOS_FIELD *taos_fetch_fields(TAOS_RES *res) {
@@ -385,9 +375,8 @@ int taos_fetch_block_impl(TAOS_RES *res, TAOS_ROW *rows) {
SSqlObj *pSql = (SSqlObj *)res;
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
STscObj *pObj = pSql->pTscObj;

if (pRes->qhandle == 0 || pObj->pSql != pSql) {
if (pRes->qhandle == 0 || pSql->signature != pSql) {
*rows = NULL;
return 0;
}
@@ -521,95 +510,76 @@ int taos_select_db(TAOS *taos, const char *db) {
}

snprintf(sql, tListLen(sql), "use %s", db);
return taos_query(taos, sql);
SSqlObj* pSql = taos_query(taos, sql);
int32_t code = pSql->res.code;
taos_free_result(pSql);

return code;
}

void taos_free_result(TAOS_RES *res) {
if (res == NULL) return;

SSqlObj *pSql = (SSqlObj *)res;
tscTrace("%p start to free result", res);

if (pSql == NULL || pSql->signature != pSql) {
tscTrace("%p result has been freed", pSql);
return;
}

SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;

tscTrace("%p start to free result", pSql);

if (pSql->signature != pSql) return;

STscObj* pObj = pSql->pTscObj;
// The semaphore can not be changed while freeing async sub query objects.
if (pRes == NULL || pRes->qhandle == 0) {
/* Query rsp is not received from vnode, so the qhandle is NULL */
tscTrace("%p qhandle is null, abort free, fp:%p", pSql, pSql->fp);

// The semaphore can not be changed while freeing async sub query objects.
if (pObj->pSql != pSql) {
tscTrace("%p SqlObj is freed by app", pSql);
tscFreeSqlObj(pSql);
} else {
tscPartiallyFreeSqlObj(pSql);
}

tscTrace("%p SqlObj is freed by app, qhandle is null", pSql);
tscFreeSqlObj(pSql);
return;
}

// set freeFlag to 1 in retrieve message if there are un-retrieved results data in node
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo == NULL) {
tscPartiallyFreeSqlObj(pSql);
tscFreeSqlObj(pSql);
return;
}

pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
STscObj* pTscObj = pSql->pTscObj;

STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);

/*
* case 1. Partial data have been retrieved from vnodes, but not all data has been retrieved yet.
* We need to recycle the connection by noticing the vnode return 0 results.
* case 2. When the query response is received from vnodes and the numOfRows is set to 0, the user calls
* taos_free_result before the taos_fetch_row is called in non-stream computing,
* we need to recycle the connection.
* case 3. If the query process is cancelled by user in stable query, tscProcessSql should not be called
* for each subquery. Because the failure of execution tsProcessSql may trigger the callback function
* be executed, and the retry efforts may result in double free the resources, e.g.,SRetrieveSupport
* If the query process is cancelled by user in stable query, tscProcessSql should not be called
* for each subquery. Because the failure of execution tsProcessSql may trigger the callback function
* be executed, and the retry efforts may result in double free the resources, e.g.,SRetrieveSupport
*/
if ((pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH) && pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false &&
if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false &&
(pCmd->command == TSDB_SQL_SELECT || pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_FETCH) &&
(pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;

tscTrace("%p send msg to free qhandle in vnode, code:%d, numOfRows:%d, command:%s", pSql, pRes->code, pRes->numOfRows,
sqlCmd[pCmd->command]);

tscTrace("%p start to send msg to free qhandle in dnode, command:%s", pSql, sqlCmd[pCmd->command]);
pSql->freed = 1;
tscProcessSql(pSql);

// waits for response and then goes on
if (pTscObj->pSql == pSql) {
// in case of sync model query, waits for response and then goes on
if (pSql->fp == waitForQueryRsp || pSql->fp == waitForRetrieveRsp) {
sem_wait(&pSql->rspSem);
}
} else { // if no free resource msg is sent to vnode, we free this object immediately.
if (pTscObj->pSql != pSql) {
tscFreeSqlObj(pSql);
tscTrace("%p sql result is freed by app", pSql);
} else {
tscPartiallyFreeSqlObj(pSql);
tscTrace("%p sql result is freed by app", pSql);
}
}

tscFreeSqlObj(pSql);
tscTrace("%p sql result is freed by app", pSql);
}

// todo should not be used in async query
int taos_errno(TAOS *taos) {
STscObj *pObj = (STscObj *)taos;
int taos_errno(TAOS_RES *tres) {
SSqlObj *pSql = (SSqlObj *) tres;

if (pObj == NULL || pObj->signature != pObj) {
if (pSql == NULL || pSql->signature != pSql) {
return terrno;
}

return pObj->pSql->res.code;
return pSql->res.code;
}

/*
@@ -632,13 +602,12 @@ static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
}

// todo should not be used in async model
char *taos_errstr(TAOS *taos) {
STscObj *pObj = (STscObj *)taos;
char *taos_errstr(TAOS_RES *tres) {
SSqlObj *pSql = (SSqlObj *) tres;

if (pObj == NULL || pObj->signature != pObj)
return (char*)tstrerror(terrno);

SSqlObj* pSql = pObj->pSql;
if (pSql == NULL || pSql->signature != pSql) {
return (char*) tstrerror(terrno);
}

if (hasAdditionalErrorInfo(pSql->res.code, &pSql->cmd)) {
return pSql->cmd.payload;
@@ -769,7 +738,8 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
return TSDB_CODE_DISCONNECTED;
}

SSqlObj *pSql = pObj->pSql;
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));

SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;

@@ -847,9 +817,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
tblName[len] = '\0';

str = nextStr + 1;

strtrim(tblName);
len = (uint32_t)strlen(tblName);
len = strtrim(tblName);

SSQLToken sToken = {.n = len, .type = TK_ID, .z = tblName};
tSQLGetToken(tblName, &sToken.type);
@@ -861,7 +829,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t t
return code;
}

if ((code = tscSetTableId(pTableMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) {
if ((code = tscSetTableFullName(pTableMetaInfo, &sToken, pSql)) != TSDB_CODE_SUCCESS) {
return code;
}

@@ -902,7 +870,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
return TSDB_CODE_DISCONNECTED;
}

SSqlObj *pSql = pObj->pSql;
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
SSqlRes *pRes = &pSql->res;

pRes->numOfTotal = 0; // the number of getting table meta from server
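
The tscSql.c hunks above rework the synchronous client API: taos_query now returns a TAOS_RES* handle, and taos_errno, taos_errstr, taos_affected_rows, taos_field_count and taos_free_result read from that handle instead of from the connection object. A minimal caller sketch against the reworked API follows; the connection parameters and the table name are placeholders, not values taken from this commit.

#include <stdio.h>

#include "taos.h"  // TDengine client API header

int main(void) {
  // Placeholder connection parameters; replace with a real host/user/password/db.
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect\n");
    return 1;
  }

  // taos_query now returns a result handle rather than an error code.
  TAOS_RES *res = taos_query(conn, "insert into tb values (now, 1)");

  // Error code and message are taken from the result handle, not the connection.
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    printf("affected rows: %d\n", taos_affected_rows(res));
  }

  // The caller releases the handle explicitly.
  taos_free_result(res);
  taos_close(conn);
  return 0;
}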
@ -459,14 +459,14 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) {
|
|||
return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer;
|
||||
}
|
||||
|
||||
static void setErrorInfo(STscObj* pObj, int32_t code, char* info) {
|
||||
if (pObj == NULL) {
|
||||
static void setErrorInfo(SSqlObj* pSql, int32_t code, char* info) {
|
||||
if (pSql == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
SSqlCmd* pCmd = &pObj->pSql->cmd;
|
||||
SSqlCmd* pCmd = &pSql->cmd;
|
||||
|
||||
pObj->pSql->res.code = code;
|
||||
pSql->res.code = code;
|
||||
|
||||
if (info != NULL) {
|
||||
strncpy(pCmd->payload, info, pCmd->payloadLen);
|
||||
|
@@ -480,7 +480,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
  SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj));
  if (pSql == NULL) {
    setErrorInfo(pObj, TSDB_CODE_CLI_OUT_OF_MEMORY, NULL);
    setErrorInfo(pSql, TSDB_CODE_CLI_OUT_OF_MEMORY, NULL);
    return NULL;
  }

@@ -490,14 +490,14 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
  SSqlRes *pRes = &pSql->res;
  int ret = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE);
  if (TSDB_CODE_SUCCESS != ret) {
    setErrorInfo(pObj, ret, NULL);
    setErrorInfo(pSql, ret, NULL);
    free(pSql);
    return NULL;
  }

  pSql->sqlstr = strdup(sqlstr);
  if (pSql->sqlstr == NULL) {
    setErrorInfo(pObj, TSDB_CODE_CLI_OUT_OF_MEMORY, NULL);
    setErrorInfo(pSql, TSDB_CODE_CLI_OUT_OF_MEMORY, NULL);

    tfree(pSql);
    return NULL;

@@ -511,7 +511,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
  tscResetSqlCmdObj(&pSql->cmd);
  ret = tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE);
  if (TSDB_CODE_SUCCESS != ret) {
    setErrorInfo(pObj, ret, NULL);
    setErrorInfo(pSql, ret, NULL);
    tscError("%p open stream failed, sql:%s, code:%d", pSql, sqlstr, TSDB_CODE_CLI_OUT_OF_MEMORY);
    tscFreeSqlObj(pSql);
    return NULL;

@@ -521,7 +521,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
  SQLInfoDestroy(&SQLInfo);

  if (pRes->code != TSDB_CODE_SUCCESS) {
    setErrorInfo(pObj, pRes->code, pCmd->payload);
    setErrorInfo(pSql, pRes->code, pCmd->payload);

    tscError("%p open stream failed, sql:%s, reason:%s, code:%d", pSql, sqlstr, pCmd->payload, pRes->code);
    tscFreeSqlObj(pSql);

@@ -530,7 +530,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
  SSqlStream *pStream = (SSqlStream *)calloc(1, sizeof(SSqlStream));
  if (pStream == NULL) {
    setErrorInfo(pObj, TSDB_CODE_CLI_OUT_OF_MEMORY, NULL);
    setErrorInfo(pSql, TSDB_CODE_CLI_OUT_OF_MEMORY, NULL);

    tscError("%p open stream failed, sql:%s, reason:%s, code:%d", pSql, sqlstr, pCmd->payload, pRes->code);
    tscFreeSqlObj(pSql);
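The stream hunks above only swap the error sink from the connection to the stream's own SSqlObj; the public entry point keeps its shape. The sketch below is a rough illustration of opening a continuous query; only the first three parameters are visible in this hunk, so the start-time, user-parameter, and close-callback arguments are assumptions:

#include <taos.h>
#include <stdio.h>

/* Hedged sketch: the row-callback shape is inferred from the truncated
 * prototype in the hunk header and may differ in detail. */
static void on_stream_row(void *param, TAOS_RES *res, TAOS_ROW row) {
  (void)param; (void)res; (void)row;          /* consume one window result here */
}

static void start_stream(TAOS *conn) {
  TAOS_STREAM *stream = taos_open_stream(conn, "select count(*) from demo.meters interval(10s)",
                                         on_stream_row, 0 /* start time */, NULL /* param */,
                                         NULL /* stream-closed callback */);
  if (stream == NULL) {
    fprintf(stderr, "open stream failed\n");  /* details recorded on the internal SSqlObj */
  }
}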
@@ -181,21 +181,23 @@ static SArray* getTableList( SSqlObj* pSql ) {
  const char* p = strstr( pSql->sqlstr, " from " );
  char* sql = alloca(strlen(p) + 32);
  sprintf(sql, "select tbid(tbname)%s", p);
  int code = taos_query( pSql->pTscObj, sql );
  if (code != TSDB_CODE_SUCCESS) {
    tscError("failed to retrieve table id: %s", tstrerror(code));

  SSqlObj* pSql1 = taos_query(pSql->pTscObj, sql);
  if (terrno != TSDB_CODE_SUCCESS) {
    tscError("failed to retrieve table id: %s", tstrerror(terrno));
    return NULL;
  }

  TAOS_RES* res = taos_use_result( pSql->pTscObj );
  TAOS_ROW row;
  SArray* result = taosArrayInit( 128, sizeof(STidTags) );
  while ((row = taos_fetch_row(res))) {
  while ((row = taos_fetch_row(pSql1))) {
    STidTags tags;
    memcpy(&tags, row[0], sizeof(tags));
    taosArrayPush(result, &tags);
  }

  taos_free_result(pSql1);

  return result;
}
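getTableList() above is a compact template for the new synchronous flow: run the statement, check for failure (it reads the global terrno, while the sketch below uses the per-request taos_errno() shown earlier), iterate rows on the returned handle, then free it. A simplified, hedged version that counts rows instead of collecting STidTags into an SArray:

#include <taos.h>
#include <stdio.h>

/* Hedged sketch of the query-fetch-free pattern used by getTableList(). */
static int count_rows(TAOS *conn, const char *sql) {
  TAOS_RES *res = taos_query(conn, sql);      /* assumption: returns the request handle */
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
    taos_free_result(res);
    return -1;
  }

  int n = 0;
  TAOS_ROW row;
  while ((row = taos_fetch_row(res)) != NULL) {
    n++;                                      /* the real code copies row[0] into an SArray */
  }
  taos_free_result(res);
  return n;
}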
@@ -1715,6 +1715,7 @@ static void multiVnodeInsertMerge(void* param, TAOS_RES* tres, int numOfRows) {
    pParentObj->res.numOfRows += numOfRows;
  }

  taos_free_result(tres);
  int32_t completed = atomic_add_fetch_32(&pState->numOfCompleted, 1);
  if (completed < total) {
    return;

@@ -1732,7 +1733,7 @@ static void multiVnodeInsertMerge(void* param, TAOS_RES* tres, int numOfRows) {
  pParentObj->fp = pParentObj->fetchFp;

  // all data has been sent to vnode, call user function
  (*pParentObj->fp)(pParentObj->param, tres, numOfRows);
  (*pParentObj->fp)(pParentObj->param, pParentObj, numOfRows);
}

int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
@@ -409,7 +409,9 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
}

void tscFreeSqlObj(SSqlObj* pSql) {
  if (pSql == NULL || pSql->signature != pSql) return;
  if (pSql == NULL || pSql->signature != pSql) {
    return;
  }

  tscTrace("%p start to free sql object", pSql);
  tscPartiallyFreeSqlObj(pSql);

@@ -424,6 +426,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
  pCmd->allocSize = 0;

  tfree(pSql->sqlstr);
  sem_destroy(&pSql->rspSem);
  free(pSql);
}

@@ -651,6 +654,7 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock) {
  for (int32_t i = 0; i < numOfRows; ++i) {
    SDataRow trow = (SDataRow)pDataBlock;
    dataRowSetLen(trow, TD_DATA_ROW_HEAD_SIZE + flen);
    dataRowSetVersion(trow, pTableMeta->sversion);

    int toffset = 0;
    for (int32_t j = 0; j < tinfo.numOfColumns; j++) {

@@ -749,16 +753,7 @@ void tscCloseTscObj(STscObj* pObj) {
  assert(pObj != NULL);

  pObj->signature = NULL;
  SSqlObj* pSql = pObj->pSql;

  if (pSql) {
    terrno = pSql->res.code;
    sem_destroy(&pSql->rspSem);
  }

  taosTmrStopA(&(pObj->pTimer));
  tscFreeSqlObj(pSql);

  pthread_mutex_destroy(&pObj->mutex);

  if (pObj->pDnodeConn != NULL) {
@@ -1237,9 +1232,8 @@ void tscColumnListDestroy(SArray* pColumnList) {
 *
 */
static int32_t validateQuoteToken(SSQLToken* pToken) {
  pToken->n = strdequote(pToken->z);
  strtrim(pToken->z);
  pToken->n = (uint32_t)strlen(pToken->z);
  strdequote(pToken->z);
  pToken->n = strtrim(pToken->z);

  int32_t k = tSQLGetToken(pToken->z, &pToken->type);

@@ -1261,9 +1255,8 @@ int32_t tscValidateName(SSQLToken* pToken) {
  char* sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n, true);
  if (sep == NULL) {  // single part
    if (pToken->type == TK_STRING) {
      pToken->n = strdequote(pToken->z);
      strtrim(pToken->z);
      pToken->n = (uint32_t)strlen(pToken->z);
      strdequote(pToken->z);
      pToken->n = strtrim(pToken->z);

      int len = tSQLGetToken(pToken->z, &pToken->type);

@@ -1288,8 +1281,7 @@ int32_t tscValidateName(SSQLToken* pToken) {
    char* pStr = pToken->z;

    if (pToken->type == TK_SPACE) {
      strtrim(pToken->z);
      pToken->n = (uint32_t)strlen(pToken->z);
      pToken->n = strtrim(pToken->z);
    }

    pToken->n = tSQLGetToken(pToken->z, &pToken->type);
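Both token validators now rely on strtrim() returning the trimmed length, so the follow-up strlen() call disappears. The helper below is a self-contained stand-in (not the library's strtrim()) that shows the contract the callers assume:

#include <ctype.h>
#include <stdint.h>
#include <string.h>

/* Simplified illustration: trim leading/trailing blanks in place and return the new length. */
static uint32_t trim_inplace(char *z) {
  size_t len = strlen(z);
  size_t start = 0;
  while (start < len && isspace((unsigned char)z[start])) start++;   /* skip leading blanks  */
  while (len > start && isspace((unsigned char)z[len - 1])) len--;   /* drop trailing blanks */
  len -= start;
  memmove(z, z + start, len);
  z[len] = '\0';
  return (uint32_t)len;                       /* callers store this directly into pToken->n */
}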
@@ -1470,22 +1462,24 @@ bool tscShouldFreeHeatBeat(SSqlObj* pHb) {
 * If connection need to be recycled, the SqlObj also should be freed.
 */
bool tscShouldBeFreed(SSqlObj* pSql) {
  if (pSql == NULL || pSql->signature != pSql || pSql->fp == NULL) {
  if (pSql == NULL || pSql->signature != pSql) {
    return false;
  }

  assert(pSql->fp != NULL);

  STscObj* pTscObj = pSql->pTscObj;
  if (pSql->pStream != NULL || pTscObj->pHb == pSql || pTscObj->pSql == pSql || pSql->pSubscription != NULL) {
  if (pSql->pStream != NULL || pTscObj->pHb == pSql || pSql->pSubscription != NULL) {
    return false;
  }

  // only the table meta and super table vgroup query will free resource automatically
  int32_t command = pSql->cmd.command;
  if (command == TSDB_SQL_CONNECT || command == TSDB_SQL_INSERT) {
  if (command == TSDB_SQL_META || command == TSDB_SQL_STABLEVGROUP) {
    return true;
  } else {
    return tscKeepConn[command] == 0 ||
        (pSql->res.code != TSDB_CODE_ACTION_IN_PROGRESS && pSql->res.code != TSDB_CODE_SUCCESS);
  }

  return false;
}

/**
@@ -1948,15 +1942,14 @@ int16_t tscGetJoinTagColIndexByUid(STagCond* pTagCond, uint64_t uid) {
  }
}

bool tscIsUpdateQuery(STscObj* pObj) {
  if (pObj == NULL || pObj->signature != pObj) {
bool tscIsUpdateQuery(SSqlObj* pSql) {
  if (pSql == NULL || pSql->signature != pSql) {
    terrno = TSDB_CODE_DISCONNECTED;
    return TSDB_CODE_DISCONNECTED;
  }

  SSqlCmd* pCmd = &pObj->pSql->cmd;
  return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) ||
          TSDB_SQL_USE_DB == pCmd->command);
  SSqlCmd* pCmd = &pSql->cmd;
  return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_USE_DB == pCmd->command);
}

int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
@@ -35,6 +35,7 @@ enum {
  TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SELECT, "select" )
  TSDB_DEFINE_SQL_TYPE( TSDB_SQL_FETCH, "fetch" )
  TSDB_DEFINE_SQL_TYPE( TSDB_SQL_INSERT, "insert" )
  TSDB_DEFINE_SQL_TYPE( TSDB_SQL_UPDATE_TAGS_VAL, "update-tag-val" )

  // the SQL below is for mgmt node
  TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MGMT, "mgmt" )
@@ -119,22 +119,24 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
// ----------------- Data row structure

/* A data row, the format is like below:
 * |<------------------------------------- len ---------------------------------->|
 * |<--Head ->|<---------  flen  ------------->|                                  |
 * +----------+---------------------------------+---------------------------------+
 * | int32_t  |                                 |                                 |
 * +----------+---------------------------------+---------------------------------+
 * |   len    |           First part            |           Second part           |
 * +----------+---------------------------------+---------------------------------+
 * |<---------------------------------------- len ------------------------------------------->|
 * |<--      Head      -->|<---------  flen  ------------->|                                   |
 * +----------+----------+---------------------------------+---------------------------------+
 * | int16_t  | int16_t  |                                 |                                 |
 * +----------+----------+---------------------------------+---------------------------------+
 * |   len    | sversion |           First part            |           Second part           |
 * +----------+----------+---------------------------------+---------------------------------+
 */
typedef void *SDataRow;

#define TD_DATA_ROW_HEAD_SIZE sizeof(int32_t)
#define TD_DATA_ROW_HEAD_SIZE sizeof(int16_t)*2

#define dataRowLen(r) (*(int32_t *)(r))
#define dataRowLen(r) (*(int16_t *)(r))
#define dataRowVersion(r) *(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))
#define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE)
#define dataRowKey(r) (*(TSKEY *)(dataRowTuple(r)))
#define dataRowSetLen(r, l) (dataRowLen(r) = (l))
#define dataRowSetVersion(r, v) (dataRowVersion(r) = (v))
#define dataRowCpy(dst, r) memcpy((dst), (r), dataRowLen(r))
#define dataRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_DATA_ROW_HEAD_SIZE)
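The head of a data row therefore grows a second field: the first int16_t still carries the total length, and the new int16_t behind it carries the schema version that tdInitDataRow() and trimDataBlock() now stamp into every row. A small self-contained sketch of that layout (POINTER_SHIFT is redefined locally for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b)))   /* local stand-in for the real macro */

int main(void) {
  int16_t flen = 64;                                         /* first-part length from the schema */
  int16_t head = (int16_t)(2 * sizeof(int16_t));             /* new TD_DATA_ROW_HEAD_SIZE */
  void   *row  = calloc(1, (size_t)(head + flen));

  *(int16_t *)row = (int16_t)(head + flen);                            /* dataRowSetLen     */
  *(int16_t *)POINTER_SHIFT(row, sizeof(int16_t)) = 3;                 /* dataRowSetVersion */

  printf("len=%d sversion=%d\n",
         *(int16_t *)row,                                              /* dataRowLen     */
         *(int16_t *)POINTER_SHIFT(row, sizeof(int16_t)));             /* dataRowVersion */
  free(row);
  return 0;
}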
@@ -246,7 +248,7 @@ void tdResetDataCols(SDataCols *pCols);
void tdInitDataCols(SDataCols *pCols, STSchema *pSchema);
SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData);
void tdFreeDataCols(SDataCols *pCols);
void tdAppendDataRowToDataCol(SDataRow row, SDataCols *pCols);
void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols);
void tdPopDataColsPoints(SDataCols *pCols, int pointsToPop);  //!!!!
int tdMergeDataCols(SDataCols *target, SDataCols *src, int rowsToMerge);
void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, SDataCols *src2, int *iter2, int tRows);

@@ -278,9 +280,10 @@ typedef struct {
#define kvRowColVal(r, colIdx) POINTER_SHIFT(kvRowValues(r), (colIdx)->offset)
#define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i))
#define kvRowFree(r) tfree(r)
#define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r))

SKVRow tdKVRowDup(SKVRow row);
SKVRow tdSetKVRowDataOfCol(SKVRow row, int16_t colId, int8_t type, void *value);
int    tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value);
void * tdEncodeKVRow(void *buf, SKVRow row);
void * tdDecodeKVRow(void *buf, SKVRow *row);
@@ -159,7 +159,10 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder) {
/**
 * Initialize a data row
 */
void tdInitDataRow(SDataRow row, STSchema *pSchema) { dataRowSetLen(row, TD_DATA_ROW_HEAD_SIZE + schemaFLen(pSchema)); }
void tdInitDataRow(SDataRow row, STSchema *pSchema) {
  dataRowSetLen(row, TD_DATA_ROW_HEAD_SIZE + schemaFLen(pSchema));
  dataRowSetVersion(row, schemaVersion(pSchema));
}

SDataRow tdNewDataRowFromSchema(STSchema *pSchema) {
  int32_t size = dataRowMaxBytesFromSchema(pSchema);
@@ -262,25 +265,29 @@ bool isNEleNull(SDataCol *pCol, int nEle) {
  }
}

void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) {
  char *ptr = NULL;
  switch (pCol->type) {
    case TSDB_DATA_TYPE_BINARY:
    case TSDB_DATA_TYPE_NCHAR:
      pCol->len = 0;
      for (int i = 0; i < nEle; i++) {
        pCol->dataOff[i] = pCol->len;
        ptr = (char *)pCol->pData + pCol->len;
        varDataLen(ptr) = (pCol->type == TSDB_DATA_TYPE_BINARY) ? sizeof(char) : TSDB_NCHAR_SIZE;
        setNull(ptr + sizeof(VarDataLenT), pCol->type, pCol->bytes);
        pCol->len += varDataTLen(ptr);
      }
void dataColSetNullAt(SDataCol *pCol, int index) {
  if (IS_VAR_DATA_TYPE(pCol->type)) {
    pCol->dataOff[index] = pCol->len;
    char *ptr = POINTER_SHIFT(pCol->pData, pCol->len);
    varDataLen(ptr) = (pCol->type == TSDB_DATA_TYPE_BINARY) ? sizeof(char) : TSDB_NCHAR_SIZE;
    setNull(varDataVal(ptr), pCol->type, pCol->bytes);
    pCol->len += varDataTLen(ptr);
  } else {
    setNull(POINTER_SHIFT(pCol->pData, TYPE_BYTES[pCol->type] * index), pCol->type, pCol->bytes);
    pCol->len += TYPE_BYTES[pCol->type];
  }
}

      break;
    default:
      setNullN(pCol->pData, pCol->type, pCol->bytes, nEle);
      pCol->len = TYPE_BYTES[pCol->type] * nEle;
      break;
void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints) {

  if (IS_VAR_DATA_TYPE(pCol->type)) {
    pCol->len = 0;
    for (int i = 0; i < nEle; i++) {
      dataColSetNullAt(pCol, i);
    }
  } else {
    setNullN(pCol->pData, pCol->type, pCol->bytes, nEle);
    pCol->len = TYPE_BYTES[pCol->type] * nEle;
  }
}
@@ -377,14 +384,32 @@ void tdResetDataCols(SDataCols *pCols) {
  }
}

void tdAppendDataRowToDataCol(SDataRow row, SDataCols *pCols) {
void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) {
  ASSERT(dataColsKeyLast(pCols) < dataRowKey(row));

  for (int i = 0; i < pCols->numOfCols; i++) {
    SDataCol *pCol = pCols->cols + i;
    void *    value = tdGetRowDataOfCol(row, pCol->type, pCol->offset);
  int rcol = 0;
  int dcol = 0;

    dataColAppendVal(pCol, value, pCols->numOfRows, pCols->maxPoints);
  while (dcol < pCols->numOfCols) {
    SDataCol *pDataCol = &(pCols->cols[dcol]);
    if (rcol >= schemaNCols(pSchema)) {
      dataColSetNullAt(pDataCol, pCols->numOfRows);
      dcol++;
      continue;
    }

    STColumn *pRowCol = schemaColAt(pSchema, rcol);
    if (pRowCol->colId == pDataCol->colId) {
      void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset+TD_DATA_ROW_HEAD_SIZE);
      dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
      dcol++;
      rcol++;
    } else if (pRowCol->colId < pDataCol->colId) {
      rcol++;
    } else {
      dataColSetNullAt(pDataCol, pCols->numOfRows);
      dcol++;
    }
  }
  pCols->numOfRows++;
}
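The rewritten append walks two cursors in lock step: rcol over the row's schema columns and dcol over the destination columns, both ordered by colId, filling NULL whenever the destination expects a column the row does not carry. A self-contained illustration with simplified stand-in structs (the real code uses SDataCol, STColumn and dataColAppendVal):

#include <stdio.h>

typedef struct { int colId; int value; } RowCol;                /* column present in the row */
typedef struct { int colId; int value; int isNull; } DestCol;   /* column expected by the block */

static void append_row(const RowCol *rowCols, int nRowCols, DestCol *destCols, int nDestCols) {
  int rcol = 0, dcol = 0;
  while (dcol < nDestCols) {
    if (rcol >= nRowCols) {                          /* row exhausted: rest becomes NULL */
      destCols[dcol].isNull = 1;
      dcol++;
    } else if (rowCols[rcol].colId == destCols[dcol].colId) {
      destCols[dcol].value  = rowCols[rcol].value;   /* ids match: append the value */
      destCols[dcol].isNull = 0;
      rcol++;
      dcol++;
    } else if (rowCols[rcol].colId < destCols[dcol].colId) {
      rcol++;                                        /* row carries a column the block dropped */
    } else {
      destCols[dcol].isNull = 1;                     /* block column missing from this row */
      dcol++;
    }
  }
}

int main(void) {
  RowCol  row[]  = {{1, 10}, {3, 30}};
  DestCol dest[] = {{1, 0, 0}, {2, 0, 0}, {3, 0, 0}};
  append_row(row, 2, dest, 3);
  for (int i = 0; i < 3; i++)
    printf("col %d -> %s\n", dest[i].colId, dest[i].isNull ? "NULL" : "value");
  return 0;
}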
@ -477,69 +502,103 @@ SKVRow tdKVRowDup(SKVRow row) {
|
|||
return trow;
|
||||
}
|
||||
|
||||
SKVRow tdSetKVRowDataOfCol(SKVRow row, int16_t colId, int8_t type, void *value) {
|
||||
// TODO
|
||||
return NULL;
|
||||
// SColIdx *pColIdx = NULL;
|
||||
// SKVRow rrow = row;
|
||||
// SKVRow nrow = NULL;
|
||||
// void *ptr = taosbsearch(&colId, kvDataRowColIdx(row), kvDataRowNCols(row), sizeof(SColIdx), comparTagId, TD_GE);
|
||||
int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
|
||||
SColIdx *pColIdx = NULL;
|
||||
SKVRow row = *orow;
|
||||
SKVRow nrow = NULL;
|
||||
void * ptr = taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_GE);
|
||||
|
||||
// if (ptr == NULL || ((SColIdx *)ptr)->colId < colId) { // need to add a column value to the row
|
||||
// int tlen = kvDataRowLen(row) + sizeof(SColIdx) + (IS_VAR_DATA_TYPE(type) ? varDataTLen(value) :
|
||||
// TYPE_BYTES[type]); nrow = malloc(tlen); if (nrow == NULL) return NULL;
|
||||
if (ptr == NULL || ((SColIdx *)ptr)->colId < colId) { // need to add a column value to the row
|
||||
int diff = IS_VAR_DATA_TYPE(type) ? varDataTLen(value) : TYPE_BYTES[type];
|
||||
nrow = malloc(kvRowLen(row) + sizeof(SColIdx) + diff);
|
||||
if (nrow == NULL) return -1;
|
||||
|
||||
// kvDataRowSetNCols(nrow, kvDataRowNCols(row)+1);
|
||||
// kvDataRowSetLen(nrow, tlen);
|
||||
kvRowSetLen(nrow, kvRowLen(row) + sizeof(SColIdx) + diff);
|
||||
kvRowSetNCols(nrow, kvRowNCols(row) + 1);
|
||||
|
||||
// if (ptr == NULL) ptr = kvDataRowValues(row);
|
||||
if (ptr == NULL) {
|
||||
memcpy(kvRowColIdx(nrow), kvRowColIdx(row), sizeof(SColIdx) * kvRowNCols(row));
|
||||
memcpy(kvRowValues(nrow), kvRowValues(row), POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row)));
|
||||
int colIdx = kvRowNCols(nrow) - 1;
|
||||
kvRowColIdxAt(nrow, colIdx)->colId = colId;
|
||||
kvRowColIdxAt(nrow, colIdx)->offset = POINTER_DISTANCE(kvRowEnd(row), kvRowValues(row));
|
||||
memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff);
|
||||
} else {
|
||||
int16_t tlen = POINTER_DISTANCE(ptr, kvRowColIdx(row));
|
||||
if (tlen > 0) {
|
||||
memcpy(kvRowColIdx(nrow), kvRowColIdx(row), tlen);
|
||||
memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset);
|
||||
}
|
||||
|
||||
// // Copy the columns before the col
|
||||
// if (POINTER_DISTANCE(ptr, kvDataRowColIdx(row)) > 0) {
|
||||
// memcpy(kvDataRowColIdx(nrow), kvDataRowColIdx(row), POINTER_DISTANCE(ptr, kvDataRowColIdx(row)));
|
||||
// memcpy(kvDataRowValues(nrow), kvDataRowValues(row), ((SColIdx *)ptr)->offset); // TODO: here is not correct
|
||||
// }
|
||||
int colIdx = tlen / sizeof(SColIdx);
|
||||
kvRowColIdxAt(nrow, colIdx)->colId = colId;
|
||||
kvRowColIdxAt(nrow, colIdx)->offset = ((SColIdx *)ptr)->offset;
|
||||
memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx)), value, diff);
|
||||
|
||||
// // Set the new col value
|
||||
// pColIdx = (SColIdx *)POINTER_SHIFT(nrow, POINTER_DISTANCE(ptr, row));
|
||||
// pColIdx->colId = colId;
|
||||
// pColIdx->offset = ((SColIdx *)ptr)->offset; // TODO: here is not correct
|
||||
for (int i = colIdx; i < kvRowNCols(row); i++) {
|
||||
kvRowColIdxAt(nrow, i + 1)->colId = kvRowColIdxAt(row, i)->colId;
|
||||
kvRowColIdxAt(nrow, i + 1)->offset = kvRowColIdxAt(row, i)->offset + diff;
|
||||
}
|
||||
memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx)),
|
||||
POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx)))
|
||||
|
||||
// if (IS_VAR_DATA_TYPE(type)) {
|
||||
// memcpy(POINTER_SHIFT(kvDataRowValues(nrow), pColIdx->offset), value, varDataLen(value));
|
||||
// } else {
|
||||
// memcpy(POINTER_SHIFT(kvDataRowValues(nrow), pColIdx->offset), value, TYPE_BYTES[type]);
|
||||
// }
|
||||
);
|
||||
}
|
||||
|
||||
// // Copy the columns after the col
|
||||
// if (POINTER_DISTANCE(kvDataRowValues(row), ptr) > 0) {
|
||||
// // TODO: memcpy();
|
||||
// }
|
||||
// } else {
|
||||
// // TODO
|
||||
// ASSERT(((SColIdx *)ptr)->colId == colId);
|
||||
// if (IS_VAR_DATA_TYPE(type)) {
|
||||
// void *pOldVal = kvDataRowColVal(row, (SColIdx *)ptr);
|
||||
*orow = nrow;
|
||||
free(row);
|
||||
} else {
|
||||
ASSERT(((SColIdx *)ptr)->colId == colId);
|
||||
if (IS_VAR_DATA_TYPE(type)) {
|
||||
void *pOldVal = kvRowColVal(row, (SColIdx *)ptr);
|
||||
|
||||
// if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place
|
||||
// memcpy(pOldVal, value, varDataTLen(value));
|
||||
// } else { // enlarge the memory
|
||||
// // rrow = realloc(rrow, kvDataRowLen(rrow) + varDataTLen(value) - varDataTLen(pOldVal));
|
||||
// // if (rrow == NULL) return NULL;
|
||||
// // memmove();
|
||||
// // for () {
|
||||
// // ((SColIdx *)ptr)->offset += balabala;
|
||||
// // }
|
||||
if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place
|
||||
memcpy(pOldVal, value, varDataTLen(value));
|
||||
} else { // need to reallocate the memory
|
||||
int16_t diff = varDataTLen(value) - varDataTLen(pOldVal);
|
||||
int16_t nlen = kvRowLen(row) + diff;
|
||||
ASSERT(nlen > 0);
|
||||
nrow = malloc(nlen);
|
||||
if (nrow == NULL) return -1;
|
||||
|
||||
// // kvDataRowSetLen();
|
||||
kvRowSetLen(nrow, nlen);
|
||||
kvRowSetNCols(nrow, kvRowNCols(row));
|
||||
|
||||
// }
|
||||
// } else {
|
||||
// memcpy(kvDataRowColVal(row, (SColIdx *)ptr), value, TYPE_BYTES[type]);
|
||||
// }
|
||||
// }
|
||||
// Copy part ahead
|
||||
nlen = POINTER_DISTANCE(ptr, kvRowColIdx(row));
|
||||
ASSERT(nlen % sizeof(SColIdx) == 0);
|
||||
if (nlen > 0) {
|
||||
ASSERT(((SColIdx *)ptr)->offset > 0);
|
||||
memcpy(kvRowColIdx(nrow), kvRowColIdx(row), nlen);
|
||||
memcpy(kvRowValues(nrow), kvRowValues(row), ((SColIdx *)ptr)->offset);
|
||||
}
|
||||
|
||||
// return rrow;
|
||||
// Construct current column value
|
||||
int colIdx = nlen / sizeof(SColIdx);
|
||||
pColIdx = kvRowColIdxAt(nrow, colIdx);
|
||||
pColIdx->colId = ((SColIdx *)ptr)->colId;
|
||||
pColIdx->offset = ((SColIdx *)ptr)->offset;
|
||||
memcpy(kvRowColVal(nrow, pColIdx), value, varDataTLen(value));
|
||||
|
||||
// Construct columns after
|
||||
if (kvRowNCols(nrow) - colIdx - 1 > 0) {
|
||||
for (int i = colIdx + 1; i < kvRowNCols(nrow); i++) {
|
||||
kvRowColIdxAt(nrow, i)->colId = kvRowColIdxAt(row, i)->colId;
|
||||
kvRowColIdxAt(nrow, i)->offset += diff;
|
||||
}
|
||||
memcpy(kvRowColVal(nrow, kvRowColIdxAt(nrow, colIdx + 1)), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1)),
|
||||
POINTER_DISTANCE(kvRowEnd(row), kvRowColVal(row, kvRowColIdxAt(row, colIdx + 1))));
|
||||
}
|
||||
|
||||
*orow = nrow;
|
||||
free(row);
|
||||
}
|
||||
} else {
|
||||
memcpy(kvRowColVal(row, (SColIdx *)ptr), value, TYPE_BYTES[type]);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *tdEncodeKVRow(void *buf, SKVRow row) {
|
||||
|
|
|
@@ -364,7 +364,7 @@ char tTokenTypeSwitcher[13] = {
};

bool isValidDataType(int32_t type, int32_t length) {
  if (type < TSDB_DATA_TYPE_BOOL || type > TSDB_DATA_TYPE_NCHAR) {
  if (type < TSDB_DATA_TYPE_NULL || type > TSDB_DATA_TYPE_NCHAR) {
    return false;
  }
@@ -18,8 +18,8 @@ import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.*;
import java.sql.Date;
import java.sql.*;
import java.util.*;

/*
@ -102,41 +102,49 @@ public class DatabaseMetaDataResultSet implements ResultSet {
|
|||
|
||||
@Override
|
||||
public byte getByte(int columnIndex) throws SQLException {
|
||||
columnIndex--;
|
||||
return (byte) rowCursor.getInt(columnIndex, columnMetaDataList.get(columnIndex).getColType());
|
||||
}
|
||||
|
||||
@Override
|
||||
public short getShort(int columnIndex) throws SQLException {
|
||||
columnIndex--;
|
||||
return (short) rowCursor.getInt(columnIndex, columnMetaDataList.get(columnIndex).getColType());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getInt(int columnIndex) throws SQLException {
|
||||
columnIndex--;
|
||||
return rowCursor.getInt(columnIndex, columnMetaDataList.get(columnIndex).getColType());
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getLong(int columnIndex) throws SQLException {
|
||||
columnIndex--;
|
||||
return rowCursor.getLong(columnIndex, columnMetaDataList.get(columnIndex).getColType());
|
||||
}
|
||||
|
||||
@Override
|
||||
public float getFloat(int columnIndex) throws SQLException {
|
||||
columnIndex--;
|
||||
return rowCursor.getFloat(columnIndex, columnMetaDataList.get(columnIndex).getColType());
|
||||
}
|
||||
|
||||
@Override
|
||||
public double getDouble(int columnIndex) throws SQLException {
|
||||
columnIndex--;
|
||||
return rowCursor.getDouble(columnIndex, columnMetaDataList.get(columnIndex).getColType());
|
||||
}
|
||||
|
||||
@Override
|
||||
public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException {
|
||||
columnIndex--;
|
||||
return new BigDecimal(rowCursor.getDouble(columnIndex, columnMetaDataList.get(columnIndex).getColType()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] getBytes(int columnIndex) throws SQLException {
|
||||
columnIndex--;
|
||||
return (rowCursor.getString(columnIndex, columnMetaDataList.get(columnIndex).getColType())).getBytes();
|
||||
}
|
||||
|
||||
|
@ -152,6 +160,7 @@ public class DatabaseMetaDataResultSet implements ResultSet {
|
|||
|
||||
@Override
|
||||
public Timestamp getTimestamp(int columnIndex) throws SQLException {
|
||||
columnIndex--;
|
||||
return rowCursor.getTimestamp(columnIndex);
|
||||
}
|
||||
|
||||
|
|
|
@@ -84,6 +84,14 @@ public class TSDBConnection implements Connection {
        }
    }

    public TSDBSubscribe createSubscribe() throws SQLException {
        if (!this.connector.isClosed()) {
            return new TSDBSubscribe(this.connector);
        } else {
            throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
        }
    }

    public PreparedStatement prepareStatement(String sql) throws SQLException {
        if (!this.connector.isClosed()) {
            return new TSDBPreparedStatement(this.connector, sql);
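createSubscribe() is the JDBC entry point for the native subscription API that the new subscribeImp/consumeImp/unsubscribeImp bindings below expose. A hedged C sketch of the underlying calls; the taos_subscribe() parameter list is inferred from the JNI signature (connection, restart, topic, sql, period) plus the usual callback pair, and may differ in detail:

#include <taos.h>

static void poll_topic(TAOS *conn) {
  TAOS_SUB *sub = taos_subscribe(conn, 0 /* restart */, "topic1",
                                 "select * from demo.meters",
                                 NULL /* callback */, NULL /* param */, 1000 /* period ms */);
  if (sub == NULL) return;

  for (int i = 0; i < 10; i++) {
    TAOS_RES *res = taos_consume(sub);        /* mirrored by consumeImp(long subscription) */
    TAOS_ROW  row;
    while (res != NULL && (row = taos_fetch_row(res)) != NULL) {
      /* handle one newly arrived row */
    }
  }
  taos_unsubscribe(sub, 1 /* keep progress, cf. unsubscribeImp(subscription, isKeep) */);
}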
@@ -99,7 +99,7 @@ public class TSDBJNIConnector {
        this.taos = this.connectImp(host, port, dbName, user, password);
        if (this.taos == TSDBConstants.JNI_NULL_POINTER) {
            throw new SQLException(TSDBConstants.WrapErrMsg(this.getErrMsg()), "", this.getErrCode());
            throw new SQLException(TSDBConstants.WrapErrMsg(this.getErrMsg(null)), "", this.getErrCode(null));
        }

        return true;
@ -117,52 +117,57 @@ public class TSDBJNIConnector {
|
|||
freeResultSet(taosResultSetPointer);
|
||||
}
|
||||
|
||||
int code;
|
||||
long pSql = 0l;
|
||||
try {
|
||||
code = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos);
|
||||
pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos);
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
this.freeResultSet(pSql);
|
||||
throw new SQLException(TSDBConstants.WrapErrMsg("Unsupported encoding"));
|
||||
}
|
||||
int code = this.getErrCode(pSql);
|
||||
|
||||
affectedRows = code;
|
||||
if (code < 0) {
|
||||
affectedRows = -1;
|
||||
if (code == TSDBConstants.JNI_TDENGINE_ERROR) {
|
||||
throw new SQLException(TSDBConstants.WrapErrMsg(this.getErrMsg()), "", this.getErrCode());
|
||||
this.freeResultSet(pSql);
|
||||
throw new SQLException(TSDBConstants.WrapErrMsg(this.getErrMsg(pSql)), "", this.getErrCode(pSql));
|
||||
} else {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(code), "", this.getErrCode());
|
||||
this.freeResultSet(pSql);
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(code), "", this.getErrCode(pSql));
|
||||
}
|
||||
}
|
||||
|
||||
// Try retrieving result set for the executed SQL using the current connection pointer. If the executed
|
||||
// SQL is a DML/DDL which doesn't return a result set, then taosResultSetPointer should be 0L. Otherwise,
|
||||
// taosResultSetPointer should be a non-zero value.
|
||||
taosResultSetPointer = this.getResultSetImp(this.taos);
|
||||
taosResultSetPointer = this.getResultSetImp(this.taos, pSql);
|
||||
if (taosResultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
|
||||
isResultsetClosed = false;
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
private native int executeQueryImp(byte[] sqlBytes, long connection);
|
||||
private native long executeQueryImp(byte[] sqlBytes, long connection);
|
||||
|
||||
/**
|
||||
* Get recent error code by connection
|
||||
*/
|
||||
public int getErrCode() {
|
||||
return Math.abs(this.getErrCodeImp(this.taos));
|
||||
public int getErrCode(Long pSql) {
|
||||
return Math.abs(this.getErrCodeImp(this.taos, pSql));
|
||||
}
|
||||
|
||||
private native int getErrCodeImp(long connection);
|
||||
private native int getErrCodeImp(long connection, Long pSql);
|
||||
|
||||
/**
|
||||
* Get recent error message by connection
|
||||
*/
|
||||
public String getErrMsg() {
|
||||
return this.getErrMsgImp(this.taos);
|
||||
public String getErrMsg(Long pSql) {
|
||||
return this.getErrMsgImp(this.taos, pSql);
|
||||
}
|
||||
|
||||
private native String getErrMsgImp(long connection);
|
||||
private native String getErrMsgImp(long connection, Long pSql);
|
||||
|
||||
/**
|
||||
* Get resultset pointer
|
||||
|
@ -172,7 +177,7 @@ public class TSDBJNIConnector {
|
|||
return taosResultSetPointer;
|
||||
}
|
||||
|
||||
private native long getResultSetImp(long connection);
|
||||
private native long getResultSetImp(long connection, long pSql);
|
||||
|
||||
/**
|
||||
* Free resultset operation from C to release resultset pointer by JNI
|
||||
|
@ -212,15 +217,15 @@ public class TSDBJNIConnector {
|
|||
/**
|
||||
* Get affected rows count
|
||||
*/
|
||||
public int getAffectedRows() {
|
||||
public int getAffectedRows(Long pSql) {
|
||||
int affectedRows = this.affectedRows;
|
||||
if (affectedRows < 0) {
|
||||
affectedRows = this.getAffectedRowsImp(this.taos);
|
||||
affectedRows = this.getAffectedRowsImp(this.taos, pSql);
|
||||
}
|
||||
return affectedRows;
|
||||
}
|
||||
|
||||
private native int getAffectedRowsImp(long connection);
|
||||
private native int getAffectedRowsImp(long connection, Long pSql);
|
||||
|
||||
/**
|
||||
* Get schema metadata
|
||||
|
@ -248,7 +253,7 @@ public class TSDBJNIConnector {
|
|||
public void closeConnection() throws SQLException {
|
||||
int code = this.closeConnectionImp(this.taos);
|
||||
if (code < 0) {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(code), "", this.getErrCode());
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(code), "", this.getErrCode(null));
|
||||
} else if (code == 0) {
|
||||
this.taos = TSDBConstants.JNI_NULL_POINTER;
|
||||
} else {
|
||||
|
@ -261,31 +266,31 @@ public class TSDBJNIConnector {
|
|||
/**
|
||||
* Subscribe to a table in TSDB
|
||||
*/
|
||||
public long subscribe(String host, String user, String password, String database, String table, long time, int period) {
|
||||
return subscribeImp(host, user, password, database, table, time, period);
|
||||
public long subscribe(String topic, String sql, boolean restart, int period) {
|
||||
return subscribeImp(this.taos, restart, topic, sql, period);
|
||||
}
|
||||
|
||||
private native long subscribeImp(String host, String user, String password, String database, String table, long time, int period);
|
||||
public native long subscribeImp(long connection, boolean restart, String topic, String sql, int period);
|
||||
|
||||
/**
|
||||
* Consume a subscribed table
|
||||
*/
|
||||
public TSDBResultSetRowData consume(long subscription) {
|
||||
public long consume(long subscription) {
|
||||
return this.consumeImp(subscription);
|
||||
}
|
||||
|
||||
private native TSDBResultSetRowData consumeImp(long subscription);
|
||||
private native long consumeImp(long subscription);
|
||||
|
||||
/**
|
||||
* Unsubscribe a table
|
||||
*
|
||||
* @param subscription
|
||||
*/
|
||||
public void unsubscribe(long subscription) {
|
||||
unsubscribeImp(subscription);
|
||||
public void unsubscribe(long subscription, boolean isKeep) {
|
||||
unsubscribeImp(subscription, isKeep);
|
||||
}
|
||||
|
||||
private native void unsubscribeImp(long subscription);
|
||||
private native void unsubscribeImp(long subscription, boolean isKeep);
|
||||
|
||||
/**
|
||||
* Validate if a <I>create table</I> sql statement is correct without actually creating that table
|
||||
|
|
|
@ -51,47 +51,47 @@ public class TSDBResultSet implements ResultSet {
|
|||
private boolean lastWasNull = false;
|
||||
private final int COLUMN_INDEX_START_VALUE = 1;
|
||||
|
||||
public TSDBJNIConnector getJniConnector() {
|
||||
return jniConnector;
|
||||
}
|
||||
public TSDBJNIConnector getJniConnector() {
|
||||
return jniConnector;
|
||||
}
|
||||
|
||||
public void setJniConnector(TSDBJNIConnector jniConnector) {
|
||||
this.jniConnector = jniConnector;
|
||||
}
|
||||
public void setJniConnector(TSDBJNIConnector jniConnector) {
|
||||
this.jniConnector = jniConnector;
|
||||
}
|
||||
|
||||
public long getResultSetPointer() {
|
||||
return resultSetPointer;
|
||||
}
|
||||
public long getResultSetPointer() {
|
||||
return resultSetPointer;
|
||||
}
|
||||
|
||||
public void setResultSetPointer(long resultSetPointer) {
|
||||
this.resultSetPointer = resultSetPointer;
|
||||
}
|
||||
public void setResultSetPointer(long resultSetPointer) {
|
||||
this.resultSetPointer = resultSetPointer;
|
||||
}
|
||||
|
||||
public List<ColumnMetaData> getColumnMetaDataList() {
|
||||
return columnMetaDataList;
|
||||
}
|
||||
public List<ColumnMetaData> getColumnMetaDataList() {
|
||||
return columnMetaDataList;
|
||||
}
|
||||
|
||||
public void setColumnMetaDataList(List<ColumnMetaData> columnMetaDataList) {
|
||||
this.columnMetaDataList = columnMetaDataList;
|
||||
}
|
||||
public void setColumnMetaDataList(List<ColumnMetaData> columnMetaDataList) {
|
||||
this.columnMetaDataList = columnMetaDataList;
|
||||
}
|
||||
|
||||
public TSDBResultSetRowData getRowData() {
|
||||
return rowData;
|
||||
}
|
||||
public TSDBResultSetRowData getRowData() {
|
||||
return rowData;
|
||||
}
|
||||
|
||||
public void setRowData(TSDBResultSetRowData rowData) {
|
||||
this.rowData = rowData;
|
||||
}
|
||||
public void setRowData(TSDBResultSetRowData rowData) {
|
||||
this.rowData = rowData;
|
||||
}
|
||||
|
||||
public boolean isLastWasNull() {
|
||||
return lastWasNull;
|
||||
}
|
||||
public boolean isLastWasNull() {
|
||||
return lastWasNull;
|
||||
}
|
||||
|
||||
public void setLastWasNull(boolean lastWasNull) {
|
||||
this.lastWasNull = lastWasNull;
|
||||
}
|
||||
public void setLastWasNull(boolean lastWasNull) {
|
||||
this.lastWasNull = lastWasNull;
|
||||
}
|
||||
|
||||
public TSDBResultSet() {
|
||||
public TSDBResultSet() {
|
||||
}
|
||||
|
||||
public TSDBResultSet(TSDBJNIConnector connecter, long resultSetPointer) throws SQLException {
|
||||
|
@ -119,7 +119,7 @@ public class TSDBResultSet implements ResultSet {
|
|||
|
||||
public boolean next() throws SQLException {
|
||||
if (rowData != null) {
|
||||
this.rowData.clear();
|
||||
this.rowData.clear();
|
||||
}
|
||||
|
||||
int code = this.jniConnector.fetchRow(this.resultSetPointer, this.rowData);
|
||||
|
@ -155,87 +155,87 @@ public class TSDBResultSet implements ResultSet {
|
|||
String res = null;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
public boolean getBoolean(int columnIndex) throws SQLException {
|
||||
boolean res = false;
|
||||
boolean res = false;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getBoolean(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
res = this.rowData.getBoolean(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
public byte getByte(int columnIndex) throws SQLException {
|
||||
byte res = 0;
|
||||
byte res = 0;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = (byte) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
res = (byte) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
public short getShort(int columnIndex) throws SQLException {
|
||||
short res = 0;
|
||||
short res = 0;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = (short) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
res = (short) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
public int getInt(int columnIndex) throws SQLException {
|
||||
int res = 0;
|
||||
int res = 0;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
public long getLong(int columnIndex) throws SQLException {
|
||||
long res = 0l;
|
||||
long res = 0l;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
public float getFloat(int columnIndex) throws SQLException {
|
||||
float res = 0;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
float res = 0;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getFloat(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getFloat(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
public double getDouble(int columnIndex) throws SQLException {
|
||||
double res = 0;
|
||||
double res = 0;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getDouble(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getDouble(colIndex, this.columnMetaDataList.get(colIndex).getColType());
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -249,24 +249,24 @@ public class TSDBResultSet implements ResultSet {
|
|||
*/
|
||||
@Deprecated
|
||||
public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException {
|
||||
BigDecimal res = null;
|
||||
BigDecimal res = null;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()));
|
||||
}
|
||||
if (!lastWasNull) {
|
||||
res = new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()));
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
public byte[] getBytes(int columnIndex) throws SQLException {
|
||||
byte[] res = null;
|
||||
byte[] res = null;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType()).getBytes();
|
||||
}
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType()).getBytes();
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -281,13 +281,13 @@ public class TSDBResultSet implements ResultSet {
|
|||
}
|
||||
|
||||
public Timestamp getTimestamp(int columnIndex) throws SQLException {
|
||||
Timestamp res = null;
|
||||
Timestamp res = null;
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
|
||||
this.lastWasNull = this.rowData.wasNull(colIndex);
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getTimestamp(colIndex);
|
||||
}
|
||||
if (!lastWasNull) {
|
||||
res = this.rowData.getTimestamp(colIndex);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -409,13 +409,13 @@ public class TSDBResultSet implements ResultSet {
|
|||
}
|
||||
|
||||
public int findColumn(String columnLabel) throws SQLException {
|
||||
Iterator<ColumnMetaData> colMetaDataIt = this.columnMetaDataList.iterator();
|
||||
while (colMetaDataIt.hasNext()) {
|
||||
ColumnMetaData colMetaData = colMetaDataIt.next();
|
||||
if (colMetaData.getColName() != null && colMetaData.getColName().equalsIgnoreCase(columnLabel)) {
|
||||
return colMetaData.getColIndex() + 1;
|
||||
}
|
||||
}
|
||||
Iterator<ColumnMetaData> colMetaDataIt = this.columnMetaDataList.iterator();
|
||||
while (colMetaDataIt.hasNext()) {
|
||||
ColumnMetaData colMetaData = colMetaDataIt.next();
|
||||
if (colMetaData.getColName() != null && colMetaData.getColName().equalsIgnoreCase(columnLabel)) {
|
||||
return colMetaData.getColIndex() + 1;
|
||||
}
|
||||
}
|
||||
throw new SQLException(TSDBConstants.INVALID_VARIABLES);
|
||||
}
|
||||
|
||||
|
@ -882,7 +882,7 @@ public class TSDBResultSet implements ResultSet {
|
|||
}
|
||||
|
||||
public String getNString(int columnIndex) throws SQLException {
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
int colIndex = getTrueColumnIndex(columnIndex);
|
||||
return (String) rowData.get(colIndex);
|
||||
}
|
||||
|
||||
|
|
|
@@ -27,6 +27,8 @@ public class TSDBStatement implements Statement {
    /** Timeout for a query */
    protected int queryTimeout = 0;

    private Long pSql = 0l;

    /**
     * Status of current statement
     */

@@ -66,21 +68,23 @@ public class TSDBStatement implements Statement {
        if (isClosed) {
            throw new SQLException("Invalid method call on a closed statement.");
        }
        int res = this.connecter.executeQuery(sql);
        long res = this.connecter.executeQuery(sql);
        long resultSetPointer = this.connecter.getResultSet();

        if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
            this.connecter.freeResultSet(res);
            throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
        } else if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
            this.connecter.freeResultSet();
            throw new SQLException("The executed SQL is not a DML or a DDL");
        } else {
            return res;
            int num = this.connecter.getAffectedRows(res);
            return num;
        }
    }

    public String getErrorMsg() {
        return this.connecter.getErrMsg();
    public String getErrorMsg(long pSql) {
        return this.connecter.getErrMsg(pSql);
    }

    public void close() throws SQLException {

@@ -170,7 +174,7 @@ public class TSDBStatement implements Statement {
        if (isClosed) {
            throw new SQLException("Invalid method call on a closed statement.");
        }
        return this.connecter.getAffectedRows();
        return this.connecter.getAffectedRows(this.pSql);
    }

    public boolean getMoreResults() throws SQLException {
@ -0,0 +1,185 @@
|
|||
/***************************************************************************
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*****************************************************************************/
|
||||
package com.taosdata.jdbc;
|
||||
|
||||
import javax.management.OperationsException;
|
||||
import java.sql.SQLException;
|
||||
import java.util.Map;
|
||||
import java.util.TimerTask;
|
||||
import java.util.concurrent.*;
|
||||
|
||||
public class TSDBSubscribe {
|
||||
private TSDBJNIConnector connecter = null;
|
||||
private static ScheduledExecutorService pool;
|
||||
private static Map<Long, TSDBTimerTask> timerTaskMap = new ConcurrentHashMap<>();
|
||||
private static Map<Long, ScheduledFuture> scheduledMap = new ConcurrentHashMap();
|
||||
|
||||
private static class TimerInstance {
|
||||
private static final ScheduledExecutorService instance = Executors.newScheduledThreadPool(1);
|
||||
}
|
||||
|
||||
public static ScheduledExecutorService getTimerInstance() {
|
||||
return TimerInstance.instance;
|
||||
}
|
||||
|
||||
public TSDBSubscribe(TSDBJNIConnector connecter) throws SQLException {
|
||||
if (null != connecter) {
|
||||
this.connecter = connecter;
|
||||
} else {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* sync subscribe
|
||||
*
|
||||
* @param topic
|
||||
* @param sql
|
||||
* @param restart
|
||||
* @param period
|
||||
* @throws SQLException
|
||||
*/
|
||||
public long subscribe(String topic, String sql, boolean restart, int period) throws SQLException {
|
||||
if (this.connecter.isClosed()) {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
if (period < 1000) {
|
||||
throw new SQLException(TSDBConstants.WrapErrMsg(TSDBConstants.INVALID_VARIABLES));
|
||||
}
|
||||
return this.connecter.subscribe(topic, sql, restart, period);
|
||||
}
|
||||
|
||||
/**
|
||||
* async subscribe
|
||||
*
|
||||
* @param topic
|
||||
* @param sql
|
||||
* @param restart
|
||||
* @param period
|
||||
* @param callBack
|
||||
* @throws SQLException
|
||||
*/
|
||||
public long subscribe(String topic, String sql, boolean restart, int period, TSDBSubscribeCallBack callBack) throws SQLException {
|
||||
if (this.connecter.isClosed()) {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
final long subscription = this.connecter.subscribe(topic, sql, restart, period);
|
||||
if (null != callBack) {
|
||||
pool = getTimerInstance();
|
||||
|
||||
TSDBTimerTask timerTask = new TSDBTimerTask(subscription, callBack);
|
||||
|
||||
timerTaskMap.put(subscription, timerTask);
|
||||
|
||||
ScheduledFuture scheduledFuture = pool.scheduleAtFixedRate(timerTask, 1, 1000, TimeUnit.MILLISECONDS);
|
||||
scheduledMap.put(subscription, scheduledFuture);
|
||||
}
|
||||
return subscription;
|
||||
}
|
||||
|
||||
public TSDBResultSet consume(long subscription) throws OperationsException, SQLException {
|
||||
if (this.connecter.isClosed()) {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
if (0 == subscription) {
|
||||
throw new OperationsException("Invalid use of consume");
|
||||
}
|
||||
long resultSetPointer = this.connecter.consume(subscription);
|
||||
|
||||
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
} else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
|
||||
return null;
|
||||
} else {
|
||||
return new TSDBResultSet(this.connecter, resultSetPointer);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* cancel subscribe
|
||||
*
|
||||
* @param subscription
|
||||
* @param isKeep
|
||||
* @throws SQLException
|
||||
*/
|
||||
public void unsubscribe(long subscription, boolean isKeep) throws SQLException {
|
||||
if (this.connecter.isClosed()) {
|
||||
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
|
||||
}
|
||||
|
||||
if (null != timerTaskMap.get(subscription)) {
|
||||
synchronized (timerTaskMap.get(subscription)) {
|
||||
while (1 == timerTaskMap.get(subscription).getState()) {
|
||||
try {
|
||||
Thread.sleep(10);
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
timerTaskMap.get(subscription).setState(2);
|
||||
if (!timerTaskMap.isEmpty() && timerTaskMap.containsKey(subscription)) {
|
||||
timerTaskMap.get(subscription).cancel();
|
||||
timerTaskMap.remove(subscription);
|
||||
scheduledMap.get(subscription).cancel(false);
|
||||
scheduledMap.remove(subscription);
|
||||
}
|
||||
this.connecter.unsubscribe(subscription, isKeep);
|
||||
}
|
||||
} else {
|
||||
this.connecter.unsubscribe(subscription, isKeep);
|
||||
}
|
||||
}
|
||||
|
||||
class TSDBTimerTask extends TimerTask {
|
||||
private long subscription;
|
||||
private TSDBSubscribeCallBack callBack;
|
||||
// 0: not running 1: running 2: cancel
|
||||
private int state = 0;
|
||||
|
||||
public TSDBTimerTask(long subscription, TSDBSubscribeCallBack callBack) {
|
||||
this.subscription = subscription;
|
||||
this.callBack = callBack;
|
||||
}
|
||||
|
||||
public int getState() {
|
||||
return this.state;
|
||||
}
|
||||
|
||||
public void setState(int state) {
|
||||
this.state = state;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
synchronized (this) {
|
||||
if (2 == state) {
|
||||
return;
|
||||
}
|
||||
|
||||
state = 1;
|
||||
|
||||
try {
|
||||
TSDBResultSet resultSet = consume(subscription);
|
||||
callBack.invoke(resultSet);
|
||||
} catch (Exception e) {
|
||||
this.cancel();
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
state = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,19 @@
/***************************************************************************
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************/
package com.taosdata.jdbc;

public interface TSDBSubscribeCallBack {
    void invoke(TSDBResultSet resultSet);
}
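The callback interface above is driven by TSDBSubscribe's scheduled timer task: every period it consumes the subscription and passes the result set to invoke(). A rough C equivalent of that polling loop, with the callback type and the sleep helper introduced only for illustration:

#include <taos.h>
#include <unistd.h>

typedef void (*consume_cb)(TAOS_RES *res, void *param);   /* plays the role of TSDBSubscribeCallBack */

static void poll_with_callback(TAOS_SUB *sub, consume_cb cb, void *param, int rounds, int period_ms) {
  for (int i = 0; i < rounds; i++) {
    TAOS_RES *res = taos_consume(sub);        /* may be NULL or empty when nothing new arrived */
    if (res != NULL) {
      cb(res, param);                         /* counterpart of callBack.invoke(resultSet) */
    }
    usleep((useconds_t)period_ms * 1000);     /* the Java side uses a ScheduledExecutorService */
  }
}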
@ -0,0 +1,83 @@
|
|||
import com.taosdata.jdbc.*;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.util.Properties;
|
||||
|
||||
public class TestAsyncTSDBSubscribe {
|
||||
public static void main(String[] args) {
|
||||
String usage = "java -cp taos-jdbcdriver-1.0.3_dev-dist.jar com.taosdata.jdbc.TSDBSubscribe -db dbName -topic topicName " +
|
||||
"-tname tableName -h host";
|
||||
if (args.length < 2) {
|
||||
System.err.println(usage);
|
||||
return;
|
||||
}
|
||||
|
||||
String dbName = "";
|
||||
String tName = "";
|
||||
String host = "localhost";
|
||||
String topic = "";
|
||||
for (int i = 0; i < args.length; i++) {
|
||||
if ("-db".equalsIgnoreCase(args[i]) && i < args.length - 1) {
|
||||
dbName = args[++i];
|
||||
}
|
||||
if ("-tname".equalsIgnoreCase(args[i]) && i < args.length - 1) {
|
||||
tName = args[++i];
|
||||
}
|
||||
if ("-h".equalsIgnoreCase(args[i]) && i < args.length - 1) {
|
||||
host = args[++i];
|
||||
}
|
||||
if ("-topic".equalsIgnoreCase(args[i]) && i < args.length - 1) {
|
||||
topic = args[++i];
|
||||
}
|
||||
}
|
||||
if (StringUtils.isEmpty(dbName) || StringUtils.isEmpty(tName) || StringUtils.isEmpty(topic)) {
|
||||
System.err.println(usage);
|
||||
return;
|
||||
}
|
||||
|
||||
Connection connection = null;
|
||||
TSDBSubscribe subscribe = null;
|
||||
long subscribId = 0;
|
||||
try {
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + dbName + "?user=root&password=taosdata", properties);
|
||||
String rawSql = "select * from " + tName + ";";
|
||||
subscribe = ((TSDBConnection) connection).createSubscribe();
|
||||
subscribId = subscribe.subscribe(topic, rawSql, false, 1000, new CallBack("first"));
|
||||
long subscribId2 = subscribe.subscribe("test", rawSql, false, 1000, new CallBack("second"));
|
||||
int a = 0;
|
||||
Thread.sleep(2000);
|
||||
subscribe.unsubscribe(subscribId, true);
|
||||
System.err.println("cancel subscribe");
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
private static class CallBack implements TSDBSubscribeCallBack {
|
||||
private String name = "";
|
||||
|
||||
public CallBack(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void invoke(TSDBResultSet resultSet) {
|
||||
try {
|
||||
while (null !=resultSet && resultSet.next()) {
|
||||
System.out.print("callback_" + name + ": ");
|
||||
for (int i = 1; i <= resultSet.getMetaData().getColumnCount(); i++) {
|
||||
System.out.printf(i + ": " + resultSet.getString(i) + "\t");
|
||||
}
|
||||
System.out.println();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -10,9 +10,9 @@ public class TestPreparedStatement {
        try {
            Class.forName("com.taosdata.jdbc.TSDBDriver");
            Properties properties = new Properties();
            properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "192.168.1.117");
            Connection connection = DriverManager.getConnection("jdbc:TAOS://192.168.1.117:0/?user=root&password=taosdata", properties);
            String rawSql = "SELECT ts, c1 FROM (select c1, ts from db.tb1) SUB_QRY";
            properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, "localhost");
            Connection connection = DriverManager.getConnection("jdbc:TAOS://localhost:0/?user=root&password=taosdata", properties);
            String rawSql = "select * from test.log0601";
            // String[] params = new String[]{"ts", "c1"};
            PreparedStatement pstmt = (TSDBPreparedStatement) connection.prepareStatement(rawSql);
            ResultSet resSet = pstmt.executeQuery();
@ -0,0 +1,83 @@
|
|||
import com.taosdata.jdbc.TSDBConnection;
|
||||
import com.taosdata.jdbc.TSDBDriver;
|
||||
import com.taosdata.jdbc.TSDBResultSet;
|
||||
import com.taosdata.jdbc.TSDBSubscribe;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.util.Properties;
|
||||
|
||||
public class TestTSDBSubscribe {
|
||||
public static void main(String[] args) throws Exception {
|
||||
String usage = "java -cp taos-jdbcdriver-1.0.3_dev-dist.jar com.taosdata.jdbc.TSDBSubscribe -db dbName " +
|
||||
"-topic topicName -tname tableName -h host";
|
||||
if (args.length < 2) {
|
||||
System.err.println(usage);
|
||||
return;
|
||||
}
|
||||
|
||||
String dbName = "";
|
||||
String tName = "";
|
||||
String host = "localhost";
|
||||
String topic = "";
|
||||
for (int i = 0; i < args.length; i++) {
|
||||
if ("-db".equalsIgnoreCase(args[i]) && i < args.length - 1) {
|
||||
dbName = args[++i];
|
||||
}
|
||||
if ("-tname".equalsIgnoreCase(args[i]) && i < args.length - 1) {
|
||||
tName = args[++i];
|
||||
}
|
||||
if ("-h".equalsIgnoreCase(args[i]) && i < args.length - 1) {
|
||||
host = args[++i];
|
||||
}
|
||||
if ("-topic".equalsIgnoreCase(args[i]) && i < args.length - 1) {
|
||||
topic = args[++i];
|
||||
}
|
||||
}
|
||||
if (StringUtils.isEmpty(dbName) || StringUtils.isEmpty(tName) || StringUtils.isEmpty(topic)) {
|
||||
System.err.println(usage);
|
||||
return;
|
||||
}
|
||||
|
||||
Connection connection = null;
|
||||
TSDBSubscribe subscribe = null;
|
||||
long subscribId = 0;
|
||||
try {
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
|
||||
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/" + dbName + "?user=root&password=taosdata"
|
||||
, properties);
|
||||
String rawSql = "select * from " + tName + ";";
|
||||
subscribe = ((TSDBConnection) connection).createSubscribe();
|
||||
subscribId = subscribe.subscribe(topic, rawSql, false, 1000);
|
||||
int a = 0;
|
||||
while (true) {
|
||||
Thread.sleep(900);
|
||||
TSDBResultSet resSet = subscribe.consume(subscribId);
|
||||
|
||||
while (resSet.next()) {
|
||||
for (int i = 1; i <= resSet.getMetaData().getColumnCount(); i++) {
|
||||
System.out.printf(i + ": " + resSet.getString(i) + "\t");
|
||||
}
|
||||
System.out.println("\n======" + a + "==========");
|
||||
}
|
||||
|
||||
a++;
|
||||
if (a >= 10) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
} finally {
|
||||
if (null != subscribe && 0 != subscribId) {
|
||||
subscribe.unsubscribe(subscribId, true);
|
||||
}
|
||||
if (null != connection) {
|
||||
connection.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -142,12 +142,14 @@ class CTaosInterface(object):
|
|||
libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
|
||||
libtaos.taos_init.restype = None
|
||||
libtaos.taos_connect.restype = ctypes.c_void_p
|
||||
libtaos.taos_use_result.restype = ctypes.c_void_p
|
||||
#libtaos.taos_use_result.restype = ctypes.c_void_p
|
||||
libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
|
||||
libtaos.taos_errstr.restype = ctypes.c_char_p
|
||||
libtaos.taos_subscribe.restype = ctypes.c_void_p
|
||||
libtaos.taos_consume.restype = ctypes.c_void_p
|
||||
libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
|
||||
libtaos.taos_free_result.restype = None
|
||||
libtaos.taos_errno.restype = ctypes.c_int
|
||||
|
||||
def __init__(self, config=None):
|
||||
'''
|
||||
|
@ -251,10 +253,10 @@ class CTaosInterface(object):
|
|||
# CTaosInterface.libtaos.close(connection)
|
||||
|
||||
@staticmethod
|
||||
def affectedRows(connection):
|
||||
def affectedRows(result):
|
||||
"""The affected rows after runing query
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_affected_rows(connection)
|
||||
return CTaosInterface.libtaos.taos_affected_rows(result)
|
||||
|
||||
@staticmethod
|
||||
def subscribe(connection, restart, topic, sql, interval):
|
||||
|
@ -292,18 +294,17 @@ class CTaosInterface(object):
|
|||
CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
|
||||
|
||||
@staticmethod
|
||||
def useResult(connection):
|
||||
def useResult(result):
|
||||
'''Use result after calling self.query
|
||||
'''
|
||||
result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection))
|
||||
fields = []
|
||||
pfields = CTaosInterface.fetchFields(result)
|
||||
for i in range(CTaosInterface.fieldsCount(connection)):
|
||||
for i in range(CTaosInterface.fieldsCount(result)):
|
||||
fields.append({'name': pfields[i].name.decode('utf-8'),
|
||||
'bytes': pfields[i].bytes,
|
||||
'type': ord(pfields[i].type)})
|
||||
|
||||
return result, fields
|
||||
return fields
|
||||
|
||||
@staticmethod
|
||||
def fetchBlock(result, fields):
|
||||
|
@ -337,8 +338,8 @@ class CTaosInterface(object):
|
|||
result.value = None
|
||||
|
||||
@staticmethod
|
||||
def fieldsCount(connection):
|
||||
return CTaosInterface.libtaos.taos_field_count(connection)
|
||||
def fieldsCount(result):
|
||||
return CTaosInterface.libtaos.taos_field_count(result)
|
||||
|
||||
@staticmethod
|
||||
def fetchFields(result):
|
||||
|
@ -386,29 +387,30 @@ class CTaosInterface(object):
|
|||
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
|
||||
|
||||
@staticmethod
|
||||
def errno(connection):
|
||||
def errno(result):
|
||||
"""Return the error number.
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_errno(connection)
|
||||
return CTaosInterface.libtaos.taos_errno(result)
|
||||
|
||||
@staticmethod
|
||||
def errStr(connection):
|
||||
def errStr(result):
|
||||
"""Return the error styring
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_errstr(connection)
|
||||
return CTaosInterface.libtaos.taos_errstr(result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
cinter = CTaosInterface()
|
||||
conn = cinter.connect()
|
||||
result = cinter.query(conn, 'show databases')
|
||||
|
||||
print('Query return value: {}'.format(cinter.query(conn, 'show databases')))
|
||||
print('Affected rows: {}'.format(cinter.affectedRows(conn)))
|
||||
print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
|
||||
|
||||
result, des = CTaosInterface.useResult(conn)
|
||||
fields = CTaosInterface.useResult(result)
|
||||
|
||||
data, num_of_rows = CTaosInterface.fetchBlock(result, des)
|
||||
data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
|
||||
|
||||
print(data)
|
||||
|
||||
cinter.freeresult(result)
|
||||
cinter.close(conn)
|
|
@ -78,9 +78,7 @@ class TDengineConnection(object):
|
|||
def clear_result_set(self):
|
||||
"""Clear unused result set on this connection.
|
||||
"""
|
||||
result = self._chandle.useResult(self._conn)[0]
|
||||
if result:
|
||||
self._chandle.freeResult(result)
|
||||
pass
|
||||
|
||||
if __name__ == "__main__":
|
||||
conn = TDengineConnection(host='192.168.1.107')
|
||||
|
|
|
@@ -28,6 +28,6 @@ class FieldType(object):
C_FLOAT_NULL = float('nan')
C_DOUBLE_NULL = float('nan')
C_BINARY_NULL = bytearray([int('0xff', 16)])
# Time precision definition
# Timestamp precision definition
C_TIMESTAMP_MILLI = 0
C_TIMESTAMP_MICRO = 1
|
||||
|
|
|
@ -116,25 +116,30 @@ class TDengineCursor(object):
|
|||
if params is not None:
|
||||
pass
|
||||
|
||||
res = CTaosInterface.query(self._connection._conn, stmt)
|
||||
|
||||
# global querySeqNum
|
||||
# querySeqNum += 1
|
||||
# localSeqNum = querySeqNum # avoid race condition
|
||||
# print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
|
||||
self._result = CTaosInterface.query(self._connection._conn, stmt)
|
||||
# print(" << Query ({}) Exec Done".format(localSeqNum))
|
||||
if (self._logfile):
|
||||
with open(self._logfile, "a") as logfile:
|
||||
logfile.write("%s;\n" % operation)
|
||||
|
||||
if res == 0:
|
||||
if CTaosInterface.fieldsCount(self._connection._conn) == 0:
|
||||
errno = CTaosInterface.libtaos.taos_errno(self._result)
|
||||
if errno == 0:
|
||||
if CTaosInterface.fieldsCount(self._result) == 0:
|
||||
self._affected_rows += CTaosInterface.affectedRows(
|
||||
self._connection._conn)
|
||||
return CTaosInterface.affectedRows(self._connection._conn)
|
||||
self._result )
|
||||
return CTaosInterface.affectedRows(self._result )
|
||||
else:
|
||||
self._result, self._fields = CTaosInterface.useResult(
|
||||
self._connection._conn)
|
||||
self._fields = CTaosInterface.useResult(
|
||||
self._result)
|
||||
return self._handle_result()
|
||||
else:
|
||||
raise ProgrammingError(
|
||||
CTaosInterface.errStr(
|
||||
self._connection._conn))
|
||||
self._result ))
|
||||
|
||||
def executemany(self, operation, seq_of_parameters):
|
||||
"""Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
|
||||
|
|
|
@ -142,12 +142,14 @@ class CTaosInterface(object):
|
|||
libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
|
||||
libtaos.taos_init.restype = None
|
||||
libtaos.taos_connect.restype = ctypes.c_void_p
|
||||
libtaos.taos_use_result.restype = ctypes.c_void_p
|
||||
#libtaos.taos_use_result.restype = ctypes.c_void_p
|
||||
libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
|
||||
libtaos.taos_errstr.restype = ctypes.c_char_p
|
||||
libtaos.taos_subscribe.restype = ctypes.c_void_p
|
||||
libtaos.taos_consume.restype = ctypes.c_void_p
|
||||
libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
|
||||
libtaos.taos_free_result.restype = None
|
||||
libtaos.taos_errno.restype = ctypes.c_int
|
||||
|
||||
def __init__(self, config=None):
|
||||
'''
|
||||
|
@ -251,10 +253,10 @@ class CTaosInterface(object):
|
|||
# CTaosInterface.libtaos.close(connection)
|
||||
|
||||
@staticmethod
|
||||
def affectedRows(connection):
|
||||
def affectedRows(result):
|
||||
"""The affected rows after runing query
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_affected_rows(connection)
|
||||
return CTaosInterface.libtaos.taos_affected_rows(result)
|
||||
|
||||
@staticmethod
|
||||
def subscribe(connection, restart, topic, sql, interval):
|
||||
|
@ -292,18 +294,17 @@ class CTaosInterface(object):
|
|||
CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
|
||||
|
||||
@staticmethod
|
||||
def useResult(connection):
|
||||
def useResult(result):
|
||||
'''Use result after calling self.query
|
||||
'''
|
||||
result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection))
|
||||
fields = []
|
||||
pfields = CTaosInterface.fetchFields(result)
|
||||
for i in range(CTaosInterface.fieldsCount(connection)):
|
||||
for i in range(CTaosInterface.fieldsCount(result)):
|
||||
fields.append({'name': pfields[i].name.decode('utf-8'),
|
||||
'bytes': pfields[i].bytes,
|
||||
'type': ord(pfields[i].type)})
|
||||
|
||||
return result, fields
|
||||
return fields
|
||||
|
||||
@staticmethod
|
||||
def fetchBlock(result, fields):
|
||||
|
@ -337,8 +338,8 @@ class CTaosInterface(object):
|
|||
result.value = None
|
||||
|
||||
@staticmethod
|
||||
def fieldsCount(connection):
|
||||
return CTaosInterface.libtaos.taos_field_count(connection)
|
||||
def fieldsCount(result):
|
||||
return CTaosInterface.libtaos.taos_field_count(result)
|
||||
|
||||
@staticmethod
|
||||
def fetchFields(result):
|
||||
|
@ -386,29 +387,30 @@ class CTaosInterface(object):
|
|||
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
|
||||
|
||||
@staticmethod
|
||||
def errno(connection):
|
||||
def errno(result):
|
||||
"""Return the error number.
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_errno(connection)
|
||||
return CTaosInterface.libtaos.taos_errno(result)
|
||||
|
||||
@staticmethod
|
||||
def errStr(connection):
|
||||
def errStr(result):
|
||||
"""Return the error styring
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_errstr(connection).decode('utf-8')
|
||||
return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
cinter = CTaosInterface()
|
||||
conn = cinter.connect()
|
||||
result = cinter.query(conn, 'show databases')
|
||||
|
||||
print('Query return value: {}'.format(cinter.query(conn, 'show databases')))
|
||||
print('Affected rows: {}'.format(cinter.affectedRows(conn)))
|
||||
print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
|
||||
|
||||
result, des = CTaosInterface.useResult(conn)
|
||||
fields = CTaosInterface.useResult(result)
|
||||
|
||||
data, num_of_rows = CTaosInterface.fetchBlock(result, des)
|
||||
data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
|
||||
|
||||
print(data)
|
||||
|
||||
cinter.freeresult(result)
|
||||
cinter.close(conn)
|
|
@ -78,9 +78,7 @@ class TDengineConnection(object):
|
|||
def clear_result_set(self):
|
||||
"""Clear unused result set on this connection.
|
||||
"""
|
||||
result = self._chandle.useResult(self._conn)[0]
|
||||
if result:
|
||||
self._chandle.freeResult(result)
|
||||
pass
|
||||
|
||||
if __name__ == "__main__":
|
||||
conn = TDengineConnection(host='192.168.1.107')
|
||||
|
|
|
@ -122,26 +122,26 @@ class TDengineCursor(object):
|
|||
# querySeqNum += 1
|
||||
# localSeqNum = querySeqNum # avoid race condition
|
||||
# print(" >> Exec Query ({}): {}".format(localSeqNum, str(stmt)))
|
||||
res = CTaosInterface.query(self._connection._conn, stmt)
|
||||
self._result = CTaosInterface.query(self._connection._conn, stmt)
|
||||
# print(" << Query ({}) Exec Done".format(localSeqNum))
|
||||
|
||||
if (self._logfile):
|
||||
with open(self._logfile, "a") as logfile:
|
||||
logfile.write("%s;\n" % operation)
|
||||
|
||||
if res == 0:
|
||||
if CTaosInterface.fieldsCount(self._connection._conn) == 0:
|
||||
errno = CTaosInterface.libtaos.taos_errno(self._result)
|
||||
if errno == 0:
|
||||
if CTaosInterface.fieldsCount(self._result) == 0:
|
||||
self._affected_rows += CTaosInterface.affectedRows(
|
||||
self._connection._conn)
|
||||
return CTaosInterface.affectedRows(self._connection._conn)
|
||||
self._result )
|
||||
return CTaosInterface.affectedRows(self._result )
|
||||
else:
|
||||
self._result, self._fields = CTaosInterface.useResult(
|
||||
self._connection._conn)
|
||||
self._fields = CTaosInterface.useResult(
|
||||
self._result)
|
||||
return self._handle_result()
|
||||
else:
|
||||
raise ProgrammingError(
|
||||
CTaosInterface.errStr(
|
||||
self._connection._conn))
|
||||
self._result ))
|
||||
|
||||
def executemany(self, operation, seq_of_parameters):
|
||||
"""Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
|
|
@ -0,0 +1,21 @@
|
|||
from taos.cinterface import CTaosInterface
|
||||
from taos.error import *
|
||||
from taos.subscription import TDengineSubscription
|
||||
from taos.connection import TDengineConnection
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
conn = TDengineConnection(
|
||||
host="127.0.0.1", user="root", password="taosdata", database="test")
|
||||
|
||||
# Generate a cursor object to run SQL commands
|
||||
sub = conn.subscribe(False, "test", "select * from log0601;", 1000)
|
||||
|
||||
for i in range(100):
|
||||
print(i)
|
||||
data = sub.consume()
|
||||
for d in data:
|
||||
print(d)
|
||||
|
||||
sub.close()
|
||||
conn.close()
|
|
@ -142,12 +142,14 @@ class CTaosInterface(object):
|
|||
libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
|
||||
libtaos.taos_init.restype = None
|
||||
libtaos.taos_connect.restype = ctypes.c_void_p
|
||||
libtaos.taos_use_result.restype = ctypes.c_void_p
|
||||
#libtaos.taos_use_result.restype = ctypes.c_void_p
|
||||
libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
|
||||
libtaos.taos_errstr.restype = ctypes.c_char_p
|
||||
libtaos.taos_subscribe.restype = ctypes.c_void_p
|
||||
libtaos.taos_consume.restype = ctypes.c_void_p
|
||||
libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
|
||||
libtaos.taos_free_result.restype = None
|
||||
libtaos.taos_errno.restype = ctypes.c_int
|
||||
|
||||
def __init__(self, config=None):
|
||||
'''
|
||||
|
@ -251,10 +253,10 @@ class CTaosInterface(object):
|
|||
# CTaosInterface.libtaos.close(connection)
|
||||
|
||||
@staticmethod
|
||||
def affectedRows(connection):
|
||||
def affectedRows(result):
|
||||
"""The affected rows after runing query
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_affected_rows(connection)
|
||||
return CTaosInterface.libtaos.taos_affected_rows(result)
|
||||
|
||||
@staticmethod
|
||||
def subscribe(connection, restart, topic, sql, interval):
|
||||
|
@ -292,18 +294,17 @@ class CTaosInterface(object):
|
|||
CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
|
||||
|
||||
@staticmethod
|
||||
def useResult(connection):
|
||||
def useResult(result):
|
||||
'''Use result after calling self.query
|
||||
'''
|
||||
result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection))
|
||||
fields = []
|
||||
pfields = CTaosInterface.fetchFields(result)
|
||||
for i in range(CTaosInterface.fieldsCount(connection)):
|
||||
for i in range(CTaosInterface.fieldsCount(result)):
|
||||
fields.append({'name': pfields[i].name.decode('utf-8'),
|
||||
'bytes': pfields[i].bytes,
|
||||
'type': ord(pfields[i].type)})
|
||||
|
||||
return result, fields
|
||||
return fields
|
||||
|
||||
@staticmethod
|
||||
def fetchBlock(result, fields):
|
||||
|
@ -337,8 +338,8 @@ class CTaosInterface(object):
|
|||
result.value = None
|
||||
|
||||
@staticmethod
|
||||
def fieldsCount(connection):
|
||||
return CTaosInterface.libtaos.taos_field_count(connection)
|
||||
def fieldsCount(result):
|
||||
return CTaosInterface.libtaos.taos_field_count(result)
|
||||
|
||||
@staticmethod
|
||||
def fetchFields(result):
|
||||
|
@ -386,29 +387,30 @@ class CTaosInterface(object):
|
|||
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
|
||||
|
||||
@staticmethod
|
||||
def errno(connection):
|
||||
def errno(result):
|
||||
"""Return the error number.
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_errno(connection)
|
||||
return CTaosInterface.libtaos.taos_errno(result)
|
||||
|
||||
@staticmethod
|
||||
def errStr(connection):
|
||||
def errStr(result):
|
||||
"""Return the error styring
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_errstr(connection)
|
||||
return CTaosInterface.libtaos.taos_errstr(result)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
cinter = CTaosInterface()
|
||||
conn = cinter.connect()
|
||||
result = cinter.query(conn, 'show databases')
|
||||
|
||||
print('Query return value: {}'.format(cinter.query(conn, 'show databases')))
|
||||
print('Affected rows: {}'.format(cinter.affectedRows(conn)))
|
||||
print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
|
||||
|
||||
result, des = CTaosInterface.useResult(conn)
|
||||
fields = CTaosInterface.useResult(result)
|
||||
|
||||
data, num_of_rows = CTaosInterface.fetchBlock(result, des)
|
||||
data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
|
||||
|
||||
print(data)
|
||||
|
||||
cinter.freeresult(result)
|
||||
cinter.close(conn)
|
|
@ -79,9 +79,7 @@ class TDengineConnection(object):
|
|||
def clear_result_set(self):
|
||||
"""Clear unused result set on this connection.
|
||||
"""
|
||||
result = self._chandle.useResult(self._conn)[0]
|
||||
if result:
|
||||
self._chandle.freeResult(result)
|
||||
pass
|
||||
|
||||
if __name__ == "__main__":
|
||||
conn = TDengineConnection(host='192.168.1.107')
|
||||
|
|
|
@ -109,16 +109,17 @@ class TDengineCursor(object):
|
|||
if params is not None:
|
||||
pass
|
||||
|
||||
res = CTaosInterface.query(self._connection._conn, stmt)
|
||||
if res == 0:
|
||||
if CTaosInterface.fieldsCount(self._connection._conn) == 0:
|
||||
self._affected_rows += CTaosInterface.affectedRows(self._connection._conn)
|
||||
return CTaosInterface.affectedRows(self._connection._conn)
|
||||
self._result = CTaosInterface.query(self._connection._conn, stmt)
|
||||
errno = CTaosInterface.libtaos.taos_errno(self._result)
|
||||
if errno == 0:
|
||||
if CTaosInterface.fieldsCount(self._result) == 0:
|
||||
self._affected_rows += CTaosInterface.affectedRows(self._result)
|
||||
return CTaosInterface.affectedRows(self._result )
|
||||
else:
|
||||
self._result, self._fields = CTaosInterface.useResult(self._connection._conn)
|
||||
self._fields = CTaosInterface.useResult(self._result)
|
||||
return self._handle_result()
|
||||
else:
|
||||
raise ProgrammingError(CTaosInterface.errStr(self._connection._conn))
|
||||
raise ProgrammingError(CTaosInterface.errStr(self._result))
|
||||
|
||||
def executemany(self, operation, seq_of_parameters):
|
||||
"""Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
|
||||
|
|
|
@ -142,12 +142,14 @@ class CTaosInterface(object):
|
|||
libtaos.taos_fetch_fields.restype = ctypes.POINTER(TaosField)
|
||||
libtaos.taos_init.restype = None
|
||||
libtaos.taos_connect.restype = ctypes.c_void_p
|
||||
libtaos.taos_use_result.restype = ctypes.c_void_p
|
||||
#libtaos.taos_use_result.restype = ctypes.c_void_p
|
||||
libtaos.taos_fetch_row.restype = ctypes.POINTER(ctypes.c_void_p)
|
||||
libtaos.taos_errstr.restype = ctypes.c_char_p
|
||||
libtaos.taos_subscribe.restype = ctypes.c_void_p
|
||||
libtaos.taos_consume.restype = ctypes.c_void_p
|
||||
libtaos.taos_fetch_lengths.restype = ctypes.c_void_p
|
||||
libtaos.taos_free_result.restype = None
|
||||
libtaos.taos_errno.restype = ctypes.c_int
|
||||
|
||||
def __init__(self, config=None):
|
||||
'''
|
||||
|
@ -251,10 +253,10 @@ class CTaosInterface(object):
|
|||
# CTaosInterface.libtaos.close(connection)
|
||||
|
||||
@staticmethod
|
||||
def affectedRows(connection):
|
||||
def affectedRows(result):
|
||||
"""The affected rows after runing query
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_affected_rows(connection)
|
||||
return CTaosInterface.libtaos.taos_affected_rows(result)
|
||||
|
||||
@staticmethod
|
||||
def subscribe(connection, restart, topic, sql, interval):
|
||||
|
@ -292,18 +294,17 @@ class CTaosInterface(object):
|
|||
CTaosInterface.libtaos.taos_unsubscribe(sub, 1 if keepProgress else 0)
|
||||
|
||||
@staticmethod
|
||||
def useResult(connection):
|
||||
def useResult(result):
|
||||
'''Use result after calling self.query
|
||||
'''
|
||||
result = ctypes.c_void_p(CTaosInterface.libtaos.taos_use_result(connection))
|
||||
fields = []
|
||||
pfields = CTaosInterface.fetchFields(result)
|
||||
for i in range(CTaosInterface.fieldsCount(connection)):
|
||||
for i in range(CTaosInterface.fieldsCount(result)):
|
||||
fields.append({'name': pfields[i].name.decode('utf-8'),
|
||||
'bytes': pfields[i].bytes,
|
||||
'type': ord(pfields[i].type)})
|
||||
|
||||
return result, fields
|
||||
return fields
|
||||
|
||||
@staticmethod
|
||||
def fetchBlock(result, fields):
|
||||
|
@ -337,8 +338,8 @@ class CTaosInterface(object):
|
|||
result.value = None
|
||||
|
||||
@staticmethod
|
||||
def fieldsCount(connection):
|
||||
return CTaosInterface.libtaos.taos_field_count(connection)
|
||||
def fieldsCount(result):
|
||||
return CTaosInterface.libtaos.taos_field_count(result)
|
||||
|
||||
@staticmethod
|
||||
def fetchFields(result):
|
||||
|
@ -386,29 +387,30 @@ class CTaosInterface(object):
|
|||
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
|
||||
|
||||
@staticmethod
|
||||
def errno(connection):
|
||||
def errno(result):
|
||||
"""Return the error number.
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_errno(connection)
|
||||
return CTaosInterface.libtaos.taos_errno(result)
|
||||
|
||||
@staticmethod
|
||||
def errStr(connection):
|
||||
def errStr(result):
|
||||
"""Return the error styring
|
||||
"""
|
||||
return CTaosInterface.libtaos.taos_errstr(connection).decode('utf-8')
|
||||
return CTaosInterface.libtaos.taos_errstr(result).decode('utf-8')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
cinter = CTaosInterface()
|
||||
conn = cinter.connect()
|
||||
result = cinter.query(conn, 'show databases')
|
||||
|
||||
print('Query return value: {}'.format(cinter.query(conn, 'show databases')))
|
||||
print('Affected rows: {}'.format(cinter.affectedRows(conn)))
|
||||
print('Query Affected rows: {}'.format(cinter.affectedRows(result)))
|
||||
|
||||
result, des = CTaosInterface.useResult(conn)
|
||||
fields = CTaosInterface.useResult(result)
|
||||
|
||||
data, num_of_rows = CTaosInterface.fetchBlock(result, des)
|
||||
data, num_of_rows = CTaosInterface.fetchBlock(result, fields)
|
||||
|
||||
print(data)
|
||||
|
||||
cinter.freeresult(result)
|
||||
cinter.close(conn)
|
|
@ -79,9 +79,7 @@ class TDengineConnection(object):
|
|||
def clear_result_set(self):
|
||||
"""Clear unused result set on this connection.
|
||||
"""
|
||||
result = self._chandle.useResult(self._conn)[0]
|
||||
if result:
|
||||
self._chandle.freeResult(result)
|
||||
pass
|
||||
|
||||
if __name__ == "__main__":
|
||||
conn = TDengineConnection(host='192.168.1.107')
|
||||
|
|
|
@ -109,16 +109,17 @@ class TDengineCursor(object):
|
|||
if params is not None:
|
||||
pass
|
||||
|
||||
res = CTaosInterface.query(self._connection._conn, stmt)
|
||||
if res == 0:
|
||||
if CTaosInterface.fieldsCount(self._connection._conn) == 0:
|
||||
self._affected_rows += CTaosInterface.affectedRows(self._connection._conn)
|
||||
return CTaosInterface.affectedRows(self._connection._conn)
|
||||
self._result = CTaosInterface.query(self._connection._conn, stmt)
|
||||
errno = CTaosInterface.libtaos.taos_errno(self._result)
|
||||
if errno == 0:
|
||||
if CTaosInterface.fieldsCount(self._result) == 0:
|
||||
self._affected_rows += CTaosInterface.affectedRows(self._result )
|
||||
return CTaosInterface.affectedRows(self._result )
|
||||
else:
|
||||
self._result, self._fields = CTaosInterface.useResult(self._connection._conn)
|
||||
self._fields = CTaosInterface.useResult(self._result )
|
||||
return self._handle_result()
|
||||
else:
|
||||
raise ProgrammingError(CTaosInterface.errStr(self._connection._conn))
|
||||
raise ProgrammingError(CTaosInterface.errStr(self._result ))
|
||||
|
||||
def executemany(self, operation, seq_of_parameters):
|
||||
"""Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
|
||||
|
|
|
@ -38,6 +38,7 @@ static void dnodeCheckDataDirOpenned(char *dir);
|
|||
static SDnodeRunStatus tsDnodeRunStatus = TSDB_DNODE_RUN_STATUS_STOPPED;
|
||||
static int32_t dnodeInitComponents();
|
||||
static void dnodeCleanupComponents(int32_t stepId);
|
||||
static int dnodeCreateDir(const char *dir);
|
||||
|
||||
typedef struct {
|
||||
const char *const name;
|
||||
|
@ -59,6 +60,16 @@ static const SDnodeComponent tsDnodeComponents[] = {
|
|||
{"shell", dnodeInitShell, dnodeCleanupShell}
|
||||
};
|
||||
|
||||
static int dnodeCreateDir(const char *dir) {
|
||||
struct stat dirstat;
|
||||
if (stat(dir, &dirstat) < 0) {
|
||||
if (mkdir(dir, 0755) != 0 && errno != EEXIST) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dnodeCleanupComponents(int32_t stepId) {
|
||||
for (int32_t i = stepId; i >= 0; i--) {
|
||||
tsDnodeComponents[i].cleanup();
|
||||
|
@ -87,9 +98,9 @@ int32_t dnodeInitSystem() {
|
|||
taosSetCoreDump();
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
|
||||
struct stat dirstat;
|
||||
if (stat(tsLogDir, &dirstat) < 0) {
|
||||
mkdir(tsLogDir, 0755);
|
||||
if (dnodeCreateDir(tsLogDir) < 0) {
|
||||
printf("failed to create dir: %s, reason: %s\n", tsLogDir, strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
char temp[TSDB_FILENAME_LEN];
|
||||
|
@ -140,7 +151,11 @@ static void dnodeCheckDataDirOpenned(char *dir) {
|
|||
char filepath[256] = {0};
|
||||
sprintf(filepath, "%s/.running", dir);
|
||||
|
||||
int32_t fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO);
|
||||
int fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO);
|
||||
if (fd < 0) {
|
||||
dError("failed to open lock file:%s, reason: %s, quit", filepath, strerror(errno));
|
||||
exit(0);
|
||||
}
|
||||
int32_t ret = flock(fd, LOCK_EX | LOCK_NB);
|
||||
if (ret != 0) {
|
||||
dError("failed to lock file:%s ret:%d, database may be running, quit", filepath, ret);
|
||||
|
@ -150,16 +165,28 @@ static void dnodeCheckDataDirOpenned(char *dir) {
|
|||
}
|
||||
|
||||
static int32_t dnodeInitStorage() {
|
||||
struct stat dirstat;
|
||||
if (stat(tsDataDir, &dirstat) < 0) {
|
||||
mkdir(tsDataDir, 0755);
|
||||
if (dnodeCreateDir(tsDataDir) < 0) {
|
||||
dError("failed to create dir: %s, reason: %s", tsDataDir, strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
sprintf(tsMnodeDir, "%s/mnode", tsDataDir);
|
||||
sprintf(tsVnodeDir, "%s/vnode", tsDataDir);
|
||||
sprintf(tsDnodeDir, "%s/dnode", tsDataDir);
|
||||
mkdir(tsVnodeDir, 0755);
|
||||
mkdir(tsDnodeDir, 0755);
|
||||
|
||||
//TODO(dengyihao): no need to init here
|
||||
if (dnodeCreateDir(tsMnodeDir) < 0) {
|
||||
dError("failed to create dir: %s, reason: %s", tsMnodeDir, strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
//TODO(dengyihao): no need to init here
|
||||
if (dnodeCreateDir(tsVnodeDir) < 0) {
|
||||
dError("failed to create dir: %s, reason: %s", tsVnodeDir, strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
if (dnodeCreateDir(tsDnodeDir) < 0) {
|
||||
dError("failed to create dir: %s, reason: %s", tsDnodeDir, strerror(errno));
|
||||
return -1;
|
||||
}
|
||||
|
||||
dnodeCheckDataDirOpenned(tsDnodeDir);
|
||||
|
||||
|
|
|
@ -40,6 +40,7 @@ int32_t dnodeInitShell() {
|
|||
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_SUBMIT] = dnodeDispatchToVnodeWriteQueue;
|
||||
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_QUERY] = dnodeDispatchToVnodeReadQueue;
|
||||
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_FETCH] = dnodeDispatchToVnodeReadQueue;
|
||||
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = dnodeDispatchToVnodeWriteQueue;
|
||||
|
||||
// the following message shall be treated as mnode write
|
||||
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_ACCT] = dnodeDispatchToMnodeWriteQueue;
|
||||
|
|
|
@ -88,14 +88,13 @@ int taos_stmt_execute(TAOS_STMT *stmt);
|
|||
TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt);
|
||||
int taos_stmt_close(TAOS_STMT *stmt);
|
||||
|
||||
DLL_EXPORT int taos_query(TAOS *taos, const char *sql);
|
||||
DLL_EXPORT TAOS_RES *taos_use_result(TAOS *taos);
|
||||
DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql);
|
||||
DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_result_precision(TAOS_RES *res); // get the time precision of result
|
||||
DLL_EXPORT void taos_free_result(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_field_count(TAOS *taos);
|
||||
DLL_EXPORT int taos_field_count(TAOS_RES *tres);
|
||||
DLL_EXPORT int taos_num_fields(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_affected_rows(TAOS *taos);
|
||||
DLL_EXPORT int taos_affected_rows(TAOS_RES *taos);
|
||||
DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_select_db(TAOS *taos, const char *db);
|
||||
DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
|
||||
|
@ -112,9 +111,9 @@ int* taos_fetch_lengths(TAOS_RES *res);
|
|||
// TODO: the return value should be `const`
|
||||
DLL_EXPORT char *taos_get_server_info(TAOS *taos);
|
||||
DLL_EXPORT char *taos_get_client_info();
|
||||
DLL_EXPORT char *taos_errstr(TAOS *taos);
|
||||
DLL_EXPORT char *taos_errstr(TAOS_RES *tres);
|
||||
|
||||
DLL_EXPORT int taos_errno(TAOS *taos);
|
||||
DLL_EXPORT int taos_errno(TAOS_RES *tres);
|
||||
|
||||
DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);
|
||||
DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
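The taos.h changes above move error reporting and result metadata from the connection handle onto the query result: taos_query() now returns a TAOS_RES*, and taos_errno(), taos_errstr(), taos_field_count() and taos_affected_rows() are read from that handle. A minimal sketch of the resulting synchronous call pattern, using only functions declared in this header (run_query is an illustrative wrapper, the buffer size is arbitrary):

    #include <stdio.h>
    #include "taos.h"

    // sketch of the result-handle based flow introduced by this header change
    int run_query(TAOS *taos, const char *sql) {
      TAOS_RES *res = taos_query(taos, sql);        // no longer returns an int code
      if (taos_errno(res) != 0) {                   // errors now live on the result
        fprintf(stderr, "query failed: %s\n", taos_errstr(res));
        taos_free_result(res);
        return -1;
      }
      if (taos_field_count(res) == 0) {             // insert / DDL style statement
        printf("affected rows: %d\n", taos_affected_rows(res));
      } else {                                      // select style statement
        int         nfields = taos_num_fields(res);
        TAOS_FIELD *fields  = taos_fetch_fields(res);
        TAOS_ROW    row;
        while ((row = taos_fetch_row(res)) != NULL) {
          char line[1024] = {0};
          taos_print_row(line, row, fields, nfields);
          printf("%s\n", line);
        }
      }
      taos_free_result(res);                        // the caller still owns and frees the result
      return 0;
    }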
|
||||
|
|
|
@ -323,7 +323,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
|
|||
#define TSDB_QUERY_TYPE_SUBQUERY 0x02u
|
||||
#define TSDB_QUERY_TYPE_STABLE_SUBQUERY 0x04u // two-stage subquery for super table
|
||||
|
||||
#define TSDB_QUERY_TYPE_TABLE_QUERY 0x08u // query ordinary table; below only apply to client side
|
||||
#define TSDB_QUERY_TYPE_TABLE_QUERY 0x08u // query ordinary table; below only apply to client side
|
||||
#define TSDB_QUERY_TYPE_STABLE_QUERY 0x10u // query on super table
|
||||
#define TSDB_QUERY_TYPE_JOIN_QUERY 0x20u // join query
|
||||
#define TSDB_QUERY_TYPE_PROJECTION_QUERY 0x40u // select *,columns... query
|
||||
|
|
|
@ -92,16 +92,15 @@ TAOS_DEFINE_ERROR(TSDB_CODE_NO_USER_FROM_CONN, 0, 0x0185, "can not get
|
|||
// table
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TABLE_ALREADY_EXIST, 0, 0x0200, "table already exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TABLE_ID, 0, 0x0201, "invalid table id")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TABLE_TYPE, 0, 0x0202, "invalid table typee")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TABLE_TYPE, 0, 0x0202, "invalid table type")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_NOT_SUPER_TABLE, 0, 0x0203, "no super table") // operation only available for super table
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TAG_ALREAY_EXIST, 0, 0x0204, "tag already exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TAG_NOT_EXIST, 0, 0x0205, "tag not exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_FIELD_ALREAY_EXIST, 0, 0x0206, "field already exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_FIELD_NOT_EXIST, 0, 0x0207, "field not exist")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_COL_NAME_TOO_LONG, 0, 0x0209, "column name too long")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_COL_NAME_TOO_LONG, 0, 0x0208, "column name too long")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TOO_MANY_TAGS, 0, 0x0209, "too many tags")
|
||||
|
||||
|
||||
// dnode & mnode
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_NO_ENOUGH_DNODES, 0, 0x0280, "no enough dnodes")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_DNODE_ALREADY_EXIST, 0, 0x0281, "dnode already exist")
|
||||
|
@ -173,6 +172,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_FILE_FORMAT, 0, 0x0500, "invalid file
|
|||
|
||||
// TSDB
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_CONFIG, 0, 0x0580, "invalid TSDB configuration")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TAG_VER_OUT_OF_DATE, 0, 0x0581, "tag version is out of date")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_TABLE_SCHEMA_VERSION, 0, 0x0582, "invalid table schema version from client")
|
||||
|
||||
|
||||
#ifdef TAOS_ERROR_C
|
||||
|
|
|
@@ -43,7 +43,7 @@ enum {
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_SUBMIT, "submit" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_QUERY, "query" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_FETCH, "fetch" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY0, "dummy0" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_UPDATE_TAG_VAL, "update-tag-val" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY1, "dummy1" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY2, "dummy2" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY3, "dummy3" )
|
||||
|
@ -277,6 +277,18 @@ typedef struct {
|
|||
// char tagVal[];
|
||||
} SCMAlterTableMsg;
|
||||
|
||||
typedef struct {
|
||||
SMsgHead head;
|
||||
int64_t uid;
|
||||
int32_t tid;
|
||||
int16_t tversion;
|
||||
int16_t colId;
|
||||
int16_t type;
|
||||
int16_t bytes;
|
||||
int32_t tagValLen;
|
||||
char data[];
|
||||
} SUpdateTableTagValMsg;
|
||||
|
||||
typedef struct {
|
||||
char clientVersion[TSDB_VERSION_LEN];
|
||||
char msgVersion[TSDB_VERSION_LEN];
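The SUpdateTableTagValMsg struct added above is the wire message for changing a single tag value of one table. A rough sketch of how a sender could size and fill it, based only on the fields declared in the struct (the helper name is made up for illustration; the SMsgHead contents, byte-order conversion and the surrounding RPC plumbing are left out):

    #include <stdlib.h>
    #include <string.h>

    // hypothetical helper, not part of this change set
    static SUpdateTableTagValMsg *buildUpdateTagValMsg(int64_t uid, int32_t tid, int16_t tversion,
                                                       int16_t colId, int16_t type, int16_t bytes,
                                                       const void *tagVal, int32_t tagValLen) {
      // data[] is a flexible array member, so allocate the header plus the value payload
      SUpdateTableTagValMsg *pMsg = calloc(1, sizeof(SUpdateTableTagValMsg) + tagValLen);
      if (pMsg == NULL) return NULL;
      // pMsg->head is left for the transport layer to fill in
      pMsg->uid       = uid;        // unique id of the target table
      pMsg->tid       = tid;        // table index inside the vnode
      pMsg->tversion  = tversion;   // tag schema version known to the sender
      pMsg->colId     = colId;      // tag column to update
      pMsg->type      = type;       // data type of that tag column
      pMsg->bytes     = bytes;      // declared width of that tag column
      pMsg->tagValLen = tagValLen;  // length of the value copied into data[]
      memcpy(pMsg->data, tagVal, tagValLen);
      return pMsg;
    }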
|
||||
|
|
|
@ -45,6 +45,7 @@ typedef struct {
|
|||
int (*eventCallBack)(void *);
|
||||
void *(*cqCreateFunc)(void *handle, int sid, char *sqlStr, STSchema *pSchema);
|
||||
void (*cqDropFunc)(void *handle);
|
||||
void *(*configFunc)(int32_t vgId, int32_t sid);
|
||||
} STsdbAppH;
|
||||
|
||||
// --------- TSDB REPOSITORY CONFIGURATION DEFINITION
|
||||
|
@ -108,13 +109,14 @@ int tsdbTableSetSName(STableCfg *config, char *sname, bool dup);
|
|||
int tsdbTableSetStreamSql(STableCfg *config, char *sql, bool dup);
|
||||
void tsdbClearTableCfg(STableCfg *config);
|
||||
|
||||
int32_t tsdbGetTableTagVal(TsdbRepoT *repo, STableId *id, int32_t colId, int16_t *type, int16_t *bytes, char **val);
|
||||
char * tsdbGetTableName(TsdbRepoT *repo, const STableId *id, int16_t *bytes);
|
||||
void* tsdbGetTableTagVal(TsdbRepoT* repo, const STableId* id, int32_t colId, int16_t type, int16_t bytes);
|
||||
char* tsdbGetTableName(TsdbRepoT *repo, const STableId *id);
|
||||
STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg);
|
||||
|
||||
int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg);
|
||||
int tsdbDropTable(TsdbRepoT *pRepo, STableId tableId);
|
||||
int tsdbAlterTable(TsdbRepoT *repo, STableCfg *pCfg);
|
||||
int tsdbUpdateTagValue(TsdbRepoT *repo, SUpdateTableTagValMsg *pMsg);
|
||||
TSKEY tsdbGetTableLastKey(TsdbRepoT *repo, uint64_t uid);
|
||||
|
||||
uint32_t tsdbGetFileInfo(TsdbRepoT *repo, char *name, uint32_t *index, uint32_t eindex, int32_t *size);
|
||||
|
|
|
@ -275,22 +275,28 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
|
|||
|
||||
st = taosGetTimestampUs();
|
||||
|
||||
if (taos_query(con, command)) {
|
||||
taos_error(con);
|
||||
TAOS_RES* pSql = taos_query(con, command);
|
||||
if (taos_errno(pSql)) {
|
||||
taos_error(pSql);
|
||||
return;
|
||||
}
|
||||
|
||||
if (regex_match(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) {
|
||||
fprintf(stdout, "Database changed.\n\n");
|
||||
fflush(stdout);
|
||||
|
||||
taos_free_result(pSql);
|
||||
return;
|
||||
}
|
||||
|
||||
int num_fields = taos_field_count(con);
|
||||
int num_fields = taos_field_count(pSql);
|
||||
if (num_fields != 0) { // select and show kinds of commands
|
||||
int error_no = 0;
|
||||
int numOfRows = shellDumpResult(con, fname, &error_no, printMode);
|
||||
if (numOfRows < 0) return;
|
||||
int numOfRows = shellDumpResult(pSql, fname, &error_no, printMode);
|
||||
if (numOfRows < 0) {
|
||||
taos_free_result(pSql);
|
||||
return;
|
||||
}
|
||||
|
||||
et = taosGetTimestampUs();
|
||||
if (error_no == 0) {
|
||||
|
@ -299,7 +305,7 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
|
|||
printf("Query interrupted (%s), %d row(s) in set (%.6fs)\n", taos_errstr(con), numOfRows, (et - st) / 1E6);
|
||||
}
|
||||
} else {
|
||||
int num_rows_affacted = taos_affected_rows(con);
|
||||
int num_rows_affacted = taos_affected_rows(pSql);
|
||||
et = taosGetTimestampUs();
|
||||
printf("Query OK, %d row(s) affected (%.6fs)\n", num_rows_affacted, (et - st) / 1E6);
|
||||
}
|
||||
|
@ -309,6 +315,8 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
|
|||
if (fname != NULL) {
|
||||
wordfree(&full_path);
|
||||
}
|
||||
|
||||
taos_free_result(pSql);
|
||||
}
|
||||
|
||||
/* Function to do regular expression check */
|
||||
|
@ -461,6 +469,7 @@ static int dumpResultToFile(const char* fname, TAOS_RES* result) {
|
|||
} while( row != NULL);
|
||||
|
||||
fclose(fp);
|
||||
taos_free_result(result);
|
||||
return numOfRows;
|
||||
}
|
||||
|
||||
|
@ -548,15 +557,15 @@ static void printField(const char* val, TAOS_FIELD* field, int width, int32_t le
|
|||
}
|
||||
|
||||
|
||||
static int verticalPrintResult(TAOS_RES* result) {
|
||||
TAOS_ROW row = taos_fetch_row(result);
|
||||
static int verticalPrintResult(TAOS_RES* tres) {
|
||||
TAOS_ROW row = taos_fetch_row(tres);
|
||||
if (row == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int num_fields = taos_num_fields(result);
|
||||
TAOS_FIELD *fields = taos_fetch_fields(result);
|
||||
int precision = taos_result_precision(result);
|
||||
int num_fields = taos_num_fields(tres);
|
||||
TAOS_FIELD *fields = taos_fetch_fields(tres);
|
||||
int precision = taos_result_precision(tres);
|
||||
|
||||
int maxColNameLen = 0;
|
||||
for (int col = 0; col < num_fields; col++) {
|
||||
|
@ -569,7 +578,7 @@ static int verticalPrintResult(TAOS_RES* result) {
|
|||
int numOfRows = 0;
|
||||
do {
|
||||
printf("*************************** %d.row ***************************\n", numOfRows + 1);
|
||||
int32_t* length = taos_fetch_lengths(result);
|
||||
int32_t* length = taos_fetch_lengths(tres);
|
||||
for (int i = 0; i < num_fields; i++) {
|
||||
TAOS_FIELD* field = fields + i;
|
||||
|
||||
|
@ -581,7 +590,7 @@ static int verticalPrintResult(TAOS_RES* result) {
|
|||
}
|
||||
|
||||
numOfRows++;
|
||||
row = taos_fetch_row(result);
|
||||
row = taos_fetch_row(tres);
|
||||
} while(row != NULL);
|
||||
|
||||
return numOfRows;
|
||||
|
@ -656,15 +665,15 @@ static void printHeader(TAOS_FIELD* fields, int* width, int num_fields) {
|
|||
}
|
||||
|
||||
|
||||
static int horizontalPrintResult(TAOS_RES* result) {
|
||||
TAOS_ROW row = taos_fetch_row(result);
|
||||
static int horizontalPrintResult(TAOS_RES* tres) {
|
||||
TAOS_ROW row = taos_fetch_row(tres);
|
||||
if (row == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int num_fields = taos_num_fields(result);
|
||||
TAOS_FIELD *fields = taos_fetch_fields(result);
|
||||
int precision = taos_result_precision(result);
|
||||
int num_fields = taos_num_fields(tres);
|
||||
TAOS_FIELD *fields = taos_fetch_fields(tres);
|
||||
int precision = taos_result_precision(tres);
|
||||
|
||||
int width[TSDB_MAX_COLUMNS];
|
||||
for (int col = 0; col < num_fields; col++) {
|
||||
|
@ -675,7 +684,7 @@ static int horizontalPrintResult(TAOS_RES* result) {
|
|||
|
||||
int numOfRows = 0;
|
||||
do {
|
||||
int32_t* length = taos_fetch_lengths(result);
|
||||
int32_t* length = taos_fetch_lengths(tres);
|
||||
for (int i = 0; i < num_fields; i++) {
|
||||
putchar(' ');
|
||||
printField(row[i], fields + i, width[i], length[i], precision);
|
||||
|
@ -684,32 +693,24 @@ static int horizontalPrintResult(TAOS_RES* result) {
|
|||
}
|
||||
putchar('\n');
|
||||
numOfRows++;
|
||||
row = taos_fetch_row(result);
|
||||
row = taos_fetch_row(tres);
|
||||
} while(row != NULL);
|
||||
|
||||
return numOfRows;
|
||||
}
|
||||
|
||||
|
||||
int shellDumpResult(TAOS *con, char *fname, int *error_no, bool vertical) {
|
||||
int shellDumpResult(TAOS_RES *tres, char *fname, int *error_no, bool vertical) {
|
||||
int numOfRows = 0;
|
||||
|
||||
TAOS_RES* result = taos_use_result(con);
|
||||
if (result == NULL) {
|
||||
taos_error(con);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (fname != NULL) {
|
||||
numOfRows = dumpResultToFile(fname, result);
|
||||
numOfRows = dumpResultToFile(fname, tres);
|
||||
} else if(vertical) {
|
||||
numOfRows = verticalPrintResult(result);
|
||||
numOfRows = verticalPrintResult(tres);
|
||||
} else {
|
||||
numOfRows = horizontalPrintResult(result);
|
||||
numOfRows = horizontalPrintResult(tres);
|
||||
}
|
||||
|
||||
*error_no = taos_errno(con);
|
||||
taos_free_result(result);
|
||||
*error_no = taos_errno(tres);
|
||||
return numOfRows;
|
||||
}
|
||||
|
||||
|
@ -771,12 +772,11 @@ void write_history() {
|
|||
fclose(f);
|
||||
}
|
||||
|
||||
void taos_error(TAOS *con) {
|
||||
fprintf(stderr, "\nDB error: %s\n", taos_errstr(con));
|
||||
void taos_error(TAOS_RES *tres) {
|
||||
fprintf(stderr, "\nDB error: %s\n", taos_errstr(tres));
|
||||
|
||||
/* free local resource: allocated memory/metric-meta refcnt */
|
||||
TAOS_RES *pRes = taos_use_result(con);
|
||||
taos_free_result(pRes);
|
||||
taos_free_result(tres);
|
||||
}
|
||||
|
||||
int isCommentLine(char *line) {
|
||||
|
@ -858,7 +858,8 @@ void shellGetGrantInfo(void *con) {
|
|||
|
||||
char sql[] = "show grants";
|
||||
|
||||
int code = taos_query(con, sql);
|
||||
TAOS_RES* pSql = taos_query(con, sql);
|
||||
int code = taos_errno(pSql);
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
if (code == TSDB_CODE_OPS_NOT_SUPPORT) {
|
||||
|
@ -869,12 +870,11 @@ void shellGetGrantInfo(void *con) {
|
|||
return;
|
||||
}
|
||||
|
||||
int num_fields = taos_field_count(con);
|
||||
int num_fields = taos_field_count(result);
|
||||
if (num_fields == 0) {
|
||||
fprintf(stderr, "\nInvalid grant information.\n");
|
||||
exit(0);
|
||||
} else {
|
||||
result = taos_use_result(con);
|
||||
if (result == NULL) {
|
||||
fprintf(stderr, "\nGrant information is null.\n");
|
||||
exit(0);
|
|
@ -192,11 +192,14 @@ static void shellSourceFile(TAOS *con, char *fptr) {
|
|||
}
|
||||
|
||||
memcpy(cmd + cmd_len, line, read_len);
|
||||
if (taos_query(con, cmd)) {
|
||||
|
||||
TAOS_RES* pSql = taos_query(con, cmd);
|
||||
int32_t code = taos_errno(pSql);
|
||||
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "DB error: %s: %s (%d)\n", taos_errstr(con), fname, lineNo);
|
||||
/* free local resource: allocated memory/metric-meta refcnt */
|
||||
TAOS_RES *pRes = taos_use_result(con);
|
||||
taos_free_result(pRes);
|
||||
taos_free_result(pSql);
|
||||
}
|
||||
|
||||
memset(cmd, 0, MAX_COMMAND_SIZE);
|
||||
|
|
|
@ -18,21 +18,21 @@
|
|||
#include "tsclient.h"
|
||||
#include "tutil.h"
|
||||
|
||||
TAOS* con;
|
||||
TAOS_RES* con;
|
||||
pthread_t pid;
|
||||
|
||||
// TODO: IMPLEMENT INTERRUPT HANDLER.
|
||||
void interruptHandler(int signum) {
|
||||
#ifdef LINUX
|
||||
TAOS_RES* res = taos_use_result(con);
|
||||
taos_stop_query(res);
|
||||
if (res != NULL) {
|
||||
taos_stop_query(con);
|
||||
if (con != NULL) {
|
||||
/*
|
||||
* we need to free result in async model, in order to avoid free
|
||||
* results while the master thread is waiting for server response.
|
||||
*/
|
||||
tscQueueAsyncFreeResult(res);
|
||||
tscQueueAsyncFreeResult(con);
|
||||
}
|
||||
|
||||
result = NULL;
|
||||
#else
|
||||
printf("\nReceive ctrl+c or other signal, quit shell.\n");
|
||||
|
@ -90,7 +90,6 @@ int main(int argc, char* argv[]) {
|
|||
/* Initialize the shell */
|
||||
con = shellInit(&args);
|
||||
if (con == NULL) {
|
||||
taos_error(con);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
|
|
|
@ -461,8 +461,7 @@ int main(int argc, char *argv[]) {
|
|||
taos_init();
|
||||
TAOS *taos = taos_connect(ip_addr, user, pass, NULL, port);
|
||||
if (taos == NULL) {
|
||||
fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(taos));
|
||||
taos_close(taos);
|
||||
fprintf(stderr, "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL));
|
||||
return 1;
|
||||
}
|
||||
char command[BUFFER_SIZE] = "\0";
|
||||
|
@ -708,27 +707,24 @@ void *readTable(void *sarg) {
|
|||
sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime);
|
||||
|
||||
double t = getCurrentTime();
|
||||
if (taos_query(taos, command) != 0) {
|
||||
fprintf(stderr, "Failed to query\n");
|
||||
TAOS_RES *pSql = taos_query(taos, command);
|
||||
int32_t code = taos_errno(pSql);
|
||||
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "Failed to query:%s\n", taos_errstr(taos));
|
||||
taos_free_result(pSql);
|
||||
taos_close(taos);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
TAOS_RES *result = taos_use_result(taos);
|
||||
if (result == NULL) {
|
||||
fprintf(stderr, "Failed to retreive results:%s\n", taos_errstr(taos));
|
||||
taos_close(taos);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
while (taos_fetch_row(result) != NULL) {
|
||||
while (taos_fetch_row(pSql) != NULL) {
|
||||
count++;
|
||||
}
|
||||
|
||||
t = getCurrentTime() - t;
|
||||
totalT += t;
|
||||
|
||||
taos_free_result(result);
|
||||
taos_free_result(pSql);
|
||||
}
|
||||
|
||||
fprintf(fp, "|%10s | %10d | %12.2f | %10.2f |\n",
|
||||
|
@ -779,20 +775,18 @@ void *readMetric(void *sarg) {
|
|||
fprintf(fp, "%s\n", command);
|
||||
|
||||
double t = getCurrentTime();
|
||||
if (taos_query(taos, command) != 0) {
|
||||
fprintf(stderr, "Failed to query\n");
|
||||
taos_close(taos);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
|
||||
TAOS_RES *result = taos_use_result(taos);
|
||||
if (result == NULL) {
|
||||
fprintf(stderr, "Failed to retreive results:%s\n", taos_errstr(taos));
|
||||
TAOS_RES *pSql = taos_query(taos, command);
|
||||
int32_t code = taos_errno(pSql);
|
||||
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "Failed to query:%s\n", taos_errstr(taos));
|
||||
taos_free_result(pSql);
|
||||
taos_close(taos);
|
||||
exit(1);
|
||||
}
|
||||
int count = 0;
|
||||
while (taos_fetch_row(result) != NULL) {
|
||||
while (taos_fetch_row(pSql) != NULL) {
|
||||
count++;
|
||||
}
|
||||
t = getCurrentTime() - t;
|
||||
|
@ -800,7 +794,7 @@ void *readMetric(void *sarg) {
|
|||
fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", num_of_tables * num_of_DPT / t, t * 1000);
|
||||
printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t);
|
||||
|
||||
taos_free_result(result);
|
||||
taos_free_result(pSql);
|
||||
}
|
||||
fprintf(fp, "\n");
|
||||
}
|
||||
|
@ -811,10 +805,19 @@ void *readMetric(void *sarg) {
|
|||
|
||||
void queryDB(TAOS *taos, char *command) {
|
||||
int i = 5;
|
||||
while (i > 0) {
|
||||
if (taos_query(taos, command) == 0) break;
|
||||
TAOS_RES *pSql = NULL;
|
||||
int32_t code = -1;
|
||||
while (i > 0 && code != 0) {
|
||||
pSql = taos_query(taos, command);
|
||||
code = taos_errno(pSql);
|
||||
taos_free_result(pSql);
|
||||
pSql = NULL;
|
||||
if (code == 0) {
|
||||
break;
|
||||
}
|
||||
i--;
|
||||
}
|
||||
|
||||
if (i == 0) {
|
||||
fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(taos));
|
||||
taos_close(taos);
|
||||
|
@ -947,6 +950,7 @@ void callBack(void *param, TAOS_RES *res, int code) {
|
|||
break;
|
||||
}
|
||||
}
|
||||
tb_info->timestamp = tmp_time;
|
||||
|
||||
taos_query_a(tb_info->taos, buffer, callBack, tb_info);
|
||||
|
||||
|
|
|
@ -372,14 +372,12 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo) {
|
|||
memset(pTableRecordInfo, 0, sizeof(STableRecordInfo));
|
||||
|
||||
sprintf(command, "show tables like %s", table);
|
||||
if (taos_query(taos, command) != 0) {
|
||||
fprintf(stderr, "failed to run command %s\n", command);
|
||||
return -1;
|
||||
}
|
||||
TAOS_RES *result = taos_query(taos, command);
|
||||
int32_t code = taos_errno(result);
|
||||
|
||||
result = taos_use_result(taos);
|
||||
if (result == NULL) {
|
||||
fprintf(stderr, "failed to use result\n");
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result));
|
||||
taos_free_result(result);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -400,14 +398,12 @@ int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo) {
|
|||
if (isSet) return 0;
|
||||
|
||||
sprintf(command, "show stables like %s", table);
|
||||
if (taos_query(taos, command) != 0) {
|
||||
fprintf(stderr, "failed to run command %s\n", command);
|
||||
return -1;
|
||||
}
|
||||
|
||||
result = taos_use_result(taos);
|
||||
if (result == NULL) {
|
||||
fprintf(stderr, "failed to use result\n");
|
||||
result = taos_query(taos, command);
|
||||
code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result));
|
||||
taos_free_result(result);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -467,14 +463,11 @@ int taosDumpOut(SDumpArguments *arguments) {
|
|||
taosDumpCharset(fp);
|
||||
|
||||
sprintf(command, "show databases");
|
||||
if (taos_query(taos, command) != 0) {
|
||||
result = taos_query(taos, command);
|
||||
int32_t code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(taos));
|
||||
goto _exit_failure;
|
||||
}
|
||||
|
||||
result = taos_use_result(taos);
|
||||
if (result == NULL) {
|
||||
fprintf(stderr, "failed to use result\n");
|
||||
taos_free_result(result);
|
||||
goto _exit_failure;
|
||||
}
|
||||
|
||||
|
@ -551,7 +544,7 @@ int taosDumpOut(SDumpArguments *arguments) {
|
|||
taosDumpCreateDbClause(dbInfos[0], arguments->with_property, fp);
|
||||
|
||||
sprintf(command, "use %s", dbInfos[0]->name);
|
||||
if (taos_query(taos, command) != 0) {
|
||||
if (taos_query(taos, command) == NULL ) {
|
||||
fprintf(stderr, "invalid database %s\n", dbInfos[0]->name);
|
||||
goto _exit_failure;
|
||||
}
|
||||
|
@ -612,7 +605,7 @@ int taosDumpDb(SDbInfo *dbInfo, SDumpArguments *arguments, FILE *fp) {
|
|||
taosDumpCreateDbClause(dbInfo, arguments->with_property, fp);
|
||||
|
||||
sprintf(command, "use %s", dbInfo->name);
|
||||
if (taos_query(taos, command) != 0) {
|
||||
if (taos_errno(taos_query(taos, command)) != 0) {
|
||||
fprintf(stderr, "invalid database %s\n", dbInfo->name);
|
||||
return -1;
|
||||
}
|
||||
|
@ -620,14 +613,11 @@ int taosDumpDb(SDbInfo *dbInfo, SDumpArguments *arguments, FILE *fp) {
|
|||
fprintf(fp, "USE %s\n\n", dbInfo->name);
|
||||
|
||||
sprintf(command, "show tables");
|
||||
if (taos_query(taos, command) != 0) {
|
||||
fprintf(stderr, "failed to run command %s\n", command);
|
||||
return -1;
|
||||
}
|
||||
|
||||
result = taos_use_result(taos);
|
||||
if (result == NULL) {
|
||||
fprintf(stderr, "failed to use result\n");
|
||||
result = taos_query(taos,command);
|
||||
int32_t code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result));
|
||||
taos_free_result(result);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -725,14 +715,11 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols
|
|||
TAOS_ROW row = NULL;
|
||||
|
||||
sprintf(command, "select %s from %s limit 1", tableDes->cols[counter].field, tableDes->name);
|
||||
if (taos_query(taos, command) != 0) {
|
||||
fprintf(stderr, "failed to run command %s\n", command);
|
||||
return;
|
||||
}
|
||||
|
||||
result = taos_use_result(taos);
|
||||
if (result == NULL) {
|
||||
fprintf(stderr, "failed to use result\n");
|
||||
result = taos_query(taos, command);
|
||||
int32_t code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result));
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -806,14 +793,12 @@ int taosGetTableDes(char *table, STableDef *tableDes) {
|
|||
int count = 0;
|
||||
|
||||
sprintf(command, "describe %s", table);
|
||||
if (taos_query(taos, command) != 0) {
|
||||
fprintf(stderr, "failed to run command %s\n", command);
|
||||
return -1;
|
||||
}
|
||||
|
||||
result = taos_use_result(taos);
|
||||
if (result == NULL) {
|
||||
fprintf(stderr, "failed to use result\n");
|
||||
result = taos_query(taos, command);
|
||||
int32_t code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result));
|
||||
taos_free_result(result);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -889,14 +874,11 @@ int32_t taosDumpMetric(char *metric, SDumpArguments *arguments, FILE *fp) {
|
|||
strcpy(tableRecord.metric, metric);
|
||||
|
||||
sprintf(command, "select tbname from %s", metric);
|
||||
if (taos_query(taos, command) != 0) {
|
||||
fprintf(stderr, "failed to run command %s\n", command);
|
||||
return -1;
|
||||
}
|
||||
|
||||
result = taos_use_result(taos);
|
||||
if (result == NULL) {
|
||||
fprintf(stderr, "failed to use result\n");
|
||||
result = taos_query(taos, command);
|
||||
int32_t code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result));
|
||||
taos_free_result(result);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -942,18 +924,16 @@ int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) {
|
|||
|
||||
sprintf(command, "select * from %s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc", tbname, arguments->start_time,
|
||||
arguments->end_time);
|
||||
if (taos_query(taos, command) != 0) {
|
||||
fprintf(stderr, "failed to run command %s, reason: %s\n", command, taos_errstr(taos));
|
||||
|
||||
result = taos_query(taos, command);
|
||||
int32_t code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "failed to run command %s, reason: %s\n", command, taos_errstr(result));
|
||||
taos_free_result(result);
|
||||
return -1;
|
||||
}
|
||||
|
||||
result = taos_use_result(taos);
|
||||
if (result == NULL) {
|
||||
fprintf(stderr, "failed to use result\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
numFields = taos_field_count(taos);
|
||||
numFields = taos_field_count(result);
|
||||
assert(numFields > 0);
|
||||
TAOS_FIELD *fields = taos_fetch_fields(result);
|
||||
tbuf = (char *)malloc(COMMAND_SIZE);
|
||||
|
@ -1194,7 +1174,7 @@ int taosDumpIn(SDumpArguments *arguments) {
|
|||
tcommand = command;
|
||||
}
|
||||
taosReplaceCtrlChar(tcommand);
|
||||
if (taos_query(taos, tcommand) != 0)
|
||||
if (taos_query(taos, tcommand) == NULL)
|
||||
fprintf(stderr, "linenu: %" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command,
|
||||
taos_errstr(taos));
|
||||
|
||||
|
@ -1242,9 +1222,14 @@ int taosDumpIn(SDumpArguments *arguments) {
|
|||
tcommand = command;
|
||||
}
|
||||
taosReplaceCtrlChar(tcommand);
|
||||
if (taos_query(taos, tcommand) != 0)
|
||||
result = taos_query(taos, tcommand);
|
||||
int32_t code = taos_errno(result);
|
||||
if (code != 0)
|
||||
{
|
||||
fprintf(stderr, "linenu:%" PRId64 " failed to run command %s reason: %s \ncontinue...\n", linenu, command,
|
||||
taos_errstr(taos));
|
||||
}
|
||||
taos_free_result(result);
|
||||
}
|
||||
|
||||
pstr = command;
|
||||
|
@ -1265,7 +1250,7 @@ int taosDumpIn(SDumpArguments *arguments) {
|
|||
tcommand = command;
|
||||
}
|
||||
taosReplaceCtrlChar(lcommand);
|
||||
if (taos_query(taos, tcommand) != 0)
|
||||
if (taos_query(taos, tcommand) == NULL)
|
||||
fprintf(stderr, "linenu:%" PRId64 " failed to run command %s reason:%s \ncontinue...\n", linenu, command,
|
||||
taos_errstr(taos));
|
||||
}
|
||||
|
|
|
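The taosdump hunks above all follow the same migration: taos_query() is now treated as returning a TAOS_RES handle, the handle is checked with taos_errno()/taos_errstr(), and it is always released with taos_free_result(), replacing the old taos_use_result() flow. A minimal sketch of the new pattern; the helper name and the assumption of an already-opened TAOS *taos connection (and the standard client header taos.h) are illustrative, not part of this commit:
#include <stdio.h>
#include "taos.h"
// Run one statement and report any error through the result handle.
static int runCommand(TAOS *taos, const char *command) {
  TAOS_RES *result = taos_query(taos, command);  // a handle is returned even on failure
  int32_t   code   = taos_errno(result);         // 0 means success
  if (code != 0) {
    fprintf(stderr, "failed to run command %s, error: %s\n", command, taos_errstr(result));
    taos_free_result(result);                    // the handle must still be released
    return -1;
  }
  taos_free_result(result);
  return 0;
}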
@ -0,0 +1,136 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <arpa/inet.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <netdb.h>
|
||||
#include <netinet/in.h>
|
||||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#define BUFFER_SIZE 200
|
||||
|
||||
typedef struct {
|
||||
int port;
|
||||
char *host[15];
|
||||
} info;
|
||||
|
||||
void *checkPort(void *sarg) {
|
||||
info *pinfo = (info *)sarg;
|
||||
int port = pinfo->port;
|
||||
char *host = *pinfo->host;
|
||||
int clientSocket;
|
||||
|
||||
struct sockaddr_in serverAddr;
|
||||
char sendbuf[BUFFER_SIZE];
|
||||
char recvbuf[BUFFER_SIZE];
|
||||
int iDataNum;
|
||||
if ((clientSocket = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
|
||||
perror("socket");
|
||||
return NULL;
|
||||
}
|
||||
serverAddr.sin_family = AF_INET;
|
||||
serverAddr.sin_port = htons(port);
|
||||
|
||||
serverAddr.sin_addr.s_addr = inet_addr(host);
|
||||
|
||||
printf("=================================\n");
|
||||
if (connect(clientSocket, (struct sockaddr *)&serverAddr, sizeof(serverAddr)) < 0) {
|
||||
perror("connect");
|
||||
return NULL;
|
||||
}
|
||||
printf("Connect to: %s:%d...success\n", host, port);
|
||||
|
||||
sprintf(sendbuf, "send port_%d", port);
|
||||
send(clientSocket, sendbuf, strlen(sendbuf), 0);
|
||||
printf("Send msg_%d: %s\n", port, sendbuf);
|
||||
|
||||
recvbuf[0] = '\0';
|
||||
iDataNum = recv(clientSocket, recvbuf, BUFFER_SIZE, 0);
|
||||
recvbuf[iDataNum] = '\0';
|
||||
printf("Read ack msg_%d: %s\n", port, recvbuf);
|
||||
|
||||
printf("=================================\n");
|
||||
close(clientSocket);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *checkUPort(void *sarg) {
|
||||
info *pinfo = (info *)sarg;
|
||||
int port = pinfo->port;
|
||||
char *host = *pinfo->host;
|
||||
int clientSocket;
|
||||
|
||||
struct sockaddr_in serverAddr;
|
||||
char sendbuf[BUFFER_SIZE];
|
||||
char recvbuf[BUFFER_SIZE];
|
||||
int iDataNum;
|
||||
if ((clientSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) {
|
||||
perror("socket");
|
||||
return NULL;
|
||||
}
|
||||
serverAddr.sin_family = AF_INET;
|
||||
serverAddr.sin_port = htons(port);
|
||||
|
||||
serverAddr.sin_addr.s_addr = inet_addr(host);
|
||||
|
||||
printf("=================================\n");
|
||||
|
||||
sprintf(sendbuf, "send msg port_%d by udp", port);
|
||||
|
||||
socklen_t sin_size = sizeof(*(struct sockaddr*)&serverAddr);
|
||||
|
||||
sendto(clientSocket, sendbuf, strlen(sendbuf), 0, (struct sockaddr *)&serverAddr, (int)sin_size);
|
||||
|
||||
printf("Send msg_%d by udp: %s\n", port, sendbuf);
|
||||
|
||||
recvbuf[0] = '\0';
|
||||
iDataNum = recvfrom(clientSocket, recvbuf, BUFFER_SIZE, 0, (struct sockaddr *)&serverAddr, &sin_size);
|
||||
recvbuf[iDataNum] = '\0';
|
||||
printf("Read ack msg_%d from udp: %s\n", port, recvbuf);
|
||||
|
||||
printf("=================================\n");
|
||||
close(clientSocket);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int main() {
|
||||
int port = 6020;
|
||||
char *host = "127.0.0.1";
|
||||
info *tinfo = malloc(sizeof(info));
|
||||
info *uinfo = malloc(sizeof(info));
|
||||
|
||||
for (size_t i = 0; i < 30; i++) {
|
||||
port++;
|
||||
printf("For test: %s:%d\n", host, port);
|
||||
|
||||
*tinfo->host = host;
|
||||
tinfo->port = port;
|
||||
checkPort(tinfo);
|
||||
|
||||
*uinfo->host = host;
|
||||
uinfo->port = port;
|
||||
checkUPort(uinfo);
|
||||
}
|
||||
free(tinfo);
|
||||
free(uinfo);
|
||||
}
|
|
@ -0,0 +1,204 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <arpa/inet.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <netdb.h>
|
||||
#include <netinet/in.h>
|
||||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#define BUFFER_SIZE 200
|
||||
|
||||
typedef struct {
|
||||
int port;
|
||||
int type; // 0: tcp, 1: udp, default: 0
|
||||
} info;
|
||||
|
||||
static void *bindPort(void *sarg) {
|
||||
info *pinfo = (info *)sarg;
|
||||
int port = pinfo->port;
|
||||
int type = pinfo->type;
|
||||
int serverSocket;
|
||||
|
||||
struct sockaddr_in server_addr;
|
||||
struct sockaddr_in clientAddr;
|
||||
int addr_len = sizeof(clientAddr);
|
||||
int client;
|
||||
char buffer[BUFFER_SIZE];
|
||||
int iDataNum;
|
||||
|
||||
if ((serverSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) {
|
||||
perror("socket");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bzero(&server_addr, sizeof(server_addr));
|
||||
server_addr.sin_family = AF_INET;
|
||||
server_addr.sin_port = htons(port);
|
||||
server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
|
||||
|
||||
if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
|
||||
perror("connect");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (listen(serverSocket, 5) < 0) {
|
||||
perror("listen");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
printf("Bind port: %d success\n", port);
|
||||
while (1) {
|
||||
client = accept(serverSocket, (struct sockaddr *)&clientAddr, (socklen_t *)&addr_len);
|
||||
if (client < 0) {
|
||||
perror("accept");
|
||||
continue;
|
||||
}
|
||||
printf("=================================\n");
|
||||
|
||||
printf("Client ip is %s, Server port is %d\n", inet_ntoa(clientAddr.sin_addr), port);
|
||||
while (1) {
|
||||
buffer[0] = '\0';
|
||||
iDataNum = recv(client, buffer, BUFFER_SIZE, 0);
|
||||
|
||||
if (iDataNum < 0) {
|
||||
perror("recv null");
|
||||
continue;
|
||||
}
|
||||
if (iDataNum > 0) {
|
||||
buffer[iDataNum] = '\0';
|
||||
printf("read msg:%s\n", buffer);
|
||||
if (strcmp(buffer, "quit") == 0) break;
|
||||
buffer[0] = '\0';
|
||||
|
||||
sprintf(buffer, "ack port_%d", port);
|
||||
printf("send ack msg:%s\n", buffer);
|
||||
|
||||
send(client, buffer, strlen(buffer), 0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
printf("=================================\n");
|
||||
}
|
||||
close(serverSocket);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *bindUPort(void *sarg) {
|
||||
info *pinfo = (info *)sarg;
|
||||
int port = pinfo->port;
|
||||
int type = pinfo->type;
|
||||
int serverSocket;
|
||||
|
||||
struct sockaddr_in server_addr;
|
||||
struct sockaddr_in clientAddr;
|
||||
int addr_len = sizeof(clientAddr);
|
||||
int client;
|
||||
char buffer[BUFFER_SIZE];
|
||||
int iDataNum;
|
||||
|
||||
if ((serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) {
|
||||
perror("socket");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bzero(&server_addr, sizeof(server_addr));
|
||||
server_addr.sin_family = AF_INET;
|
||||
server_addr.sin_port = htons(port);
|
||||
server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
|
||||
|
||||
if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
|
||||
perror("connect");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
socklen_t sin_size;
|
||||
printf("Bind port: %d success\n", port);
|
||||
|
||||
while (1) {
|
||||
buffer[0] = '\0';
|
||||
|
||||
sin_size = sizeof(*(struct sockaddr *)&server_addr);
|
||||
|
||||
iDataNum = recvfrom(serverSocket, buffer, BUFFER_SIZE, 0, (struct sockaddr *)&clientAddr, &sin_size);
|
||||
|
||||
if (iDataNum < 0) {
|
||||
perror("recvfrom null");
|
||||
continue;
|
||||
}
|
||||
if (iDataNum > 0) {
|
||||
printf("=================================\n");
|
||||
|
||||
printf("Client ip is %s, Server port is %d\n", inet_ntoa(clientAddr.sin_addr), port);
|
||||
buffer[iDataNum] = '\0';
|
||||
printf("Read msg from udp:%s\n", buffer);
|
||||
if (strcmp(buffer, "quit") == 0) break;
|
||||
buffer[0] = '\0';
|
||||
|
||||
sprintf(buffer, "ack port_%d by udp", port);
|
||||
printf("Send ack msg by udp:%s\n", buffer);
|
||||
|
||||
sendto(serverSocket, buffer, strlen(buffer), 0, (struct sockaddr *)&clientAddr, (int)sin_size);
|
||||
|
||||
|
||||
printf("=================================\n");
|
||||
}
|
||||
}
|
||||
|
||||
close(serverSocket);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
int main() {
|
||||
int port = 6020;
|
||||
pthread_t *pids = malloc(60 * sizeof(pthread_t));
|
||||
info * infos = malloc(30 * sizeof(info));
|
||||
info * uinfos = malloc(30 * sizeof(info));
|
||||
|
||||
for (size_t i = 0; i < 30; i++) {
|
||||
port++;
|
||||
|
||||
info *pinfo = infos++;
|
||||
pinfo->port = port;
|
||||
|
||||
if (pthread_create(pids + i, NULL, bindPort, pinfo) != 0) // create the TCP listener thread
|
||||
{ // thread creation failed
|
||||
printf("创建线程失败: %d.\n", port);
|
||||
exit(0);
|
||||
}
|
||||
|
||||
info *uinfo = uinfos++;
|
||||
uinfo->port = port;
|
||||
uinfo->type = 1;
|
||||
if (pthread_create(pids + 30 + i, NULL, bindUPort, uinfo) != 0) // create the UDP listener thread
|
||||
{ // thread creation failed
|
||||
printf("创建线程失败: %d.\n", port);
|
||||
exit(0);
|
||||
}
|
||||
}
|
||||
for (int i = 0; i < 30; i++) {
|
||||
pthread_join(pids[i], NULL);
|
||||
pthread_join(pids[(30 + i)], NULL);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <netinet/in.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <unistd.h>
|
||||
#define SERVER_PORT 8000
|
||||
#define SIZE 200
|
||||
|
||||
int main() {
|
||||
struct sockaddr_in servaddr, cliaddr;
|
||||
socklen_t cliaddr_len;
|
||||
int client_sockfd;
|
||||
char buf[SIZE];
|
||||
char recvbuf[SIZE];
|
||||
|
||||
int i, n, flag = 0;
|
||||
|
||||
int len, iDataNum;
|
||||
|
||||
client_sockfd = socket(AF_INET, SOCK_STREAM, 0);
|
||||
bzero(&servaddr, sizeof(servaddr));
|
||||
servaddr.sin_family = AF_INET;
|
||||
servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
|
||||
servaddr.sin_port = htons(SERVER_PORT);
|
||||
|
||||
if (connect(client_sockfd, (struct sockaddr *)&servaddr, sizeof(servaddr)) < 0) {
|
||||
printf("Connected error..\n");
|
||||
return 0;
|
||||
}
|
||||
printf("Connected to server..\n");
|
||||
|
||||
/* loop: send and receive messages and print what is received (send as needed) -- recv returns the number of bytes received, send returns the number of bytes sent */
|
||||
while (1) {
|
||||
printf("Enter string to send:");
|
||||
scanf("%s", buf);
|
||||
if (!strcmp(buf, "quit")) {
|
||||
break;
|
||||
}
|
||||
len = strlen(buf);
send(client_sockfd, buf, len, 0);
|
||||
|
||||
recvbuf[0] = '\0';
|
||||
|
||||
iDataNum = recv(client_sockfd, recvbuf, SIZE, 0);
|
||||
|
||||
recvbuf[iDataNum] = '\0';
|
||||
|
||||
printf("%s\n", recvbuf);
|
||||
}
|
||||
return 0;
|
||||
}
|
|
@ -0,0 +1,57 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <netinet/in.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <unistd.h>
|
||||
#define SERVER_PORT 8000
|
||||
#define SIZE 200
|
||||
|
||||
int main() {
|
||||
struct sockaddr_in servaddr, cliaddr;
|
||||
socklen_t cliaddr_len;
|
||||
int listenfd, connfd;
|
||||
char buf[BUFSIZ];
|
||||
int i, n, flag = 0;
|
||||
|
||||
listenfd = socket(AF_INET, SOCK_STREAM, 0);
|
||||
bzero(&servaddr, sizeof(servaddr));
|
||||
servaddr.sin_family = AF_INET;
|
||||
servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
|
||||
servaddr.sin_port = htons(SERVER_PORT);
|
||||
bind(listenfd, (struct sockaddr *)&servaddr, sizeof(servaddr));
|
||||
listen(listenfd, 20);
|
||||
|
||||
printf("Accepting connections..\n");
|
||||
while (1) {
|
||||
cliaddr_len = sizeof(cliaddr);
|
||||
connfd = accept(listenfd, (struct sockaddr *)&cliaddr,
|
||||
&cliaddr_len); //如果得不到客户端发来的消息,将会被阻塞,一直等到消息到来
|
||||
n = read(connfd, buf, SIZE); // n <= 0 means the client has disconnected
|
||||
while (1) {
|
||||
if (n != 0) {
|
||||
for (i = 0; i < n; i++) printf("%c", buf[i]); //输出客户端发来的信息
|
||||
} else {
|
||||
printf("Client say close the connection..\n");
|
||||
break;
|
||||
}
|
||||
n = read(connfd, buf, SIZE);
|
||||
}
|
||||
close(connfd);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,50 @@
|
|||
#include <netinet/in.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#define SERVER_PORT 8888
|
||||
#define BUFF_LEN 512
|
||||
#define SERVER_IP "172.0.5.182"
|
||||
|
||||
void udp_msg_sender(int fd, struct sockaddr* dst) {}
|
||||
|
||||
/*
|
||||
client:
|
||||
socket-->sendto-->revcfrom-->close
|
||||
*/
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
int client_fd;
|
||||
struct sockaddr_in ser_addr;
|
||||
|
||||
client_fd = socket(AF_INET, SOCK_DGRAM, 0);
|
||||
if (client_fd < 0) {
|
||||
printf("create socket fail!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
memset(&ser_addr, 0, sizeof(ser_addr));
|
||||
ser_addr.sin_family = AF_INET;
|
||||
// ser_addr.sin_addr.s_addr = inet_addr(SERVER_IP);
|
||||
ser_addr.sin_addr.s_addr = htonl(INADDR_ANY); // note the network byte order conversion
|
||||
ser_addr.sin_port = htons(SERVER_PORT); // note the network byte order conversion
|
||||
|
||||
socklen_t len;
|
||||
struct sockaddr_in src;
|
||||
while (1) {
|
||||
char buf[BUFF_LEN] = "TEST UDP MSG!\n";
|
||||
len = sizeof(*(struct sockaddr*)&ser_addr);
|
||||
printf("client:%s\n", buf); //打印自己发送的信息
|
||||
sendto(client_fd, buf, BUFF_LEN, 0, (struct sockaddr*)&ser_addr, len);
|
||||
memset(buf, 0, BUFF_LEN);
|
||||
recvfrom(client_fd, buf, BUFF_LEN, 0, (struct sockaddr*)&src, &len); // receive the reply from the server
|
||||
printf("server:%s\n", buf);
|
||||
sleep(1); // send one message per second
|
||||
}
|
||||
|
||||
close(client_fd);
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -0,0 +1,63 @@
|
|||
#include <netinet/in.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#define SERVER_PORT 8888
|
||||
#define BUFF_LEN 1024
|
||||
|
||||
void handle_udp_msg(int fd) {
|
||||
char buf[BUFF_LEN]; // receive buffer, 1024 bytes
|
||||
socklen_t len;
|
||||
int count;
|
||||
struct sockaddr_in clent_addr; // clent_addr records the sender's address
|
||||
while (1) {
|
||||
memset(buf, 0, BUFF_LEN);
|
||||
len = sizeof(clent_addr);
|
||||
count =
|
||||
recvfrom(fd, buf, BUFF_LEN, 0, (struct sockaddr*)&clent_addr, &len); // recvfrom is a blocking call; it waits until data arrives
|
||||
if (count == -1) {
|
||||
printf("recieve data fail!\n");
|
||||
return;
|
||||
}
|
||||
printf("client:%s\n", buf); //打印client发过来的信息
|
||||
memset(buf, 0, BUFF_LEN);
|
||||
sprintf(buf, "I have recieved %d bytes data!\n", count); //回复client
|
||||
printf("server:%s\n", buf); //打印自己发送的信息给
|
||||
sendto(fd, buf, BUFF_LEN, 0, (struct sockaddr*)&clent_addr,
|
||||
len); // send the reply to the client; note the clent_addr struct filled in by recvfrom is used
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
server:
|
||||
socket-->bind-->recvfrom-->sendto-->close
|
||||
*/
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
int server_fd, ret;
|
||||
struct sockaddr_in ser_addr;
|
||||
|
||||
server_fd = socket(AF_INET, SOCK_DGRAM, 0); // AF_INET:IPV4;SOCK_DGRAM:UDP
|
||||
if (server_fd < 0) {
|
||||
printf("create socket fail!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
memset(&ser_addr, 0, sizeof(ser_addr));
|
||||
ser_addr.sin_family = AF_INET;
|
||||
ser_addr.sin_addr.s_addr = htonl(INADDR_ANY); // IP address in network byte order; INADDR_ANY means any local address
|
||||
ser_addr.sin_port = htons(SERVER_PORT); // port number in network byte order
|
||||
|
||||
ret = bind(server_fd, (struct sockaddr*)&ser_addr, sizeof(ser_addr));
|
||||
if (ret < 0) {
|
||||
printf("socket bind fail!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
handle_udp_msg(server_fd); // process incoming data
|
||||
|
||||
close(server_fd);
|
||||
return 0;
|
||||
}
|
|
@ -1352,11 +1352,14 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableO
|
|||
int32_t tagDataLen = 0;
|
||||
int32_t totalCols = 0;
|
||||
int32_t contLen = 0;
|
||||
if (pTable->info.type == TSDB_CHILD_TABLE && pMsg != NULL) {
|
||||
pTagData = (STagData*)pMsg->schema;
|
||||
tagDataLen = ntohl(pTagData->dataLen);
|
||||
if (pTable->info.type == TSDB_CHILD_TABLE) {
|
||||
totalCols = pTable->superTable->numOfColumns + pTable->superTable->numOfTags;
|
||||
contLen = sizeof(SMDCreateTableMsg) + totalCols * sizeof(SSchema) + tagDataLen + pTable->sqlLen;
|
||||
if (pMsg != NULL) {
|
||||
pTagData = (STagData *)pMsg->schema;
|
||||
tagDataLen = ntohl(pTagData->dataLen);
|
||||
contLen += tagDataLen;
|
||||
}
|
||||
} else {
|
||||
totalCols = pTable->numOfColumns;
|
||||
contLen = sizeof(SMDCreateTableMsg) + totalCols * sizeof(SSchema) + pTable->sqlLen;
|
||||
|
@ -1410,7 +1413,7 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableO
|
|||
memcpy(pCreate->data + totalCols * sizeof(SSchema), pTagData->data, tagDataLen);
|
||||
}
|
||||
|
||||
if (pTable->info.type == TSDB_STREAM_TABLE && pMsg != NULL) {
|
||||
if (pTable->info.type == TSDB_STREAM_TABLE) {
|
||||
memcpy(pCreate->data + totalCols * sizeof(SSchema), pTable->sql, pTable->sqlLen);
|
||||
}
|
||||
|
||||
|
|
|
@ -245,10 +245,10 @@ static void taosGetSystemLocale() { // get and set default locale
|
|||
strncpy(tsCharset, revisedCharset, tListLen(tsCharset));
|
||||
|
||||
free(revisedCharset);
|
||||
uError("charset not configured, set to system default:%s", tsCharset);
|
||||
uWarn("charset not configured, set to system default:%s", tsCharset);
|
||||
} else {
|
||||
strcpy(tsCharset, "UTF-8");
|
||||
uError("can't get locale and charset from system, set it to UTF-8");
|
||||
uWarn("can't get locale and charset from system, set it to UTF-8");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -58,10 +58,10 @@ void httpProcessMultiSqlRetrieveCallBack(void *param, TAOS_RES *result, int numO
|
|||
if (numOfRows < 0) {
|
||||
httpError("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, retrieve failed code:%s, sql:%s",
|
||||
pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, tstrerror(numOfRows), sql);
|
||||
} else {
|
||||
taos_free_result(result);
|
||||
}
|
||||
|
||||
taos_free_result(result);
|
||||
|
||||
if (singleCmd->cmdReturnType == HTTP_CMD_RETURN_TYPE_WITH_RETURN && encode->stopJsonFp) {
|
||||
(encode->stopJsonFp)(pContext, singleCmd);
|
||||
}
|
||||
|
@ -103,12 +103,15 @@ void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) {
|
|||
}
|
||||
multiCmds->pos++;
|
||||
httpProcessMultiSql(pContext);
|
||||
|
||||
taos_free_result(result);
|
||||
return;
|
||||
}
|
||||
|
||||
if (result == NULL) {
|
||||
int num_fields = taos_field_count(result);
|
||||
if (num_fields == 0) {
|
||||
// not select or show commands
|
||||
int affectRows = code;
|
||||
int affectRows = taos_affected_rows(result);
|
||||
httpTrace("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, affect rows:%d, sql:%s",
|
||||
pContext, pContext->fd, pContext->ipstr, pContext->user, multiCmds->pos, affectRows, sql);
|
||||
|
||||
|
@ -132,6 +135,7 @@ void httpProcessMultiSqlCallBack(void *param, TAOS_RES *result, int code) {
|
|||
multiCmds->pos++;
|
||||
}
|
||||
|
||||
taos_free_result(result);
|
||||
httpProcessMultiSql(pContext);
|
||||
} else {
|
||||
httpTrace("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start retrieve, sql:%s",
|
||||
|
@ -212,10 +216,10 @@ void httpProcessSingleSqlRetrieveCallBack(void *param, TAOS_RES *result, int num
|
|||
if (numOfRows < 0) {
|
||||
httpError("context:%p, fd:%d, ip:%s, user:%s, retrieve failed, code:%s", pContext, pContext->fd, pContext->ipstr,
|
||||
pContext->user, tstrerror(numOfRows));
|
||||
} else {
|
||||
taos_free_result(result);
|
||||
}
|
||||
|
||||
taos_free_result(result);
|
||||
|
||||
if (encode->stopJsonFp) {
|
||||
(encode->stopJsonFp)(pContext, &pContext->singleCmd);
|
||||
}
|
||||
|
@ -247,12 +251,14 @@ void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int code) {
|
|||
pContext, pContext->fd, pContext->ipstr, pContext->user, pContext->session->taos, tstrerror(code), pObj);
|
||||
httpSendTaosdErrorResp(pContext, code);
|
||||
}
|
||||
taos_free_result(result);
|
||||
return;
|
||||
}
|
||||
|
||||
if (result == NULL) {
|
||||
int num_fields = taos_field_count(result);
|
||||
if (num_fields == 0) {
|
||||
// not select or show commands
|
||||
int affectRows = code;
|
||||
int affectRows = taos_affected_rows(result);
|
||||
|
||||
httpTrace("context:%p, fd:%d, ip:%s, user:%s, affect rows:%d, stop query, sqlObj:%p",
|
||||
pContext, pContext->fd, pContext->ipstr, pContext->user, affectRows, result);
|
||||
|
@ -269,6 +275,7 @@ void httpProcessSingleSqlCallBack(void *param, TAOS_RES *result, int code) {
|
|||
(encode->stopJsonFp)(pContext, &pContext->singleCmd);
|
||||
}
|
||||
|
||||
taos_free_result(result);
|
||||
httpCloseContextByApp(pContext);
|
||||
} else {
|
||||
httpTrace("context:%p, fd:%d, ip:%s, user:%s, start retrieve", pContext, pContext->fd, pContext->ipstr,
|
||||
|
|
|
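In the HTTP callbacks above, the callback now receives the TAOS_RES handle itself, so whether a statement produced a row set is decided by taos_field_count(result): a count of zero means a non-select command whose outcome is read with taos_affected_rows(result); otherwise rows can be retrieved before the handle is freed. A hedged sketch of that branch, assuming the handle has already passed the taos_errno() check (taos_fetch_row is the usual row iterator of the C client and is not part of these hunks):
#include <stdio.h>
#include "taos.h"
static void consumeResult(TAOS_RES *result) {
  if (taos_field_count(result) == 0) {
    // not a select/show statement: only the affected-row count is available
    printf("affected rows: %d\n", taos_affected_rows(result));
  } else {
    // a select/show statement: drain the rows before releasing the handle
    TAOS_ROW row;
    while ((row = taos_fetch_row(result)) != NULL) {
      // each row would be formatted here
    }
  }
  taos_free_result(result);  // always release the handle, as the callbacks above do
}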
@ -116,11 +116,14 @@ typedef struct SQueryCostInfo {
|
|||
uint64_t loadDataInCacheSize;
|
||||
|
||||
uint64_t loadDataTime;
|
||||
uint64_t dataInRows;
|
||||
uint64_t checkRows;
|
||||
uint32_t dataBlocks;
|
||||
uint64_t totalRows;
|
||||
uint64_t totalCheckedRows;
|
||||
uint32_t totalBlocks;
|
||||
uint32_t loadBlocks;
|
||||
uint32_t loadBlockStatis;
|
||||
uint32_t discardBlocks;
|
||||
uint64_t elapsedTime;
|
||||
uint64_t computTime;
|
||||
} SQueryCostInfo;
|
||||
|
||||
typedef struct SGroupItem {
|
||||
|
@ -168,7 +171,7 @@ typedef struct SQueryRuntimeEnv {
|
|||
SWindowResInfo windowResInfo;
|
||||
STSBuf* pTSBuf;
|
||||
STSCursor cur;
|
||||
SQueryCostInfo summary;
|
||||
SQueryCostInfo summary;
|
||||
bool stableQuery; // super table query or not
|
||||
void* pQueryHandle;
|
||||
void* pSecQueryHandle; // another thread for
|
||||
|
@ -177,8 +180,6 @@ typedef struct SQueryRuntimeEnv {
|
|||
|
||||
typedef struct SQInfo {
|
||||
void* signature;
|
||||
TSKEY startTime;
|
||||
TSKEY elapsedTime;
|
||||
int32_t pointsInterpo;
|
||||
int32_t code; // error code to returned to client
|
||||
sem_t dataReady;
|
||||
|
|
|
@ -1183,6 +1183,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
|
|||
|
||||
STableQueryInfo* pTableQInfo = pQuery->current;
|
||||
SWindowResInfo* pWindowResInfo = &pRuntimeEnv->windowResInfo;
|
||||
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : pDataBlockInfo->rows - 1;
|
||||
|
||||
if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) {
|
||||
rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock);
|
||||
|
@ -1190,10 +1191,10 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
|
|||
blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock);
|
||||
}
|
||||
|
||||
// update the lastkey of current table
|
||||
TSKEY lastKey = QUERY_IS_ASC_QUERY(pQuery) ? pDataBlockInfo->window.ekey : pDataBlockInfo->window.skey;
|
||||
pTableQInfo->lastKey = lastKey + GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
|
||||
|
||||
|
||||
// interval query with limit applied
|
||||
int32_t numOfRes = 0;
|
||||
|
||||
|
@ -2013,7 +2014,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle,
|
|||
|
||||
if (*pStatis == NULL) { // data block statistics does not exist, load data block
|
||||
pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL);
|
||||
pRuntimeEnv->summary.checkRows += pBlockInfo->rows;
|
||||
pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows;
|
||||
}
|
||||
} else {
|
||||
assert(r == BLK_DATA_ALL_NEEDED);
|
||||
|
@ -2032,7 +2033,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle,
|
|||
// return DISK_DATA_DISCARDED;
|
||||
}
|
||||
|
||||
pRuntimeEnv->summary.checkRows += pBlockInfo->rows;
|
||||
pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows;
|
||||
pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL);
|
||||
}
|
||||
|
||||
|
@ -2149,7 +2150,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
|
|||
|
||||
TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
|
||||
while (tsdbNextDataBlock(pQueryHandle)) {
|
||||
pRuntimeEnv->summary.dataBlocks += 1;
|
||||
pRuntimeEnv->summary.totalBlocks += 1;
|
||||
if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) {
|
||||
return 0;
|
||||
}
|
||||
|
@ -2185,12 +2186,10 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
|
|||
ensureOutputBuffer(pRuntimeEnv, &blockInfo);
|
||||
|
||||
SDataStatis *pStatis = NULL;
|
||||
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : blockInfo.rows - 1;
|
||||
|
||||
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
|
||||
int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock);
|
||||
|
||||
pRuntimeEnv->summary.dataInRows += blockInfo.rows;
|
||||
pRuntimeEnv->summary.totalRows += blockInfo.rows;
|
||||
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, GET_QINFO_ADDR(pRuntimeEnv),
|
||||
blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes, pQuery->current->lastKey);
|
||||
|
||||
|
@ -2224,24 +2223,26 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
|
|||
* set tag value in SQLFunctionCtx
|
||||
* e.g.,tag information into input buffer
|
||||
*/
|
||||
static void doSetTagValueInParam(void *tsdb, STableId* pTableId, int32_t tagColId, tVariant *param) {
|
||||
tVariantDestroy(param);
|
||||
|
||||
char * val = NULL;
|
||||
int16_t bytes = 0;
|
||||
int16_t type = 0;
|
||||
static void doSetTagValueInParam(void *tsdb, STableId* pTableId, int32_t tagColId, tVariant *tag, int16_t type,
|
||||
int16_t bytes) {
|
||||
tVariantDestroy(tag);
|
||||
|
||||
if (tagColId == TSDB_TBNAME_COLUMN_INDEX) {
|
||||
val = tsdbGetTableName(tsdb, pTableId, &bytes);
|
||||
type = TSDB_DATA_TYPE_BINARY;
|
||||
tVariantCreateFromBinary(param, varDataVal(val), varDataLen(val), type);
|
||||
char* val = tsdbGetTableName(tsdb, pTableId);
|
||||
assert(val != NULL);
|
||||
|
||||
tVariantCreateFromBinary(tag, varDataVal(val), varDataLen(val), TSDB_DATA_TYPE_BINARY);
|
||||
} else {
|
||||
tsdbGetTableTagVal(tsdb, pTableId, tagColId, &type, &bytes, &val);
|
||||
char* val = tsdbGetTableTagVal(tsdb, pTableId, tagColId, type, bytes);
|
||||
if (val == NULL) {
|
||||
tag->nType = TSDB_DATA_TYPE_NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
|
||||
tVariantCreateFromBinary(param, varDataVal(val), varDataLen(val), type);
|
||||
tVariantCreateFromBinary(tag, varDataVal(val), varDataLen(val), type);
|
||||
} else {
|
||||
tVariantCreateFromBinary(param, val, bytes, type);
|
||||
tVariantCreateFromBinary(tag, val, bytes, type);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2249,25 +2250,29 @@ static void doSetTagValueInParam(void *tsdb, STableId* pTableId, int32_t tagColI
|
|||
void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, STableId* pTableId, void *tsdb) {
|
||||
SQuery *pQuery = pRuntimeEnv->pQuery;
|
||||
|
||||
SSqlFuncMsg *pFuncMsg = &pQuery->pSelectExpr[0].base;
|
||||
if (pQuery->numOfOutput == 1 && pFuncMsg->functionId == TSDB_FUNC_TS_COMP) {
|
||||
assert(pFuncMsg->numOfParams == 1);
|
||||
doSetTagValueInParam(tsdb, pTableId, pFuncMsg->arg->argValue.i64, &pRuntimeEnv->pCtx[0].tag);
|
||||
SExprInfo *pExprInfo = &pQuery->pSelectExpr[0];
|
||||
if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) {
|
||||
|
||||
assert(pExprInfo->base.numOfParams == 1);
|
||||
doSetTagValueInParam(tsdb, pTableId, pExprInfo->base.arg->argValue.i64, &pRuntimeEnv->pCtx[0].tag,
|
||||
pExprInfo->type, pExprInfo->bytes);
|
||||
} else {
|
||||
// set tag value, by which the results are aggregated.
|
||||
for (int32_t idx = 0; idx < pQuery->numOfOutput; ++idx) {
|
||||
SColIndex *pCol = &pQuery->pSelectExpr[idx].base.colInfo;
|
||||
SExprInfo* pExprInfo = &pQuery->pSelectExpr[idx];
|
||||
|
||||
// ts_comp column required the tag value for join filter
|
||||
if (!TSDB_COL_IS_TAG(pCol->flag)) {
|
||||
if (!TSDB_COL_IS_TAG(pExprInfo->base.colInfo.flag)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// todo use tag column index to optimize performance
|
||||
doSetTagValueInParam(tsdb, pTableId, pCol->colId, &pRuntimeEnv->pCtx[idx].tag);
|
||||
doSetTagValueInParam(tsdb, pTableId, pExprInfo->base.colInfo.colId, &pRuntimeEnv->pCtx[idx].tag,
|
||||
pExprInfo->type, pExprInfo->bytes);
|
||||
}
|
||||
|
||||
// set the join tag for first column
|
||||
SSqlFuncMsg *pFuncMsg = &pExprInfo->base;
|
||||
if (pFuncMsg->functionId == TSDB_FUNC_TS && pFuncMsg->colInfo.colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX &&
|
||||
pRuntimeEnv->pTSBuf != NULL) {
|
||||
assert(pFuncMsg->numOfParams == 1);
|
||||
|
@ -3247,7 +3252,7 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols)
|
|||
free(pTableQueryInfo);
|
||||
}
|
||||
|
||||
void restoreIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) {
|
||||
void setCurrentQueryTable(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) {
|
||||
SQuery *pQuery = pRuntimeEnv->pQuery;
|
||||
pQuery->current = pTableQueryInfo;
|
||||
|
||||
|
@ -3316,7 +3321,7 @@ static void setWindowResOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SWindowResult *
|
|||
|
||||
int32_t setAdditionalInfo(SQInfo *pQInfo, STableId* pTableId, STableQueryInfo *pTableQueryInfo) {
|
||||
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
assert(pTableQueryInfo->lastKey >= 0);
|
||||
assert(pTableQueryInfo->lastKey >= TSKEY_INITIAL_VAL);
|
||||
|
||||
setTagVal(pRuntimeEnv, pTableId, pQInfo->tsdb);
|
||||
|
||||
|
@ -3528,10 +3533,11 @@ static void updateWindowResNumOfRes(SQueryRuntimeEnv *pRuntimeEnv, STableQueryIn
|
|||
}
|
||||
}
|
||||
|
||||
void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo,
|
||||
SDataBlockInfo *pDataBlockInfo, SDataStatis *pStatis, SArray *pDataBlock,
|
||||
__block_search_fn_t searchFn) {
|
||||
void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pDataBlockInfo, SDataStatis *pStatis,
|
||||
SArray *pDataBlock, __block_search_fn_t searchFn) {
|
||||
SQuery * pQuery = pRuntimeEnv->pQuery;
|
||||
STableQueryInfo* pTableQueryInfo = pQuery->current;
|
||||
|
||||
SWindowResInfo * pWindowResInfo = &pTableQueryInfo->windowResInfo;
|
||||
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 0 : pDataBlockInfo->rows - 1;
|
||||
|
||||
|
@ -3664,10 +3670,8 @@ int32_t doFillGapsInResults(SQueryRuntimeEnv* pRuntimeEnv, tFilePage **pDst, int
|
|||
}
|
||||
}
|
||||
|
||||
void queryCostStatis(SQInfo *pQInfo) {
|
||||
static void queryCostStatis(SQInfo *pQInfo) {
|
||||
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
// SQuery *pQuery = pRuntimeEnv->pQuery;
|
||||
|
||||
SQueryCostInfo *pSummary = &pRuntimeEnv->summary;
|
||||
// if (pRuntimeEnv->pResultBuf == NULL) {
|
||||
//// pSummary->tmpBufferInDisk = 0;
|
||||
|
@ -3687,8 +3691,9 @@ void queryCostStatis(SQInfo *pQInfo) {
|
|||
// pQInfo, pSummary->readDiskBlocks, pSummary->totalBlockSize, pSummary->loadBlocksUs / 1000.0,
|
||||
// pSummary->skippedFileBlocks, pSummary->totalGenData);
|
||||
|
||||
qTrace("QInfo:%p cost: check blocks:%d, statis:%d, rows:%"PRId64", check rows:%"PRId64, pQInfo, pSummary->dataBlocks,
|
||||
pSummary->loadBlockStatis, pSummary->dataInRows, pSummary->checkRows);
|
||||
qTrace("QInfo:%p :cost summary: elpased time:%"PRId64" us, total blocks:%d, use block statis:%d, use block data:%d, "
|
||||
"total rows:%"PRId64 ", check rows:%"PRId64, pQInfo, pSummary->elapsedTime, pSummary->totalBlocks,
|
||||
pSummary->loadBlockStatis, pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows);
|
||||
|
||||
// qTrace("QInfo:%p cost: temp file:%d Bytes", pQInfo, pSummary->tmpBufferInDisk);
|
||||
//
|
||||
|
@ -4084,12 +4089,13 @@ static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) {
|
|||
static int64_t queryOnDataBlocks(SQInfo *pQInfo) {
|
||||
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
SQuery * pQuery = pRuntimeEnv->pQuery;
|
||||
SQueryCostInfo* summary = &pRuntimeEnv->summary;
|
||||
|
||||
int64_t st = taosGetTimestampMs();
|
||||
|
||||
TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle;
|
||||
|
||||
while (tsdbNextDataBlock(pQueryHandle)) {
|
||||
summary->totalBlocks += 1;
|
||||
if (isQueryKilled(pQInfo)) {
|
||||
break;
|
||||
}
|
||||
|
@ -4121,10 +4127,9 @@ static int64_t queryOnDataBlocks(SQInfo *pQInfo) {
|
|||
}
|
||||
|
||||
assert(pTableQueryInfo != NULL);
|
||||
restoreIntervalQueryRange(pRuntimeEnv, pTableQueryInfo);
|
||||
setCurrentQueryTable(pRuntimeEnv, pTableQueryInfo);
|
||||
|
||||
SDataStatis *pStatis = NULL;
|
||||
|
||||
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
|
||||
|
||||
if (!isIntervalQuery(pQuery)) {
|
||||
|
@ -4133,15 +4138,14 @@ static int64_t queryOnDataBlocks(SQInfo *pQInfo) {
|
|||
} else { // interval query
|
||||
TSKEY nextKey = blockInfo.window.skey;
|
||||
setIntervalQueryRange(pQInfo, nextKey);
|
||||
int32_t ret = setAdditionalInfo(pQInfo, &pTableQueryInfo->id, pTableQueryInfo);
|
||||
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
pQInfo->code = ret;
|
||||
return taosGetTimestampMs() - st;
|
||||
}
|
||||
/*int32_t ret = */setAdditionalInfo(pQInfo, &pTableQueryInfo->id, pTableQueryInfo);
|
||||
}
|
||||
|
||||
stableApplyFunctionsOnBlock(pRuntimeEnv, pTableQueryInfo, &blockInfo, pStatis, pDataBlock, binarySearchForKey);
|
||||
summary->totalRows += blockInfo.rows;
|
||||
stableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, pDataBlock, binarySearchForKey);
|
||||
|
||||
qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, lastKey:%"PRId64, GET_QINFO_ADDR(pRuntimeEnv),
|
||||
blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, pQuery->current->lastKey);
|
||||
}
|
||||
|
||||
int64_t et = taosGetTimestampMs();
|
||||
|
@ -4503,10 +4507,6 @@ static void multiTableQueryProcess(SQInfo *pQInfo) {
|
|||
copyFromWindowResToSData(pQInfo, pRuntimeEnv->windowResInfo.pResult);
|
||||
}
|
||||
|
||||
if (pQuery->rec.rows == 0) {
|
||||
// queryCostStatis(pSupporter);
|
||||
}
|
||||
|
||||
qTrace("QInfo:%p current:%lld, total:%lld", pQInfo, pQuery->rec.rows, pQuery->rec.total);
|
||||
return;
|
||||
}
|
||||
|
@ -4789,7 +4789,6 @@ static void tableQueryImpl(SQInfo *pQInfo) {
|
|||
}
|
||||
|
||||
qTrace("QInfo:%p query over, %d rows are returned", pQInfo, pQuery->rec.total);
|
||||
queryCostStatis(pQInfo);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -4812,7 +4811,7 @@ static void tableQueryImpl(SQInfo *pQInfo) {
|
|||
}
|
||||
|
||||
// record the total elapsed time
|
||||
pQInfo->elapsedTime += (taosGetTimestampUs() - st);
|
||||
pRuntimeEnv->summary.elapsedTime += (taosGetTimestampUs() - st);
|
||||
assert(pQInfo->groupInfo.numOfTables == 1);
|
||||
|
||||
/* check if query is killed or not */
|
||||
|
@ -4821,10 +4820,6 @@ static void tableQueryImpl(SQInfo *pQInfo) {
|
|||
} else {// todo set the table uid and tid in log
|
||||
qTrace("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows",
|
||||
pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
|
||||
|
||||
if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
|
||||
queryCostStatis(pQInfo);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4846,13 +4841,10 @@ static void stableQueryImpl(SQInfo *pQInfo) {
|
|||
}
|
||||
|
||||
// record the total elapsed time
|
||||
pQInfo->elapsedTime += (taosGetTimestampUs() - st);
|
||||
// taosFillSetStartInfo(&pQInfo->runtimeEnv.pFillInfo, pQuery->size, pQInfo->query.fillType);
|
||||
pQInfo->runtimeEnv.summary.elapsedTime += (taosGetTimestampUs() - st);
|
||||
|
||||
if (pQuery->rec.rows == 0) {
|
||||
qTrace("QInfo:%p over, %d tables queried, %d points are returned", pQInfo, pQInfo->groupInfo.numOfTables,
|
||||
pQuery->rec.total);
|
||||
// queryCostStatis(pSupporter);
|
||||
qTrace("QInfo:%p over, %d tables queried, %d rows are returned", pQInfo, pQInfo->groupInfo.numOfTables, pQuery->rec.total);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -5832,9 +5824,11 @@ int32_t qCreateQueryInfo(void *tsdb, int32_t vgId, SQueryTableMsg *pQueryMsg, qi
|
|||
numOfGroupByCols = 0;
|
||||
}
|
||||
|
||||
// todo handle the error
|
||||
/*int32_t ret =*/tsdbQuerySTableByTagCond(tsdb, id->uid, tagCond, pQueryMsg->tagCondLen, pQueryMsg->tagNameRelType, tbnameCond, &groupInfo, pGroupColIndex,
|
||||
code = tsdbQuerySTableByTagCond(tsdb, id->uid, tagCond, pQueryMsg->tagCondLen, pQueryMsg->tagNameRelType, tbnameCond, &groupInfo, pGroupColIndex,
|
||||
numOfGroupByCols);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _over;
|
||||
}
|
||||
} else {
|
||||
SArray* pTableGroup = taosArrayInit(1, POINTER_BYTES);
|
||||
|
||||
|
@ -5864,7 +5858,7 @@ _over:
|
|||
taosArrayDestroy(pTableIdList);
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tfree(*pQInfo);
|
||||
// pQInfo was already freed in initQInfo, but *pQInfo may not have been set to NULL;
|
||||
*pQInfo = NULL;
|
||||
}
|
||||
|
||||
|
@ -5874,6 +5868,9 @@ _over:
|
|||
|
||||
void qDestroyQueryInfo(qinfo_t pQInfo) {
|
||||
qTrace("QInfo:%p query completed", pQInfo);
|
||||
|
||||
// print the query cost summary
|
||||
queryCostStatis(pQInfo);
|
||||
freeQInfo(pQInfo);
|
||||
}
|
||||
|
||||
|
@ -5950,6 +5947,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
|
|||
return TSDB_CODE_INVALID_QHANDLE;
|
||||
}
|
||||
|
||||
SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
|
||||
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
|
||||
size_t size = getResultSize(pQInfo, &pQuery->rec.rows);
|
||||
size += sizeof(int32_t);
|
||||
|
@ -5963,7 +5961,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
|
|||
int32_t code = pQInfo->code;
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
(*pRsp)->offset = htobe64(pQuery->limit.offset);
|
||||
(*pRsp)->useconds = htobe64(pQInfo->elapsedTime);
|
||||
(*pRsp)->useconds = htobe64(pRuntimeEnv->summary.elapsedTime);
|
||||
} else {
|
||||
(*pRsp)->offset = 0;
|
||||
(*pRsp)->useconds = 0;
|
||||
|
@ -6004,7 +6002,7 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
|
|||
num = taosArrayGetSize(pa);
|
||||
|
||||
assert(num == pQInfo->groupInfo.numOfTables);
|
||||
int16_t type, bytes;
|
||||
// int16_t type, bytes;
|
||||
|
||||
int32_t functionId = pQuery->pSelectExpr[0].base.functionId;
|
||||
if (functionId == TSDB_FUNC_TID_TAG) { // return the tags & table Id
|
||||
|
@ -6012,7 +6010,6 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
|
|||
SExprInfo* pExprInfo = &pQuery->pSelectExpr[0];
|
||||
|
||||
int32_t rsize = pExprInfo->bytes;
|
||||
char* data = NULL;
|
||||
|
||||
for(int32_t i = 0; i < num; ++i) {
|
||||
SGroupItem* item = taosArrayGet(pa, i);
|
||||
|
@ -6030,8 +6027,25 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
|
|||
*(int32_t*) output = pQInfo->vgId;
|
||||
output += sizeof(pQInfo->vgId);
|
||||
|
||||
tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo->base.colInfo.colId, &type, &bytes, &data);
|
||||
memcpy(output, data, bytes);
|
||||
int16_t bytes = pExprInfo->bytes;
|
||||
int16_t type = pExprInfo->type;
|
||||
|
||||
char* val = tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo->base.colInfo.colId, type, bytes);
|
||||
|
||||
// todo refactor
|
||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
|
||||
if (val == NULL) {
|
||||
setVardataNull(output, type);
|
||||
} else {
|
||||
memcpy(output, val, varDataTLen(val));
|
||||
}
|
||||
} else {
|
||||
if (val == NULL) {
|
||||
setNull(output, type, bytes);
|
||||
} else {
|
||||
memcpy(output, val, bytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
qTrace("QInfo:%p create (tableId, tag) info completed, rows:%d", pQInfo, num);
|
||||
|
@ -6040,23 +6054,32 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
|
|||
SExprInfo* pExprInfo = pQuery->pSelectExpr;
|
||||
SGroupItem* item = taosArrayGet(pa, i);
|
||||
|
||||
char* data = NULL;
|
||||
for(int32_t j = 0; j < pQuery->numOfOutput; ++j) {
|
||||
// todo check the return value, refactor codes
|
||||
if (pExprInfo[j].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
|
||||
data = tsdbGetTableName(pQInfo->tsdb, &item->id, &bytes);
|
||||
char* data = tsdbGetTableName(pQInfo->tsdb, &item->id);
|
||||
|
||||
char* dst = pQuery->sdata[j]->data + i * (TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE);
|
||||
memcpy(dst, data, varDataTLen(data));
|
||||
} else {// todo refactor, return the true length of binary|nchar data
|
||||
tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo[j].base.colInfo.colId, &type, &bytes, &data);
|
||||
assert(bytes <= pExprInfo[j].bytes && type == pExprInfo[j].type);
|
||||
} else {// todo refactor
|
||||
int16_t type = pExprInfo[j].type;
|
||||
int16_t bytes = pExprInfo[j].bytes;
|
||||
|
||||
char* data = tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo[j].base.colInfo.colId, type, bytes);
|
||||
|
||||
char* dst = pQuery->sdata[j]->data + i * pExprInfo[j].bytes;
|
||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
|
||||
memcpy(dst, data, varDataTLen(data));
|
||||
if (data == NULL) {
|
||||
setVardataNull(dst, type);
|
||||
} else {
|
||||
memcpy(dst, data, varDataTLen(data));
|
||||
}
|
||||
} else {
|
||||
memcpy(dst, data, bytes);
|
||||
if (data == NULL) {
|
||||
setNull(dst, type, bytes);
|
||||
} else {
|
||||
memcpy(dst, data, pExprInfo[j].bytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -110,6 +110,14 @@ void taosDestoryFillInfo(SFillInfo* pFillInfo) {
|
|||
tfree(pFillInfo->prevValues);
|
||||
tfree(pFillInfo->nextValues);
|
||||
tfree(pFillInfo->pTags);
|
||||
|
||||
for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
|
||||
tfree(pFillInfo->pData[i]);
|
||||
}
|
||||
|
||||
tfree(pFillInfo->pData);
|
||||
tfree(pFillInfo->pFillCol);
|
||||
|
||||
tfree(pFillInfo);
|
||||
}
|
||||
|
||||
|
@ -247,7 +255,7 @@ int taosDoLinearInterpolation(int32_t type, SPoint* point1, SPoint* point2, SPoi
|
|||
}
|
||||
|
||||
static void setTagsValue(SFillInfo* pColInfo, tFilePage** data, char** pTags, int32_t start, int32_t num) {
|
||||
for (int32_t j = 0, i = start; i < pColInfo->numOfCols + pColInfo->numOfTags; ++i, ++j) {
|
||||
for (int32_t j = 0, i = start; i < pColInfo->numOfCols; ++i, ++j) {
|
||||
SFillColInfo* pCol = &pColInfo->pFillCol[i];
|
||||
|
||||
char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, num);
|
||||
|
@ -344,7 +352,7 @@ static void doInterpoResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t*
|
|||
setTagsValue(pFillInfo, data, pTags, numOfValCols, *num);
|
||||
|
||||
}
|
||||
} else { /* default value interpolation */
|
||||
} else { /* fill the default value */
|
||||
for (int32_t i = 1; i < numOfValCols; ++i) {
|
||||
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
|
||||
|
||||
|
|
|
@ -218,9 +218,9 @@ void *rpcOpen(const SRpcInit *pInit) {
|
|||
pRpc->localPort = pInit->localPort;
|
||||
pRpc->afp = pInit->afp;
|
||||
pRpc->sessions = pInit->sessions+1;
|
||||
if (pInit->user) strcpy(pRpc->user, pInit->user);
|
||||
if (pInit->secret) strcpy(pRpc->secret, pInit->secret);
|
||||
if (pInit->ckey) strcpy(pRpc->ckey, pInit->ckey);
|
||||
if (pInit->user) tstrncpy(pRpc->user, pInit->user, sizeof(pRpc->user));
|
||||
if (pInit->secret) memcpy(pRpc->secret, pInit->secret, sizeof(pRpc->secret));
|
||||
if (pInit->ckey) tstrncpy(pRpc->ckey, pInit->ckey, sizeof(pRpc->ckey));
|
||||
pRpc->spi = pInit->spi;
|
||||
pRpc->cfp = pInit->cfp;
|
||||
pRpc->afp = pInit->afp;
|
||||
|
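The rpcOpen() change above replaces unbounded strcpy() calls with size-bounded copies (tstrncpy for the NUL-terminated user and ckey fields, memcpy sized to the destination for the fixed-length secret), so an oversized credential can no longer overflow the receiving buffers. A small stand-in for the bounded-copy idiom; it assumes tstrncpy behaves like the sketch below (copy at most n-1 bytes and always NUL-terminate), which is how it is used here:
#include <stddef.h>
#include <string.h>
// Bounded copy: never writes past dst[n-1] and always leaves dst NUL-terminated.
static void boundedCopy(char *dst, const char *src, size_t n) {
  if (n == 0) return;
  strncpy(dst, src, n - 1);
  dst[n - 1] = '\0';
}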
@ -435,6 +435,7 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
|
|||
|
||||
void rpcSendRedirectRsp(void *thandle, const SRpcIpSet *pIpSet) {
|
||||
SRpcMsg rpcMsg;
|
||||
memset(&rpcMsg, 0, sizeof(rpcMsg));
|
||||
|
||||
rpcMsg.contLen = sizeof(SRpcIpSet);
|
||||
rpcMsg.pCont = rpcMallocCont(rpcMsg.contLen);
|
||||
|
|
|
@ -253,12 +253,14 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int num, void *
|
|||
|
||||
if (pthread_mutex_init(&(pThreadObj->mutex), NULL) < 0) {
|
||||
tError("%s failed to init TCP client mutex(%s)", label, strerror(errno));
|
||||
free(pThreadObj);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pThreadObj->pollFd = epoll_create(10); // size does not matter
|
||||
if (pThreadObj->pollFd < 0) {
|
||||
tError("%s failed to create TCP client epoll", label);
|
||||
free(pThreadObj);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -269,6 +271,8 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int num, void *
|
|||
int code = pthread_create(&(pThreadObj->thread), &thattr, taosProcessTcpData, (void *)(pThreadObj));
|
||||
pthread_attr_destroy(&thattr);
|
||||
if (code != 0) {
|
||||
close(pThreadObj->pollFd);
|
||||
free(pThreadObj);
|
||||
tError("%s failed to create TCP read data thread(%s)", label, strerror(errno));
|
||||
return NULL;
|
||||
}
|
||||
|
@ -292,7 +296,7 @@ void *taosOpenTcpClientConnection(void *shandle, void *thandle, uint32_t ip, uin
|
|||
SThreadObj * pThreadObj = shandle;
|
||||
|
||||
int fd = taosOpenTcpClientSocket(ip, port, pThreadObj->ip);
|
||||
if (fd <= 0) return NULL;
|
||||
if (fd < 0) return NULL;
|
||||
|
||||
SFdObj *pFdObj = taosMallocFdObj(pThreadObj, fd);
|
||||
|
||||
|
|
|
@ -192,7 +192,7 @@ static void *taosRecvUdpData(void *param) {
|
|||
|
||||
char *tmsg = malloc(dataLen + tsRpcOverhead);
|
||||
if (NULL == tmsg) {
|
||||
tError("%s failed to allocate memory, size:%d", pConn->label, dataLen);
|
||||
tError("%s failed to allocate memory, size:%ld", pConn->label, dataLen);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
*/
|
||||
|
||||
#include "os.h"
|
||||
#include "tutil.h"
|
||||
#include "tglobal.h"
|
||||
#include "rpcLog.h"
|
||||
#include "trpc.h"
|
||||
|
@ -105,7 +106,7 @@ int main(int argc, char *argv[]) {
|
|||
if (strcmp(argv[i], "-p")==0 && i < argc-1) {
|
||||
ipSet.port[0] = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-i") ==0 && i < argc-1) {
|
||||
strcpy(ipSet.fqdn[0], argv[++i]);
|
||||
tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn[0]));
|
||||
} else if (strcmp(argv[i], "-t")==0 && i < argc-1) {
|
||||
rpcInit.numOfThreads = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-m")==0 && i < argc-1) {
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
|
||||
#include "os.h"
|
||||
#include "tutil.h"
|
||||
#include "tglobal.h"
|
||||
#include "rpcLog.h"
|
||||
#include "trpc.h"
|
||||
|
@ -106,7 +107,7 @@ int main(int argc, char *argv[]) {
|
|||
if (strcmp(argv[i], "-p")==0 && i < argc-1) {
|
||||
ipSet.port[0] = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-i") ==0 && i < argc-1) {
|
||||
strcpy(ipSet.fqdn[0], argv[++i]);
|
||||
tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn[0]));
|
||||
} else if (strcmp(argv[i], "-t")==0 && i < argc-1) {
|
||||
rpcInit.numOfThreads = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-m")==0 && i < argc-1) {
|
||||
|
|
|
@ -69,6 +69,7 @@ void processShellMsg() {
|
|||
taosGetQitem(qall, &type, (void **)&pRpcMsg);
|
||||
rpcFreeCont(pRpcMsg->pCont);
|
||||
|
||||
memset(&rpcMsg, 0, sizeof(rpcMsg));
|
||||
rpcMsg.pCont = rpcMallocCont(msgSize);
|
||||
rpcMsg.contLen = msgSize;
|
||||
rpcMsg.handle = pRpcMsg->handle;
|
||||
|
|
|
@ -69,12 +69,13 @@ typedef struct {
|
|||
} SMemTable;
|
||||
|
||||
// ---------- TSDB TABLE DEFINITION
|
||||
#define TSDB_MAX_TABLE_SCHEMAS 16
|
||||
typedef struct STable {
|
||||
int8_t type;
|
||||
STableId tableId;
|
||||
uint64_t superUid; // Super table UID
|
||||
int32_t sversion;
|
||||
STSchema * schema;
|
||||
int16_t numOfSchemas;
|
||||
STSchema ** schema;
|
||||
STSchema * tagSchema;
|
||||
SKVRow tagVal;
|
||||
SMemTable * mem;
|
||||
|
@ -122,7 +123,6 @@ typedef struct STableIndexElem {
|
|||
|
||||
STsdbMeta *tsdbInitMeta(char *rootDir, int32_t maxTables, void *pRepo);
|
||||
int32_t tsdbFreeMeta(STsdbMeta *pMeta);
|
||||
STSchema * tsdbGetTableSchema(STsdbMeta *pMeta, STable *pTable);
|
||||
STSchema * tsdbGetTableTagSchema(STsdbMeta *pMeta, STable *pTable);
|
||||
|
||||
// ---- Operation on STable
|
||||
|
@ -502,11 +502,18 @@ int tsdbWriteCompInfo(SRWHelper *pHelper);
|
|||
int tsdbWriteCompIdx(SRWHelper *pHelper);
|
||||
|
||||
// --------- Other functions need to further organize
|
||||
void tsdbFitRetention(STsdbRepo *pRepo);
|
||||
int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks);
|
||||
void tsdbAdjustCacheBlocks(STsdbCache *pCache);
|
||||
int32_t tsdbGetMetaFileName(char *rootDir, char *fname);
|
||||
int tsdbUpdateFileHeader(SFile *pFile, uint32_t version);
|
||||
void tsdbFitRetention(STsdbRepo *pRepo);
|
||||
int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks);
|
||||
void tsdbAdjustCacheBlocks(STsdbCache *pCache);
|
||||
int32_t tsdbGetMetaFileName(char *rootDir, char *fname);
|
||||
int tsdbUpdateFileHeader(SFile *pFile, uint32_t version);
|
||||
int tsdbUpdateTable(STsdbMeta *pMeta, STable *pTable, STableCfg *pCfg);
|
||||
int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable);
|
||||
int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable);
|
||||
STSchema *tsdbGetTableSchemaByVersion(STsdbMeta *pMeta, STable *pTable, int16_t version);
|
||||
STSchema *tsdbGetTableSchema(STsdbMeta *pMeta, STable *pTable);
|
||||
|
||||
#define DEFAULT_TAG_INDEX_COLUMN 0 // skip list built based on the first column of tags
|
||||
|
||||
int compFGroupKey(const void *key, const void *fgroup);
|
||||
|
||||
|
|
|
@ -410,6 +410,61 @@ int tsdbAlterTable(TsdbRepoT *pRepo, STableCfg *pCfg) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int tsdbUpdateTagValue(TsdbRepoT *repo, SUpdateTableTagValMsg *pMsg) {
|
||||
STsdbRepo *pRepo = (STsdbRepo *)repo;
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
int16_t tversion = htons(pMsg->tversion);
|
||||
|
||||
STable *pTable = tsdbGetTableByUid(pMeta, htobe64(pMsg->uid));
|
||||
if (pTable == NULL) return TSDB_CODE_INVALID_TABLE_ID;
|
||||
if (pTable->tableId.tid != htonl(pMsg->tid)) return TSDB_CODE_INVALID_TABLE_ID;
|
||||
|
||||
if (pTable->type != TSDB_CHILD_TABLE) {
|
||||
tsdbError("vgId:%d failed to update tag value of table %s since its type is %d", pRepo->config.tsdbId,
|
||||
varDataVal(pTable->name), pTable->type);
|
||||
return TSDB_CODE_INVALID_TABLE_TYPE;
|
||||
}
|
||||
|
||||
if (schemaVersion(tsdbGetTableTagSchema(pMeta, pTable)) < tversion) {
|
||||
tsdbTrace("vgId:%d server tag version %d is older than client tag version %d, try to config", pRepo->config.tsdbId,
|
||||
schemaVersion(tsdbGetTableTagSchema(pMeta, pTable)), tversion);
|
||||
void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, htonl(pMsg->tid));
|
||||
if (msg == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
// Deal with the error here
|
||||
STableCfg *pTableCfg = tsdbCreateTableCfgFromMsg(msg);
|
||||
STable *super = tsdbGetTableByUid(pMeta, pTableCfg->superUid);
|
||||
ASSERT(super != NULL);
|
||||
|
||||
int32_t code = tsdbUpdateTable(pMeta, super, pTableCfg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
tsdbClearTableCfg(pTableCfg);
|
||||
rpcFreeCont(msg);
|
||||
}
|
||||
|
||||
STSchema *pTagSchema = tsdbGetTableTagSchema(pMeta, pTable);
|
||||
|
||||
if (schemaVersion(pTagSchema) > tversion) {
|
||||
tsdbError(
|
||||
"vgId:%d failed to update tag value of table %s since version out of date, client tag version:%d server tag "
|
||||
"version:%d",
|
||||
pRepo->config.tsdbId, varDataVal(pTable->name), tversion, schemaVersion(pTable->tagSchema));
|
||||
return TSDB_CODE_TAG_VER_OUT_OF_DATE;
|
||||
}
|
||||
if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == htons(pMsg->colId)) {
|
||||
tsdbRemoveTableFromIndex(pMeta, pTable);
|
||||
}
|
||||
// TODO: remove table from index if it is the first column of tag
|
||||
tdSetKVRowDataOfCol(&pTable->tagVal, htons(pMsg->colId), htons(pMsg->type), pMsg->data);
|
||||
if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == htons(pMsg->colId)) {
|
||||
tsdbAddTableIntoIndex(pMeta, pTable);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
TSKEY tsdbGetTableLastKey(TsdbRepoT *repo, uint64_t uid) {
|
||||
STsdbRepo *pRepo = (STsdbRepo *)repo;
|
||||
|
||||
|
@ -559,12 +614,15 @@ int tsdbTableSetStreamSql(STableCfg *config, char *sql, bool dup) {
|
|||
}
|
||||
|
||||
void tsdbClearTableCfg(STableCfg *config) {
|
||||
if (config->schema) tdFreeSchema(config->schema);
|
||||
if (config->tagSchema) tdFreeSchema(config->tagSchema);
|
||||
if (config->tagValues) kvRowFree(config->tagValues);
|
||||
tfree(config->name);
|
||||
tfree(config->sname);
|
||||
tfree(config->sql);
|
||||
if (config) {
|
||||
if (config->schema) tdFreeSchema(config->schema);
|
||||
if (config->tagSchema) tdFreeSchema(config->tagSchema);
|
||||
if (config->tagValues) kvRowFree(config->tagValues);
|
||||
tfree(config->name);
|
||||
tfree(config->sname);
|
||||
tfree(config->sql);
|
||||
free(config);
|
||||
}
|
||||
}
|
||||
|
||||
int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter) {
|
||||
|
@ -883,6 +941,7 @@ static int32_t tdInsertRowToTable(STsdbRepo *pRepo, SDataRow row, STable *pTable
|
|||
|
||||
static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows) {
|
||||
STsdbRepo *pRepo = (STsdbRepo *)repo;
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
|
||||
STableId tableId = {.uid = pBlock->uid, .tid = pBlock->tid};
|
||||
STable *pTable = tsdbIsValidTableToInsert(pRepo->tsdbMeta, tableId);
|
||||
|
@ -892,6 +951,39 @@ static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY
|
|||
return TSDB_CODE_INVALID_TABLE_ID;
|
||||
}
|
||||
|
||||
  // Check schema version
  int32_t tversion = pBlock->sversion;
  int16_t nversion = schemaVersion(tsdbGetTableSchema(pMeta, pTable));
  if (tversion > nversion) {
    tsdbTrace("vgId:%d table:%s tid:%d server schema version %d is older than client version %d, try to config.",
              pRepo->config.tsdbId, varDataVal(pTable->name), pTable->tableId.tid, nversion, tversion);
    void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, pTable->tableId.tid);
    if (msg == NULL) {
      return terrno;
    }
    // Deal with error here
    STableCfg *pTableCfg = tsdbCreateTableCfgFromMsg(msg);
    STable *pTableUpdate = NULL;
    if (pTable->type == TSDB_CHILD_TABLE) {
      pTableUpdate = tsdbGetTableByUid(pMeta, pTableCfg->superUid);
    } else {
      pTableUpdate = pTable;
    }

    int32_t code = tsdbUpdateTable(pMeta, pTableUpdate, pTableCfg);
    if (code != TSDB_CODE_SUCCESS) {
      return code;
    }
    tsdbClearTableCfg(pTableCfg);
    rpcFreeCont(msg);
  } else {
    if (tsdbGetTableSchemaByVersion(pMeta, pTable, tversion) == NULL) {
      tsdbError("vgId:%d table:%s tid:%d invalid schema version %d from client", pRepo->config.tsdbId,
                varDataVal(pTable->name), pTable->tableId.tid, tversion);
      return TSDB_CODE_TABLE_SCHEMA_VERSION;
    }
  }
|
||||
|
||||
SSubmitBlkIter blkIter = {0};
|
||||
SDataRow row = NULL;
|
||||
|
||||
|
@ -916,9 +1008,10 @@ static int32_t tsdbInsertDataToTable(TsdbRepoT *repo, SSubmitBlk *pBlock, TSKEY
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int tsdbReadRowsFromCache(SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols) {
|
||||
static int tsdbReadRowsFromCache(STsdbMeta *pMeta, STable *pTable, SSkipListIterator *pIter, TSKEY maxKey, int maxRowsToRead, SDataCols *pCols) {
|
||||
ASSERT(maxRowsToRead > 0);
|
||||
if (pIter == NULL) return 0;
|
||||
STSchema *pSchema = NULL;
|
||||
|
||||
int numOfRows = 0;
|
||||
|
||||
|
@ -931,7 +1024,15 @@ static int tsdbReadRowsFromCache(SSkipListIterator *pIter, TSKEY maxKey, int max
|
|||
SDataRow row = SL_GET_NODE_DATA(node);
|
||||
if (dataRowKey(row) > maxKey) break;
|
||||
|
||||
tdAppendDataRowToDataCol(row, pCols);
|
||||
if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) {
|
||||
pSchema = tsdbGetTableSchemaByVersion(pMeta, pTable, dataRowVersion(row));
|
||||
if (pSchema == NULL) {
|
||||
// TODO: deal with the error here
|
||||
ASSERT(false);
|
||||
}
|
||||
}
|
||||
|
||||
tdAppendDataRowToDataCol(row, pSchema, pCols);
|
||||
numOfRows++;
|
||||
} while (tSkipListIterNext(pIter));
|
||||
|
||||
|
@ -1081,7 +1182,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SSkipListIterator **iters
|
|||
int maxRowsToRead = pCfg->maxRowsPerFileBlock * 4 / 5;
|
||||
int nLoop = 0;
|
||||
while (true) {
|
||||
int rowsRead = tsdbReadRowsFromCache(pIter, maxKey, maxRowsToRead, pDataCols);
|
||||
int rowsRead = tsdbReadRowsFromCache(pMeta, pTable, pIter, maxKey, maxRowsToRead, pDataCols);
|
||||
assert(rowsRead >= 0);
|
||||
if (pDataCols->numOfRows == 0) break;
|
||||
nLoop++;
|
||||
|
|
|
@ -8,13 +8,10 @@
|
|||
#define TSDB_SUPER_TABLE_SL_LEVEL 5 // TODO: may change here
|
||||
// #define TSDB_META_FILE_NAME "META"
|
||||
|
||||
const int32_t DEFAULT_TAG_INDEX_COLUMN = 0; // skip list built based on the first column of tags
|
||||
|
||||
static int tsdbFreeTable(STable *pTable);
|
||||
static int32_t tsdbCheckTableCfg(STableCfg *pCfg);
|
||||
static int tsdbAddTableToMeta(STsdbMeta *pMeta, STable *pTable, bool addIdx);
|
||||
static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable);
|
||||
static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable);
|
||||
static int tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable, bool rmFromIdx);
|
||||
|
||||
/**
|
||||
|
@ -41,15 +38,20 @@ void tsdbEncodeTable(STable *pTable, char *buf, int *contLen) {
|
|||
T_APPEND_MEMBER(ptr, &(pTable->tableId), STableId, uid);
|
||||
T_APPEND_MEMBER(ptr, &(pTable->tableId), STableId, tid);
|
||||
T_APPEND_MEMBER(ptr, pTable, STable, superUid);
|
||||
T_APPEND_MEMBER(ptr, pTable, STable, sversion);
|
||||
|
||||
if (pTable->type == TSDB_SUPER_TABLE) {
|
||||
ptr = tdEncodeSchema(ptr, pTable->schema);
|
||||
T_APPEND_MEMBER(ptr, pTable, STable, numOfSchemas);
|
||||
for (int i = 0; i < pTable->numOfSchemas; i++) {
|
||||
ptr = tdEncodeSchema(ptr, pTable->schema[i]);
|
||||
}
|
||||
ptr = tdEncodeSchema(ptr, pTable->tagSchema);
|
||||
} else if (pTable->type == TSDB_CHILD_TABLE) {
|
||||
ptr = tdEncodeKVRow(ptr, pTable->tagVal);
|
||||
} else {
|
||||
ptr = tdEncodeSchema(ptr, pTable->schema);
|
||||
T_APPEND_MEMBER(ptr, pTable, STable, numOfSchemas);
|
||||
for (int i = 0; i < pTable->numOfSchemas; i++) {
|
||||
ptr = tdEncodeSchema(ptr, pTable->schema[i]);
|
||||
}
|
||||
}
|
||||
|
||||
if (pTable->type == TSDB_STREAM_TABLE) {
|
||||
|
@ -72,6 +74,11 @@ void tsdbEncodeTable(STable *pTable, char *buf, int *contLen) {
|
|||
STable *tsdbDecodeTable(void *cont, int contLen) {
|
||||
STable *pTable = (STable *)calloc(1, sizeof(STable));
|
||||
if (pTable == NULL) return NULL;
|
||||
pTable->schema = (STSchema **)malloc(sizeof(STSchema *) * TSDB_MAX_TABLE_SCHEMAS);
|
||||
if (pTable->schema == NULL) {
|
||||
free(pTable);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *ptr = cont;
|
||||
T_READ_MEMBER(ptr, int8_t, pTable->type);
|
||||
|
@ -87,15 +94,20 @@ STable *tsdbDecodeTable(void *cont, int contLen) {
|
|||
T_READ_MEMBER(ptr, uint64_t, pTable->tableId.uid);
|
||||
T_READ_MEMBER(ptr, int32_t, pTable->tableId.tid);
|
||||
T_READ_MEMBER(ptr, uint64_t, pTable->superUid);
|
||||
T_READ_MEMBER(ptr, int32_t, pTable->sversion);
|
||||
|
||||
if (pTable->type == TSDB_SUPER_TABLE) {
|
||||
pTable->schema = tdDecodeSchema(&ptr);
|
||||
T_READ_MEMBER(ptr, int16_t, pTable->numOfSchemas);
|
||||
for (int i = 0; i < pTable->numOfSchemas; i++) {
|
||||
pTable->schema[i] = tdDecodeSchema(&ptr);
|
||||
}
|
||||
pTable->tagSchema = tdDecodeSchema(&ptr);
|
||||
} else if (pTable->type == TSDB_CHILD_TABLE) {
|
||||
ptr = tdDecodeKVRow(ptr, &pTable->tagVal);
|
||||
} else {
|
||||
pTable->schema = tdDecodeSchema(&ptr);
|
||||
T_READ_MEMBER(ptr, int16_t, pTable->numOfSchemas);
|
||||
for (int i = 0; i < pTable->numOfSchemas; i++) {
|
||||
pTable->schema[i] = tdDecodeSchema(&ptr);
|
||||
}
|
||||
}
|
||||
|
||||
if (pTable->type == TSDB_STREAM_TABLE) {
|
||||
|
@ -223,18 +235,45 @@ int32_t tsdbFreeMeta(STsdbMeta *pMeta) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
// Get the newest table schema
STSchema *tsdbGetTableSchema(STsdbMeta *pMeta, STable *pTable) {
  if (pTable->type == TSDB_NORMAL_TABLE || pTable->type == TSDB_SUPER_TABLE || pTable->type == TSDB_STREAM_TABLE) {
    return pTable->schema;
    return pTable->schema[pTable->numOfSchemas - 1];
  } else if (pTable->type == TSDB_CHILD_TABLE) {
    STable *pSuper = tsdbGetTableByUid(pMeta, pTable->superUid);
    if (pSuper == NULL) return NULL;
    return pSuper->schema;
    return pSuper->schema[pSuper->numOfSchemas-1];
  } else {
    return NULL;
  }
}

static int tsdbCompareSchemaVersion(const void *key1, const void *key2) {
  if (*(int16_t *)key1 < (*(STSchema **)key2)->version) {
    return -1;
  } else if (*(int16_t *)key1 > (*(STSchema **)key2)->version) {
    return 1;
  } else {
    return 0;
  }
}

STSchema *tsdbGetTableSchemaByVersion(STsdbMeta *pMeta, STable *pTable, int16_t version) {
  STable *pSearchTable = NULL;
  if (pTable->type == TSDB_CHILD_TABLE) {
    pSearchTable = tsdbGetTableByUid(pMeta, pTable->superUid);
  } else {
    pSearchTable = pTable;
  }
  ASSERT(pSearchTable != NULL);

  void *ptr = taosbsearch(&version, pSearchTable->schema, pSearchTable->numOfSchemas, sizeof(STSchema *),
                          tsdbCompareSchemaVersion, TD_EQ);
  if (ptr == NULL) return NULL;

  return *(STSchema **)ptr;
}
|
||||
|
||||
STSchema * tsdbGetTableTagSchema(STsdbMeta *pMeta, STable *pTable) {
|
||||
if (pTable->type == TSDB_SUPER_TABLE) {
|
||||
return pTable->tagSchema;
|
||||
|
@ -247,45 +286,33 @@ STSchema * tsdbGetTableTagSchema(STsdbMeta *pMeta, STable *pTable) {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t tsdbGetTableTagVal(TsdbRepoT* repo, STableId* id, int32_t colId, int16_t* type, int16_t* bytes, char** val) {
void* tsdbGetTableTagVal(TsdbRepoT* repo, const STableId* id, int32_t colId, int16_t type, int16_t bytes) {
  STsdbMeta* pMeta = tsdbGetMeta(repo);
  STable* pTable = tsdbGetTableByUid(pMeta, id->uid);

  STSchema *pSchema = tsdbGetTableTagSchema(pMeta, pTable);
  STColumn *pCol = tdGetColOfID(pSchema, colId);
  if (pCol == NULL) {
    return -1;  // No matched tag column
    return NULL;  // No matched tag column
  }

  *val = tdGetKVRowValOfCol(pTable->tagVal, colId);
  *type = pCol->type;
  char* val = tdGetKVRowValOfCol(pTable->tagVal, colId);
  assert(type == pCol->type && bytes == pCol->bytes);

  if (*val != NULL) {
    if (IS_VAR_DATA_TYPE(*type)) {
      *bytes = varDataLen(*val);
    } else {
      *bytes = TYPE_BYTES[*type];
    }
  if (val != NULL && IS_VAR_DATA_TYPE(type)) {
    assert(varDataLen(val) < pCol->bytes);
  }

  return TSDB_CODE_SUCCESS;
  return val;
}

char* tsdbGetTableName(TsdbRepoT *repo, const STableId* id, int16_t* bytes) {
char* tsdbGetTableName(TsdbRepoT *repo, const STableId* id) {
  STsdbMeta* pMeta = tsdbGetMeta(repo);
  STable* pTable = tsdbGetTableByUid(pMeta, id->uid);

  if (pTable == NULL) {
    if (bytes != NULL) {
      *bytes = 0;
    }

    return NULL;
  } else {
    if (bytes != NULL) {
      *bytes = varDataLen(pTable->name);
    }

    return (char*) pTable->name;
  }
}
|
||||
|
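For readers of this hunk, a minimal hedged sketch of how a caller might use the two reworked accessors above (the helper name and the colId/type/bytes arguments are illustrative, not taken from this commit; the caller is expected to pass the column's declared type and bytes, and both return values are var-data encoded):

#include <stdio.h>
// Assumes the tsdb headers declaring tsdbGetTableTagVal()/tsdbGetTableName(),
// IS_VAR_DATA_TYPE(), varDataLen() and varDataVal() are on the include path.

// Hypothetical helper, for illustration only.
static void dumpTagAndName(TsdbRepoT *repo, const STableId *id,
                           int32_t colId, int16_t type, int16_t bytes) {
  // New style: the tag value is returned directly instead of via out-parameters.
  void *tagVal = tsdbGetTableTagVal(repo, id, colId, type, bytes);
  if (tagVal != NULL && IS_VAR_DATA_TYPE(type)) {
    printf("tag: %.*s\n", (int)varDataLen(tagVal), (char *)varDataVal(tagVal));
  }

  // New style: no length out-parameter; the returned name is var-data encoded.
  char *name = tsdbGetTableName(repo, id);
  if (name != NULL) {
    printf("table: %.*s\n", (int)varDataLen(name), (char *)varDataVal(name));
  }
}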
@ -301,13 +328,16 @@ static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper) {
|
|||
}
|
||||
|
||||
pTable->type = pCfg->type;
|
||||
pTable->numOfSchemas = 0;
|
||||
|
||||
if (isSuper) {
|
||||
pTable->type = TSDB_SUPER_TABLE;
|
||||
pTable->tableId.uid = pCfg->superUid;
|
||||
pTable->tableId.tid = -1;
|
||||
pTable->superUid = TSDB_INVALID_SUPER_TABLE_ID;
|
||||
pTable->schema = tdDupSchema(pCfg->schema);
|
||||
pTable->schema = (STSchema **)malloc(sizeof(STSchema *) * TSDB_MAX_TABLE_SCHEMAS);
|
||||
pTable->numOfSchemas = 1;
|
||||
pTable->schema[0] = tdDupSchema(pCfg->schema);
|
||||
pTable->tagSchema = tdDupSchema(pCfg->tagSchema);
|
||||
|
||||
tsize = strnlen(pCfg->sname, TSDB_TABLE_NAME_LEN);
|
||||
|
@ -342,14 +372,18 @@ static STable *tsdbNewTable(STableCfg *pCfg, bool isSuper) {
|
|||
if (pCfg->type == TSDB_CHILD_TABLE) {
|
||||
pTable->superUid = pCfg->superUid;
|
||||
pTable->tagVal = tdKVRowDup(pCfg->tagValues);
|
||||
} else if (pCfg->type == TSDB_NORMAL_TABLE) {
|
||||
pTable->superUid = -1;
|
||||
pTable->schema = tdDupSchema(pCfg->schema);
|
||||
} else {
|
||||
ASSERT(pCfg->type == TSDB_STREAM_TABLE);
|
||||
pTable->superUid = -1;
|
||||
pTable->schema = tdDupSchema(pCfg->schema);
|
||||
pTable->sql = strdup(pCfg->sql);
|
||||
pTable->schema = (STSchema **)malloc(sizeof(STSchema *) * TSDB_MAX_TABLE_SCHEMAS);
|
||||
pTable->numOfSchemas = 1;
|
||||
pTable->schema[0] = tdDupSchema(pCfg->schema);
|
||||
|
||||
if (pCfg->type == TSDB_NORMAL_TABLE) {
|
||||
pTable->superUid = -1;
|
||||
} else {
|
||||
ASSERT(pCfg->type == TSDB_STREAM_TABLE);
|
||||
pTable->superUid = -1;
|
||||
pTable->sql = strdup(pCfg->sql);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -360,6 +394,56 @@ _err:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static int tsdbUpdateTableTagSchema(STable *pTable, STSchema *newSchema) {
|
||||
ASSERT(pTable->type == TSDB_SUPER_TABLE);
|
||||
ASSERT(schemaVersion(pTable->tagSchema) < schemaVersion(newSchema));
|
||||
STSchema *pOldSchema = pTable->tagSchema;
|
||||
STSchema *pNewSchema = tdDupSchema(newSchema);
|
||||
if (pNewSchema == NULL) return TSDB_CODE_SERV_OUT_OF_MEMORY;
|
||||
pTable->tagSchema = pNewSchema;
|
||||
tdFreeSchema(pOldSchema);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int tsdbUpdateTable(STsdbMeta *pMeta, STable *pTable, STableCfg *pCfg) {
|
||||
ASSERT(pTable->type != TSDB_CHILD_TABLE);
|
||||
bool isChanged = false;
|
||||
|
||||
if (pTable->type == TSDB_SUPER_TABLE) {
|
||||
if (schemaVersion(pTable->tagSchema) < schemaVersion(pCfg->tagSchema)) {
|
||||
int32_t code = tsdbUpdateTableTagSchema(pTable, pCfg->tagSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) return code;
|
||||
}
|
||||
isChanged = true;
|
||||
}
|
||||
|
||||
STSchema *pTSchema = tsdbGetTableSchema(pMeta, pTable);
|
||||
if (schemaVersion(pTSchema) < schemaVersion(pCfg->schema)) {
|
||||
if (pTable->numOfSchemas < TSDB_MAX_TABLE_SCHEMAS) {
|
||||
pTable->schema[pTable->numOfSchemas++] = tdDupSchema(pCfg->schema);
|
||||
} else {
|
||||
ASSERT(pTable->numOfSchemas == TSDB_MAX_TABLE_SCHEMAS);
|
||||
STSchema *tSchema = tdDupSchema(pCfg->schema);
|
||||
tdFreeSchema(pTable->schema[0]);
|
||||
memmove(pTable->schema, pTable->schema+1, sizeof(STSchema *) * (TSDB_MAX_TABLE_SCHEMAS - 1));
|
||||
pTable->schema[pTable->numOfSchemas-1] = tSchema;
|
||||
}
|
||||
|
||||
isChanged = true;
|
||||
}
|
||||
|
||||
if (isChanged) {
|
||||
char *buf = malloc(1024 * 1024);
|
||||
int bufLen = 0;
|
||||
tsdbEncodeTable(pTable, buf, &bufLen);
|
||||
tsdbInsertMetaRecord(pMeta->mfh, pTable->tableId.uid, buf, bufLen);
|
||||
free(buf);
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) {
|
||||
STsdbRepo *pRepo = (STsdbRepo *)repo;
|
||||
STsdbMeta *pMeta = pRepo->tsdbMeta;
|
||||
|
@ -384,6 +468,8 @@ int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) {
|
|||
if (super == NULL) return -1;
|
||||
} else {
|
||||
if (super->type != TSDB_SUPER_TABLE) return -1;
|
||||
if (super->tableId.uid != pCfg->superUid) return -1;
|
||||
tsdbUpdateTable(pMeta, super, pCfg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -458,23 +544,30 @@ STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg) {
|
|||
if (tsdbTableSetName(pCfg, pMsg->tableId, true) < 0) goto _err;
|
||||
|
||||
if (numOfTags > 0) {
|
||||
int accBytes = 0;
|
||||
char *pTagData = pMsg->data + (numOfCols + numOfTags) * sizeof(SSchema);
|
||||
|
||||
SKVRowBuilder kvRowBuilder = {0};
|
||||
// Decode tag schema
|
||||
tdResetTSchemaBuilder(&schemaBuilder, htonl(pMsg->tversion));
|
||||
if (tdInitKVRowBuilder(&kvRowBuilder) < 0) goto _err;
|
||||
for (int i = numOfCols; i < numOfCols + numOfTags; i++) {
|
||||
tdAddColToSchema(&schemaBuilder, pSchema[i].type, htons(pSchema[i].colId), htons(pSchema[i].bytes));
|
||||
tdAddColToKVRow(&kvRowBuilder, htons(pSchema[i].colId), pSchema[i].type, pTagData + accBytes);
|
||||
accBytes += htons(pSchema[i].bytes);
|
||||
}
|
||||
if (tsdbTableSetTagSchema(pCfg, tdGetSchemaFromBuilder(&schemaBuilder), false) < 0) goto _err;
|
||||
if (tsdbTableSetSName(pCfg, pMsg->superTableId, true) < 0) goto _err;
|
||||
if (tsdbTableSetSuperUid(pCfg, htobe64(pMsg->superTableUid)) < 0) goto _err;
|
||||
|
||||
tsdbTableSetTagValue(pCfg, tdGetKVRowFromBuilder(&kvRowBuilder), false);
|
||||
tdDestroyKVRowBuilder(&kvRowBuilder);
|
||||
// Decode tag values
|
||||
if (pMsg->tagDataLen) {
|
||||
int accBytes = 0;
|
||||
char *pTagData = pMsg->data + (numOfCols + numOfTags) * sizeof(SSchema);
|
||||
|
||||
SKVRowBuilder kvRowBuilder = {0};
|
||||
if (tdInitKVRowBuilder(&kvRowBuilder) < 0) goto _err;
|
||||
for (int i = numOfCols; i < numOfCols + numOfTags; i++) {
|
||||
tdAddColToKVRow(&kvRowBuilder, htons(pSchema[i].colId), pSchema[i].type, pTagData + accBytes);
|
||||
accBytes += htons(pSchema[i].bytes);
|
||||
}
|
||||
|
||||
tsdbTableSetTagValue(pCfg, tdGetKVRowFromBuilder(&kvRowBuilder), false);
|
||||
tdDestroyKVRowBuilder(&kvRowBuilder);
|
||||
}
|
||||
}
|
||||
|
||||
if (pMsg->tableType == TSDB_STREAM_TABLE) {
|
||||
|
@ -535,7 +628,7 @@ static int tsdbFreeTable(STable *pTable) {
|
|||
if (pTable->type == TSDB_CHILD_TABLE) {
|
||||
kvRowFree(pTable->tagVal);
|
||||
} else {
|
||||
tdFreeSchema(pTable->schema);
|
||||
for (int i = 0; i < pTable->numOfSchemas; i++) tdFreeSchema(pTable->schema[i]);
|
||||
}
|
||||
|
||||
if (pTable->type == TSDB_STREAM_TABLE) {
|
||||
|
@ -597,9 +690,10 @@ static int tsdbAddTableToMeta(STsdbMeta *pMeta, STable *pTable, bool addIdx) {
|
|||
}
|
||||
|
||||
// Update the pMeta->maxCols and pMeta->maxRowBytes
|
||||
if (pTable->type == TSDB_SUPER_TABLE || pTable->type == TSDB_NORMAL_TABLE) {
|
||||
if (schemaNCols(pTable->schema) > pMeta->maxCols) pMeta->maxCols = schemaNCols(pTable->schema);
|
||||
int bytes = dataRowMaxBytesFromSchema(pTable->schema);
|
||||
if (pTable->type == TSDB_SUPER_TABLE || pTable->type == TSDB_NORMAL_TABLE || pTable->type == TSDB_STREAM_TABLE) {
|
||||
if (schemaNCols(pTable->schema[pTable->numOfSchemas - 1]) > pMeta->maxCols)
|
||||
pMeta->maxCols = schemaNCols(pTable->schema[pTable->numOfSchemas - 1]);
|
||||
int bytes = dataRowMaxBytesFromSchema(pTable->schema[pTable->numOfSchemas - 1]);
|
||||
if (bytes > pMeta->maxRowBytes) pMeta->maxRowBytes = bytes;
|
||||
}
|
||||
|
||||
|
@ -648,7 +742,7 @@ static int tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable, bool rmFrom
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) {
|
||||
int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) {
|
||||
assert(pTable->type == TSDB_CHILD_TABLE && pTable != NULL);
|
||||
STable* pSTable = tsdbGetTableByUid(pMeta, pTable->superUid);
|
||||
assert(pSTable != NULL);
|
||||
|
@ -673,7 +767,7 @@ static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) {
|
||||
int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) {
|
||||
assert(pTable->type == TSDB_CHILD_TABLE && pTable != NULL);
|
||||
|
||||
STable* pSTable = tsdbGetTableByUid(pMeta, pTable->superUid);
|
||||
|
|
|
@ -289,8 +289,8 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
|
|||
|
||||
pHelper->tableInfo.tid = pTable->tableId.tid;
|
||||
pHelper->tableInfo.uid = pTable->tableId.uid;
|
||||
pHelper->tableInfo.sversion = pTable->sversion;
|
||||
STSchema *pSchema = tsdbGetTableSchema(pRepo->tsdbMeta, pTable);
|
||||
pHelper->tableInfo.sversion = schemaVersion(pSchema);
|
||||
|
||||
tdInitDataCols(pHelper->pDataCols[0], pSchema);
|
||||
tdInitDataCols(pHelper->pDataCols[1], pSchema);
|
||||
|
|
|
@ -193,7 +193,7 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
|
|||
}
|
||||
}
|
||||
|
||||
uTrace("%p total numOfTable:%d in query", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo));
|
||||
tsdbTrace("%p total numOfTable:%d in query", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo));
|
||||
|
||||
tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo);
|
||||
tsdbInitCompBlockLoadInfo(&pQueryHandle->compBlockLoadInfo);
|
||||
|
@ -282,10 +282,10 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
|
|||
|
||||
SDataRow row = SL_GET_NODE_DATA(node);
|
||||
TSKEY key = dataRowKey(row); // first timestamp in buffer
|
||||
uTrace("%p uid:%" PRId64", tid:%d check data in mem from skey:%" PRId64 ", order:%d, %p", pHandle,
|
||||
tsdbTrace("%p uid:%" PRId64", tid:%d check data in mem from skey:%" PRId64 ", order:%d, %p", pHandle,
|
||||
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pHandle->qinfo);
|
||||
} else {
|
||||
uTrace("%p uid:%" PRId64 ", tid:%d no data in mem", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid);
|
||||
tsdbTrace("%p uid:%" PRId64 ", tid:%d no data in mem", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid);
|
||||
}
|
||||
|
||||
if (!imemEmpty) {
|
||||
|
@ -294,10 +294,10 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
|
|||
|
||||
SDataRow row = SL_GET_NODE_DATA(node);
|
||||
TSKEY key = dataRowKey(row); // first timestamp in buffer
|
||||
uTrace("%p uid:%" PRId64", tid:%d check data in imem from skey:%" PRId64 ", order:%d, %p", pHandle,
|
||||
tsdbTrace("%p uid:%" PRId64", tid:%d check data in imem from skey:%" PRId64 ", order:%d, %p", pHandle,
|
||||
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pHandle->qinfo);
|
||||
} else {
|
||||
uTrace("%p uid:%"PRId64", tid:%d no data in imem", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid);
|
||||
tsdbTrace("%p uid:%"PRId64", tid:%d no data in imem", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid);
|
||||
}
|
||||
|
||||
return true;
|
||||
|
@ -338,7 +338,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
|
|||
|
||||
SDataRow row = SL_GET_NODE_DATA(node);
|
||||
pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer
|
||||
uTrace("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %p", pHandle,
|
||||
tsdbTrace("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %p", pHandle,
|
||||
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qinfo);
|
||||
|
||||
// all data in mem are checked already.
|
||||
|
@ -1038,7 +1038,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
|
|||
cur->rows = numOfRows;
|
||||
cur->pos = pos;
|
||||
|
||||
uTrace("%p uid:%" PRIu64",tid:%d data block created, brange:%"PRIu64"-%"PRIu64" %p", pQueryHandle, cur->win.skey,
|
||||
tsdbTrace("%p uid:%" PRIu64",tid:%d data block created, brange:%"PRIu64"-%"PRIu64" %p", pQueryHandle, cur->win.skey,
|
||||
cur->win.ekey, cur->rows, pQueryHandle->qinfo);
|
||||
}
|
||||
|
||||
|
@ -1138,7 +1138,7 @@ static int32_t dataBlockOrderCompar(const void* pLeft, const void* pRight, void*
|
|||
if (pLeftBlockInfoEx->compBlock->offset == pRightBlockInfoEx->compBlock->offset &&
|
||||
pLeftBlockInfoEx->compBlock->last == pRightBlockInfoEx->compBlock->last) {
|
||||
// todo add more information
|
||||
uError("error in header file, two block with same offset:%p", pLeftBlockInfoEx->compBlock->offset);
|
||||
tsdbError("error in header file, two block with same offset:%p", pLeftBlockInfoEx->compBlock->offset);
|
||||
}
|
||||
|
||||
return pLeftBlockInfoEx->compBlock->offset > pRightBlockInfoEx->compBlock->offset ? 1 : -1;
|
||||
|
@ -1200,7 +1200,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
|
|||
numOfQualTables++;
|
||||
}
|
||||
|
||||
uTrace("%p create data blocks info struct completed, %d blocks in %d tables", pQueryHandle, cnt, numOfQualTables);
|
||||
tsdbTrace("%p create data blocks info struct completed, %d blocks in %d tables", pQueryHandle, cnt, numOfQualTables);
|
||||
|
||||
assert(cnt <= numOfBlocks && numOfQualTables <= numOfTables); // the pTableQueryInfo[j]->numOfBlocks may be 0
|
||||
sup.numOfTables = numOfQualTables;
|
||||
|
@ -1236,7 +1236,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
|
|||
* }
|
||||
*/
|
||||
|
||||
uTrace("%p %d data blocks sort completed", pQueryHandle, cnt);
|
||||
tsdbTrace("%p %d data blocks sort completed", pQueryHandle, cnt);
|
||||
cleanBlockOrderSupporter(&sup, numOfTables);
|
||||
free(pTree);
|
||||
|
||||
|
@ -1257,7 +1257,7 @@ static bool getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle) {
|
|||
break;
|
||||
}
|
||||
|
||||
uTrace("%p %d blocks found in file for %d table(s), fid:%d", pQueryHandle, numOfBlocks,
|
||||
tsdbTrace("%p %d blocks found in file for %d table(s), fid:%d", pQueryHandle, numOfBlocks,
|
||||
numOfTables, pQueryHandle->pFileGroup->fileId);
|
||||
|
||||
assert(numOfBlocks >= 0);
|
||||
|
@ -1583,7 +1583,7 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
|
|||
if ((key > maxKey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
|
||||
(key < maxKey && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
|
||||
|
||||
uTrace("%p key:%"PRIu64" beyond qrange:%"PRId64" - %"PRId64", no more data in buffer", pQueryHandle, key, pQueryHandle->window.skey,
|
||||
tsdbTrace("%p key:%"PRIu64" beyond qrange:%"PRId64" - %"PRId64", no more data in buffer", pQueryHandle, key, pQueryHandle->window.skey,
|
||||
pQueryHandle->window.ekey);
|
||||
|
||||
break;
|
||||
|
@ -1958,7 +1958,7 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC
|
|||
|
||||
size_t size = taosArrayGetSize(pTableList);
|
||||
if (size == 0) {
|
||||
uTrace("no qualified tables");
|
||||
tsdbTrace("no qualified tables");
|
||||
return pTableGroup;
|
||||
}
|
||||
|
||||
|
@ -1970,7 +1970,7 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC
|
|||
}
|
||||
|
||||
taosArrayPush(pTableGroup, &sa);
|
||||
uTrace("all %d tables belong to one group", size);
|
||||
tsdbTrace("all %d tables belong to one group", size);
|
||||
} else {
|
||||
STableGroupSupporter *pSupp = (STableGroupSupporter *) calloc(1, sizeof(STableGroupSupporter));
|
||||
pSupp->tsdbMeta = tsdbGetMeta(tsdb);
|
||||
|
@ -2069,12 +2069,12 @@ int32_t tsdbQuerySTableByTagCond(TsdbRepoT* tsdb, uint64_t uid, const char* pTag
|
|||
SColIndex* pColIndex, int32_t numOfCols) {
|
||||
STable* pTable = tsdbGetTableByUid(tsdbGetMeta(tsdb), uid);
|
||||
if (pTable == NULL) {
|
||||
uError("%p failed to get stable, uid:%" PRIu64, tsdb, uid);
|
||||
tsdbError("%p failed to get stable, uid:%" PRIu64, tsdb, uid);
|
||||
return TSDB_CODE_INVALID_TABLE_ID;
|
||||
}
|
||||
|
||||
if (pTable->type != TSDB_SUPER_TABLE) {
|
||||
uError("%p query normal tag not allowed, uid:%" PRIu64 ", tid:%d, name:%s",
|
||||
tsdbError("%p query normal tag not allowed, uid:%" PRIu64 ", tid:%d, name:%s",
|
||||
tsdb, uid, pTable->tableId.tid, pTable->name);
|
||||
|
||||
return TSDB_CODE_OPS_NOT_SUPPORT; //basically, this error is caused by invalid sql issued by client
|
||||
|
@ -2090,7 +2090,7 @@ int32_t tsdbQuerySTableByTagCond(TsdbRepoT* tsdb, uint64_t uid, const char* pTag
|
|||
pGroupInfo->numOfTables = taosArrayGetSize(res);
|
||||
pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, tsdb);
|
||||
|
||||
uTrace("no tbname condition or tagcond, all tables belongs to one group, numOfTables:%d", pGroupInfo->numOfTables);
|
||||
tsdbTrace("no tbname condition or tagcond, all tables belongs to one group, numOfTables:%d", pGroupInfo->numOfTables);
|
||||
} else {
|
||||
// todo add error
|
||||
}
|
||||
|
|
|
@ -47,7 +47,7 @@ typedef struct SCacheDataNode {
|
|||
typedef struct STrashElem {
|
||||
struct STrashElem *prev;
|
||||
struct STrashElem *next;
|
||||
SCacheDataNode * pData;
|
||||
SCacheDataNode *pData;
|
||||
} STrashElem;
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -42,6 +42,11 @@ extern "C" {
  } \
}

#define tstrncpy(dst, src, size) do { \
  strncpy((dst), (src), (size)); \
  (dst)[(size) - 1] = 0;         \
} while (0);

#define tclose(x) taosCloseSocket(x)

// Pointer p drift right by b bytes
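As a side note on the tstrncpy macro added above, a small hedged usage sketch (the buffer and literal are illustrative only): unlike a bare strncpy, the macro always NUL-terminates the destination, truncating over-long input.

#include <stdio.h>
#include <string.h>
// Assumes the header that defines tstrncpy above is included.

int main(void) {
  char buf[8];
  // Copies at most sizeof(buf) bytes and then forces buf[sizeof(buf) - 1] = 0,
  // so buf is a valid C string even when the source is longer than the buffer.
  tstrncpy(buf, "a source string longer than eight bytes", sizeof(buf));
  printf("%s\n", buf);   // prints the truncated, NUL-terminated copy
  return 0;
}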
@ -113,7 +118,7 @@ extern "C" {
|
|||
|
||||
int32_t strdequote(char *src);
|
||||
|
||||
void strtrim(char *src);
|
||||
size_t strtrim(char *src);
|
||||
|
||||
char *strnchr(char *haystack, char needle, int32_t len, bool skipquote);
|
||||
|
||||
|
|
|
@ -529,7 +529,7 @@ void taosHashTableResize(SHashObj *pHashObj) {
|
|||
}
|
||||
|
||||
SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) {
|
||||
size_t totalSize = dsize + sizeof(SHashNode) + keyLen + 1; // one extra byte for null
|
||||
size_t totalSize = dsize + sizeof(SHashNode) + keyLen;
|
||||
|
||||
SHashNode *pNewNode = calloc(1, totalSize);
|
||||
if (pNewNode == NULL) {
|
||||
|
@ -544,7 +544,6 @@ SHashNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, s
|
|||
pNewNode->keyLen = keyLen;
|
||||
|
||||
pNewNode->hashVal = hashVal;
|
||||
|
||||
return pNewNode;
|
||||
}
|
||||
|
||||
|
@ -559,7 +558,6 @@ SHashNode *doUpdateHashNode(SHashNode *pNode, const void *key, size_t keyLen, co
|
|||
memcpy(pNewNode->data, pData, dsize);
|
||||
|
||||
pNewNode->key = pNewNode->data + dsize;
|
||||
|
||||
assert(memcmp(pNewNode->key, key, keyLen) == 0 && keyLen == pNewNode->keyLen);
|
||||
|
||||
memcpy(pNewNode->key, key, keyLen);
|
||||
|
|
|
@ -77,7 +77,7 @@ static FORCE_INLINE void taosFreeNode(void *data) {
|
|||
* @param lifespan total survival expiredTime from now
|
||||
* @return SCacheDataNode
|
||||
*/
|
||||
static SCacheDataNode *taosCreateHashNode(const char *key, size_t keyLen, const char *pData, size_t size,
|
||||
static SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pData, size_t size,
|
||||
uint64_t duration) {
|
||||
size_t totalSize = size + sizeof(SCacheDataNode) + keyLen + 1;
|
||||
|
||||
|
@ -242,13 +242,14 @@ static SCacheDataNode *taosUpdateCacheImpl(SCacheObj *pCacheObj, SCacheDataNode
|
|||
|
||||
// only a node is not referenced by any other object, in-place update it
|
||||
if (T_REF_VAL_GET(pNode) == 0) {
|
||||
size_t newSize = sizeof(SCacheDataNode) + dataSize + keyLen;
|
||||
size_t newSize = sizeof(SCacheDataNode) + dataSize + keyLen + 1;
|
||||
|
||||
pNewNode = (SCacheDataNode *)realloc(pNode, newSize);
|
||||
if (pNewNode == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(pNewNode, 0, newSize);
|
||||
pNewNode->signature = (uint64_t)pNewNode;
|
||||
memcpy(pNewNode->data, pData, dataSize);
|
||||
|
||||
|
@ -267,7 +268,7 @@ static SCacheDataNode *taosUpdateCacheImpl(SCacheObj *pCacheObj, SCacheDataNode
|
|||
} else {
|
||||
taosCacheMoveToTrash(pCacheObj, pNode);
|
||||
|
||||
pNewNode = taosCreateHashNode(key, keyLen, pData, dataSize, duration);
|
||||
pNewNode = taosCreateCacheNode(key, keyLen, pData, dataSize, duration);
|
||||
if (pNewNode == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
@ -293,7 +294,7 @@ static SCacheDataNode *taosUpdateCacheImpl(SCacheObj *pCacheObj, SCacheDataNode
|
|||
*/
|
||||
static FORCE_INLINE SCacheDataNode *taosAddToCacheImpl(SCacheObj *pCacheObj, const char *key, size_t keyLen, const void *pData,
|
||||
size_t dataSize, uint64_t duration) {
|
||||
SCacheDataNode *pNode = taosCreateHashNode(key, keyLen, pData, dataSize, duration);
|
||||
SCacheDataNode *pNode = taosCreateCacheNode(key, keyLen, pData, dataSize, duration);
|
||||
if (pNode == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -145,7 +145,7 @@ int taosReadQitem(taos_queue param, int *type, void **pitem) {
|
|||
queue->numOfItems--;
|
||||
if (queue->qset) atomic_sub_fetch_32(&queue->qset->numOfItems, 1);
|
||||
code = 1;
|
||||
uTrace("item:%p is read out from queue:%p, type:%d items:%d", *pitem, *type, queue->numOfItems);
|
||||
uTrace("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, *type, queue->numOfItems);
|
||||
}
|
||||
|
||||
pthread_mutex_unlock(&queue->mutex);
|
||||
|
|
|
@ -60,7 +60,7 @@ int32_t strdequote(char *z) {
  return j + 1; // only one quote, do nothing
}

void strtrim(char *z) {
size_t strtrim(char *z) {
  int32_t i = 0;
  int32_t j = 0;

@ -71,7 +71,7 @@ void strtrim(char *z) {
  if (z[j] == 0) {
    z[0] = 0;
    return;
    return 0;
  }

  delta = j;

@ -89,9 +89,12 @@ void strtrim(char *z) {
  if (stop > 0) {
    z[stop - delta] = 0;
    return (stop - delta);
  } else if (j != i) {
    z[i] = 0;
  }

  return i;
}

char **strsplit(char *z, const char *delim, int32_t *num) {
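A short hedged sketch of the revised strtrim() contract shown above (the sample buffer is illustrative): the function still trims in place, but now also returns the trimmed length, so callers no longer need a follow-up strlen().

#include <stdio.h>
// Assumes the utility header declaring size_t strtrim(char *) is included.

int main(void) {
  char s[] = "   hello world   ";
  size_t len = strtrim(s);              // trims leading/trailing blanks in place
  printf("'%s' (len=%zu)\n", s, len);   // expected: 'hello world' (len=11)
  return 0;
}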
@ -20,10 +20,10 @@ TEST(testCase, string_dequote_test) {
|
|||
EXPECT_STRCASEEQ(t1, "abc");
|
||||
|
||||
char t21[] = " abc ";
|
||||
strtrim(t21);
|
||||
int32_t lx = strtrim(t21);
|
||||
|
||||
EXPECT_STREQ("abc", t21);
|
||||
EXPECT_EQ(3, strlen(t21));
|
||||
EXPECT_EQ(3, lx);
|
||||
}
|
||||
|
||||
TEST(testCase, string_replace_test) {
|
||||
|
|
|
@ -224,6 +224,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
|
|||
appH.cqH = pVnode->cq;
|
||||
appH.cqCreateFunc = cqCreate;
|
||||
appH.cqDropFunc = cqDrop;
|
||||
appH.configFunc = dnodeSendCfgTableToRecv;
|
||||
sprintf(temp, "%s/tsdb", rootDir);
|
||||
pVnode->tsdb = tsdbOpenRepo(temp, &appH);
|
||||
if (pVnode->tsdb == NULL) {
|
||||
|
@ -473,6 +474,7 @@ static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion) {
|
|||
appH.cqH = pVnode->cq;
|
||||
appH.cqCreateFunc = cqCreate;
|
||||
appH.cqDropFunc = cqDrop;
|
||||
appH.configFunc = dnodeSendCfgTableToRecv;
|
||||
pVnode->tsdb = tsdbOpenRepo(rootDir, &appH);
|
||||
}
|
||||
|
||||
|
|
|
@ -29,11 +29,12 @@
|
|||
#include "tcq.h"
|
||||
|
||||
static int32_t (*vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *, void *, SRspRet *);
|
||||
static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
|
||||
static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
|
||||
static int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
|
||||
static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
|
||||
static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
|
||||
static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
|
||||
static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
|
||||
static int32_t vnodeProcessDropTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
|
||||
static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
|
||||
static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pMsg, SRspRet *);
|
||||
static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet);
|
||||
|
||||
void vnodeInitWriteFp(void) {
|
||||
vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessSubmitMsg;
|
||||
|
@ -41,6 +42,7 @@ void vnodeInitWriteFp(void) {
|
|||
vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MD_DROP_TABLE] = vnodeProcessDropTableMsg;
|
||||
vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE] = vnodeProcessAlterTableMsg;
|
||||
vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = vnodeProcessDropStableMsg;
|
||||
vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = vnodeProcessUpdateTagValMsg;
|
||||
}
|
||||
|
||||
int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) {
|
||||
|
@ -110,7 +112,6 @@ static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pCont, SRspRe
|
|||
int32_t code = tsdbCreateTable(pVnode->tsdb, pCfg);
|
||||
|
||||
tsdbClearTableCfg(pCfg);
|
||||
free(pCfg);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -134,7 +135,6 @@ static int32_t vnodeProcessAlterTableMsg(SVnodeObj *pVnode, void *pCont, SRspRet
|
|||
if (pCfg == NULL) return terrno;
|
||||
int32_t code = tsdbAlterTable(pVnode->tsdb, pCfg);
|
||||
tsdbClearTableCfg(pCfg);
|
||||
free(pCfg);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -156,6 +156,10 @@ static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) {
|
||||
return tsdbUpdateTagValue(pVnode->tsdb, (SUpdateTableTagValMsg *)pCont);
|
||||
}
|
||||
|
||||
int vnodeWriteToQueue(void *param, void *data, int type) {
|
||||
SVnodeObj *pVnode = param;
|
||||
SWalHead *pHead = data;
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
//#define _DEFAULT_SOURCE
|
||||
#include "os.h"
|
||||
#include "tutil.h"
|
||||
#include "tglobal.h"
|
||||
#include "tlog.h"
|
||||
#include "twal.h"
|
||||
|
@ -45,7 +46,7 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
for (int i=1; i<argc; ++i) {
|
||||
if (strcmp(argv[i], "-p")==0 && i < argc-1) {
|
||||
strcpy(path, argv[++i]);
|
||||
tstrncpy(path, argv[++i], sizeof(path));
|
||||
} else if (strcmp(argv[i], "-m")==0 && i < argc-1) {
|
||||
max = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-l")==0 && i < argc-1) {
|
||||
|
|
|
@ -26,13 +26,12 @@
void taosMsleep(int mseconds);

static int32_t doQuery(TAOS* taos, const char* sql) {
  int32_t code = taos_query(taos, sql);
  if (code != 0) {
  TAOS_RES* res = taos_query(taos, sql);
  if (taos_errno(res) != 0) {
    printf("failed to execute query, reason:%s\n", taos_errstr(taos));
    return -1;
  }

  TAOS_RES* res = taos_use_result(taos);
  TAOS_ROW row = NULL;
  char buf[512] = {0};

@ -46,7 +45,6 @@ static int32_t doQuery(TAOS* taos, const char* sql) {
  }

  taos_free_result(res);

  return 0;
}
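A condensed hedged sketch of the client-API pattern the doQuery() change above switches to (connection setup is omitted and the helper name is illustrative): taos_query() returns a TAOS_RES*, errors are read from that result with taos_errno()/taos_errstr(), and the result is released with taos_free_result() on every path.

#include <stdio.h>
#include "taos.h"

// Illustrative helper only; assumes 'taos' is an already-opened connection.
static int runSql(TAOS *taos, const char *sql) {
  TAOS_RES *res = taos_query(taos, sql);   // always yields a result handle
  if (taos_errno(res) != 0) {              // error code now lives on the result
    printf("query failed, reason:%s\n", taos_errstr(res));
    taos_free_result(res);                 // free the result even on error
    return -1;
  }

  TAOS_ROW row;
  while ((row = taos_fetch_row(res)) != NULL) {
    (void)row;                             // placeholder for real row handling
  }

  taos_free_result(res);                   // release the result when done
  return 0;
}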
@ -81,6 +79,7 @@ static __attribute__((unused)) void multiThreadTest(int32_t numOfThreads, void*
|
|||
pthread_join(threadId[i], NULL);
|
||||
}
|
||||
|
||||
free(threadId);
|
||||
pthread_attr_destroy(&thattr);
|
||||
}
|
||||
|
||||
|
@ -95,7 +94,7 @@ int main(int argc, char *argv[]) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
taos_options(TSDB_OPTION_CONFIGDIR, "/home/lisa/Documents/workspace/TDinternal/community/sim/tsim/cfg");
|
||||
taos_options(TSDB_OPTION_CONFIGDIR, "~/sec/cfg");
|
||||
|
||||
// init TAOS
|
||||
taos_init();
|
||||
|
@ -108,25 +107,13 @@ int main(int argc, char *argv[]) {
|
|||
printf("success to connect to server\n");
|
||||
|
||||
// multiThreadTest(1, taos);
|
||||
doQuery(taos, "select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_db0.lm2_stb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(value, -1, -2) group by t1 limit 2 offset 10;");
|
||||
doQuery(taos, "use test");
|
||||
doQuery(taos, "alter table tm99 set tag a=99");
|
||||
// for(int32_t i = 0; i < 100000; ++i) {
|
||||
// doQuery(taos, "insert into t1 values(now, 2)");
|
||||
// }
|
||||
// doQuery(taos, "create table t1(ts timestamp, k binary(12), f nchar(2))");
|
||||
|
||||
// doQuery(taos, "insert into tm0 values('2020-1-1 1:1:1', 'abc')");
|
||||
// doQuery(taos, "create table if not exists tm0 (ts timestamp, k int);");
|
||||
// doQuery(taos, "insert into tm0 values('2020-1-1 1:1:1', 1);");
|
||||
// doQuery(taos, "insert into tm0 values('2020-1-1 1:1:2', 2);");
|
||||
// doQuery(taos, "insert into tm0 values('2020-1-1 1:1:3', 3);");
|
||||
// doQuery(taos, "insert into tm0 values('2020-1-1 1:1:4', 4);");
|
||||
// doQuery(taos, "insert into tm0 values('2020-1-1 1:1:5', 5);");
|
||||
// doQuery(taos, "insert into tm0 values('2020-1-1 1:1:6', 6);");
|
||||
// doQuery(taos, "insert into tm0 values('2020-1-1 1:1:7', 7);");
|
||||
// doQuery(taos, "insert into tm0 values('2020-1-1 1:1:8', 8);");
|
||||
// doQuery(taos, "insert into tm0 values('2020-1-1 1:1:9', 9);");
|
||||
// doQuery(taos, "select sum(k),count(*) from m1 group by a");
|
||||
|
||||
taos_close(taos);
|
||||
return 0;
|
||||
|
||||
|
@ -172,10 +159,6 @@ int main(int argc, char *argv[]) {
|
|||
exit(1);
|
||||
}
|
||||
|
||||
|
||||
result = taos_use_result(taos);
|
||||
|
||||
|
||||
if (result == NULL) {
|
||||
printf("failed to get result, reason:%s\n", taos_errstr(taos));
|
||||
exit(1);
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "taos.h"
|
||||
|
||||
|
||||
|
@ -14,6 +13,7 @@ int main(int argc, char *argv[])
|
|||
{
|
||||
TAOS *taos;
|
||||
TAOS_RES *result;
|
||||
int code;
|
||||
TAOS_STMT *stmt;
|
||||
|
||||
// connect to server
|
||||
|
@ -31,21 +31,31 @@ int main(int argc, char *argv[])
|
|||
exit(1);
|
||||
}
|
||||
|
||||
taos_query(taos, "drop database demo");
|
||||
if (taos_query(taos, "create database demo") != 0) {
|
||||
printf("failed to create database, reason:%s\n", taos_errstr(taos));
|
||||
result = taos_query(taos, "drop database demo");
|
||||
taos_free_result(result);
|
||||
|
||||
result = taos_query(taos, "create database demo");
|
||||
code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
printf("failed to create database, reason:%s\n", taos_errstr(result));
|
||||
taos_free_result(result);
|
||||
exit(1);
|
||||
}
|
||||
taos_free_result(result);
|
||||
|
||||
taos_query(taos, "use demo");
|
||||
|
||||
result = taos_query(taos, "use demo");
|
||||
taos_free_result(result);
|
||||
|
||||
// create table
|
||||
const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10))";
|
||||
if (taos_query(taos, sql) != 0) {
|
||||
printf("failed to create table, reason:%s\n", taos_errstr(taos));
|
||||
result = taos_query(taos, sql);
|
||||
code = taos_errno(result);
|
||||
if (code != 0) {
|
||||
printf("failed to create table, reason:%s\n", taos_errstr(result));
|
||||
taos_free_result(result);
|
||||
exit(1);
|
||||
}
|
||||
taos_free_result(result);
|
||||
|
||||
// sleep for one second to make sure table is created on data node
|
||||
// taosMsleep(1000);
|
||||
|
@ -130,7 +140,7 @@ int main(int argc, char *argv[])
|
|||
int is_null = 1;
|
||||
|
||||
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?)";
|
||||
int code = taos_stmt_prepare(stmt, sql, 0);
|
||||
code = taos_stmt_prepare(stmt, sql, 0);
|
||||
if (code != 0){
|
||||
printf("failed to execute taos_stmt_prepare. code:0x%x\n", code);
|
||||
}
|
||||
|
@ -159,7 +169,6 @@ int main(int argc, char *argv[])
|
|||
exit(1);
|
||||
}
|
||||
taos_stmt_close(stmt);
|
||||
printf("==== success inset data ====.\n");
|
||||
|
||||
// query the records
|
||||
stmt = taos_stmt_init(taos);
|
||||
|
|
|
@ -609,7 +609,8 @@ class StateDbOnly(AnyState):
|
|||
]
|
||||
|
||||
def verifyTasksToState(self, tasks, newState):
|
||||
self.assertAtMostOneSuccess(tasks, DropDbTask) # not true in massively parallel cases
|
||||
if ( not self.hasTask(tasks, CreateDbTask) ):
|
||||
self.assertAtMostOneSuccess(tasks, DropDbTask) # only if we don't create any more
|
||||
self.assertIfExistThenSuccess(tasks, DropDbTask)
|
||||
# self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # not true in massively parallel cases
|
||||
# Nothing to be said about adding data task
|
||||
|
@ -619,7 +620,8 @@ class StateDbOnly(AnyState):
|
|||
# self._state = self.STATE_EMPTY
|
||||
elif ( self.hasSuccess(tasks, CreateFixedSuperTableTask) ): # did not drop db, create table success
|
||||
# self.assertHasTask(tasks, CreateFixedTableTask) # tried to create table
|
||||
self.assertAtMostOneSuccess(tasks, CreateFixedSuperTableTask) # at most 1 attempt is successful
|
||||
if ( not self.hasTask(tasks, DropFixedSuperTableTask) ):
|
||||
self.assertAtMostOneSuccess(tasks, CreateFixedSuperTableTask) # at most 1 attempt is successful, if we don't drop anything
|
||||
self.assertNoTask(tasks, DropDbTask) # should have have tried
|
||||
# if ( not self.hasSuccess(tasks, AddFixedDataTask) ): # just created table, no data yet
|
||||
# # can't say there's add-data attempts, since they may all fail
|
||||
|
@ -674,7 +676,7 @@ class StateHasData(AnyState):
|
|||
if ( not self.hasTask(tasks, CreateDbTask)): # without a create_db task
|
||||
self.assertNoTask(tasks, DropDbTask) # we must have drop_db task
|
||||
self.hasSuccess(tasks, DropFixedSuperTableTask)
|
||||
self.assertAtMostOneSuccess(tasks, DropFixedSuperTableTask) # TODO: dicy
|
||||
# self.assertAtMostOneSuccess(tasks, DropFixedSuperTableTask) # TODO: dicy
|
||||
elif ( newState.equals(AnyState.STATE_TABLE_ONLY) ): # data deleted
|
||||
self.assertNoTask(tasks, DropDbTask)
|
||||
self.assertNoTask(tasks, DropFixedSuperTableTask)
|
||||
|
@ -689,9 +691,9 @@ class StateHasData(AnyState):
|
|||
# State of the database as we believe it to be
|
||||
class DbState():
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self, resetDb = True):
|
||||
self.tableNumQueue = LinearQueue()
|
||||
self._lastTick = datetime.datetime(2019, 1, 1) # initial date time tick
|
||||
self._lastTick = self.setupLastTick() # datetime.datetime(2019, 1, 1) # initial date time tick
|
||||
self._lastInt = 0 # next one is initial integer
|
||||
self._lock = threading.RLock()
|
||||
|
||||
|
@ -712,12 +714,32 @@ class DbState():
|
|||
except:
|
||||
print("[=] Unexpected exception")
|
||||
raise
|
||||
self._dbConn.resetDb() # drop and recreate DB
|
||||
self._state = StateEmpty() # initial state, the result of above
|
||||
|
||||
if resetDb :
|
||||
self._dbConn.resetDb() # drop and recreate DB
|
||||
self._state = self._findCurrentState()
|
||||
|
||||
def getDbConn(self):
|
||||
return self._dbConn
|
||||
|
||||
def getState(self):
|
||||
return self._state
|
||||
|
||||
# We aim to create a starting time tick, such that, whenever we run our test here once
|
||||
# We should be able to safely create 100,000 records, which will not have any repeated time stamp
|
||||
# when we re-run the test in 3 minutes (180 seconds), basically we should expand time duration
|
||||
# by a factor of 500.
|
||||
# TODO: what if it goes beyond 10 years into the future
|
||||
def setupLastTick(self):
|
||||
t1 = datetime.datetime(2020, 5, 30)
|
||||
t2 = datetime.datetime.now()
|
||||
elSec = t2.timestamp() - t1.timestamp()
|
||||
# print("elSec = {}".format(elSec))
|
||||
t3 = datetime.datetime(2012, 1, 1) # default "keep" is 10 years
|
||||
t4 = datetime.datetime.fromtimestamp( t3.timestamp() + elSec * 500) # see explanation above
|
||||
logger.info("Setting up TICKS to start from: {}".format(t4))
|
||||
return t4
|
||||
|
||||
def pickAndAllocateTable(self): # pick any table, and "use" it
|
||||
return self.tableNumQueue.pickAndAllocate()
|
||||
|
||||
|
@ -743,7 +765,7 @@ class DbState():
|
|||
return self._lastInt
|
||||
|
||||
def getNextBinary(self):
|
||||
return "Los_Angeles_{}".format(self.getNextInt())
|
||||
return "Beijing_Shanghai_Los_Angeles_New_York_San_Francisco_Chicago_Beijing_Shanghai_Los_Angeles_New_York_San_Francisco_Chicago_{}".format(self.getNextInt())
|
||||
|
||||
def getNextFloat(self):
|
||||
return 0.9 + self.getNextInt()
|
||||
|
@ -1089,7 +1111,7 @@ class CreateFixedSuperTableTask(StateTransitionTask):
|
|||
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
tblName = self._dbState.getFixedSuperTableName()
|
||||
wt.execSql("create table db.{} (ts timestamp, speed int) tags (b binary(20), f float) ".format(tblName))
|
||||
wt.execSql("create table db.{} (ts timestamp, speed int) tags (b binary(200), f float) ".format(tblName))
|
||||
# No need to create the regular tables, INSERT will do that automatically
|
||||
|
||||
|
||||
|
@ -1148,12 +1170,13 @@ class AddFixedDataTask(StateTransitionTask):
|
|||
ds = self._dbState
|
||||
wt.execSql("use db") # TODO: seems to be an INSERT bug to require this
|
||||
for i in range(10): # 0 to 9
|
||||
sql = "insert into db.reg_table_{} using {} tags ('{}', {}) values ('{}', {});".format(
|
||||
i,
|
||||
ds.getFixedSuperTableName(),
|
||||
ds.getNextBinary(), ds.getNextFloat(),
|
||||
ds.getNextTick(), ds.getNextInt())
|
||||
wt.execSql(sql)
|
||||
for j in range(10) :
|
||||
sql = "insert into db.reg_table_{} using {} tags ('{}', {}) values ('{}', {});".format(
|
||||
i,
|
||||
ds.getFixedSuperTableName(),
|
||||
ds.getNextBinary(), ds.getNextFloat(),
|
||||
ds.getNextTick(), ds.getNextInt())
|
||||
wt.execSql(sql)
|
||||
|
||||
|
||||
#---------- Non State-Transition Related Tasks ----------#
|
||||
|
@ -1301,7 +1324,9 @@ def main():
|
|||
ch = logging.StreamHandler()
|
||||
logger.addHandler(ch)
|
||||
|
||||
dbState = DbState()
|
||||
# resetDb = False # DEBUG only
|
||||
# dbState = DbState(resetDb) # DBEUG only!
|
||||
dbState = DbState() # Regular function
|
||||
Dice.seed(0) # initial seeding of dice
|
||||
tc = ThreadCoordinator(
|
||||
ThreadPool(dbState, gConfig.num_threads, gConfig.max_steps, 0),
|
||||
|
@ -1309,6 +1334,43 @@ def main():
|
|||
dbState
|
||||
)
|
||||
|
||||
# # Hack to exercise reading from disk, increasing coverage. TODO: fix
|
||||
# dbc = dbState.getDbConn()
|
||||
# sTbName = dbState.getFixedSuperTableName()
|
||||
# dbc.execute("create database if not exists db")
|
||||
# if not dbState.getState().equals(StateEmpty()):
|
||||
# dbc.execute("use db")
|
||||
|
||||
# rTables = None
|
||||
# try: # the super table may not exist
|
||||
# sql = "select TBNAME from db.{}".format(sTbName)
|
||||
# logger.info("Finding out tables in super table: {}".format(sql))
|
||||
# dbc.query(sql) # TODO: analyze result set later
|
||||
# logger.info("Fetching result")
|
||||
# rTables = dbc.getQueryResult()
|
||||
# logger.info("Result: {}".format(rTables))
|
||||
# except taos.error.ProgrammingError as err:
|
||||
# logger.info("Initial Super table OPS error: {}".format(err))
|
||||
|
||||
# # sys.exit()
|
||||
# if ( not rTables == None):
|
||||
# # print("rTables[0] = {}, type = {}".format(rTables[0], type(rTables[0])))
|
||||
# try:
|
||||
# for rTbName in rTables : # regular tables
|
||||
# ds = dbState
|
||||
# logger.info("Inserting into table: {}".format(rTbName[0]))
|
||||
# sql = "insert into db.{} values ('{}', {});".format(
|
||||
# rTbName[0],
|
||||
# ds.getNextTick(), ds.getNextInt())
|
||||
# dbc.execute(sql)
|
||||
# for rTbName in rTables : # regular tables
|
||||
# dbc.query("select * from db.{}".format(rTbName[0])) # TODO: check success failure
|
||||
# logger.info("Initial READING operation is successful")
|
||||
# except taos.error.ProgrammingError as err:
|
||||
# logger.info("Initial WRITE/READ error: {}".format(err))
|
||||
|
||||
|
||||
|
||||
# Sandbox testing code
|
||||
# dbc = dbState.getDbConn()
|
||||
# while True:
|
||||
|
|
|
@ -1,831 +0,0 @@
|
|||
#!/usr/bin/python3.7
|
||||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import annotations # For type hinting before definition, ref: https://stackoverflow.com/questions/33533148/how-do-i-specify-that-the-return-type-of-a-method-is-the-same-as-the-class-itsel
|
||||
|
||||
import sys
|
||||
# Require Python 3
|
||||
if sys.version_info[0] < 3:
|
||||
raise Exception("Must be using Python 3")
|
||||
|
||||
import getopt
|
||||
import argparse
|
||||
import copy
|
||||
|
||||
import threading
|
||||
import random
|
||||
import logging
|
||||
import datetime
|
||||
import textwrap
|
||||
|
||||
from typing import List
|
||||
|
||||
from util.log import *
|
||||
from util.dnodes import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
|
||||
import crash_gen
|
||||
import taos
|
||||
|
||||
# Global variables, tried to keep a small number.
|
||||
gConfig = None # Command-line/Environment Configurations, will set a bit later
|
||||
logger = None
|
||||
|
||||
def runThread(wt: WorkerThread):
|
||||
wt.run()
|
||||
|
||||
class WorkerThread:
|
||||
def __init__(self, pool: ThreadPool, tid,
|
||||
tc: ThreadCoordinator,
|
||||
# te: TaskExecutor,
|
||||
): # note: main thread context!
|
||||
# self._curStep = -1
|
||||
self._pool = pool
|
||||
self._tid = tid
|
||||
self._tc = tc
|
||||
# self.threadIdent = threading.get_ident()
|
||||
self._thread = threading.Thread(target=runThread, args=(self,))
|
||||
self._stepGate = threading.Event()
|
||||
|
||||
# Let us have a DB connection of our own
|
||||
if ( gConfig.per_thread_db_connection ): # type: ignore
|
||||
self._dbConn = DbConn()
|
||||
|
||||
def logDebug(self, msg):
|
||||
logger.info(" t[{}] {}".format(self._tid, msg))
|
||||
|
||||
def logInfo(self, msg):
|
||||
logger.info(" t[{}] {}".format(self._tid, msg))
|
||||
|
||||
|
||||
def getTaskExecutor(self):
|
||||
return self._tc.getTaskExecutor()
|
||||
|
||||
def start(self):
|
||||
self._thread.start() # AFTER the thread is recorded
|
||||
|
||||
def run(self):
|
||||
# initialization after thread starts, in the thread context
|
||||
# self.isSleeping = False
|
||||
logger.info("Starting to run thread: {}".format(self._tid))
|
||||
|
||||
if ( gConfig.per_thread_db_connection ): # type: ignore
|
||||
self._dbConn.open()
|
||||
|
||||
self._doTaskLoop()
|
||||
|
||||
# clean up
|
||||
if ( gConfig.per_thread_db_connection ): # type: ignore
|
||||
self._dbConn.close()
|
||||
|
||||
def _doTaskLoop(self) :
|
||||
# while self._curStep < self._pool.maxSteps:
|
||||
# tc = ThreadCoordinator(None)
|
||||
while True:
|
||||
tc = self._tc # Thread Coordinator, the overall master
|
||||
tc.crossStepBarrier() # shared barrier first, INCLUDING the last one
|
||||
logger.debug("Thread task loop exited barrier...")
|
||||
self.crossStepGate() # then per-thread gate, after being tapped
|
||||
logger.debug("Thread task loop exited step gate...")
|
||||
if not self._tc.isRunning():
|
||||
break
|
||||
|
||||
task = tc.fetchTask()
|
||||
task.execute(self)
|
||||
tc.saveExecutedTask(task)
|
||||
|
||||
def verifyThreadSelf(self): # ensure we are called by this own thread
|
||||
if ( threading.get_ident() != self._thread.ident ):
|
||||
raise RuntimeError("Unexpectly called from other threads")
|
||||
|
||||
def verifyThreadMain(self): # ensure we are called by the main thread
|
||||
if ( threading.get_ident() != threading.main_thread().ident ):
|
||||
raise RuntimeError("Unexpectly called from other threads")
|
||||
|
||||
def verifyThreadAlive(self):
|
||||
if ( not self._thread.is_alive() ):
|
||||
raise RuntimeError("Unexpected dead thread")
|
||||
|
||||
# A gate is different from a barrier in that a thread needs to be "tapped"
|
||||
def crossStepGate(self):
|
||||
self.verifyThreadAlive()
|
||||
self.verifyThreadSelf() # only allowed by ourselves
|
||||
|
||||
# Wait again at the "gate", waiting to be "tapped"
|
||||
# logger.debug("Worker thread {} about to cross the step gate".format(self._tid))
|
||||
self._stepGate.wait()
|
||||
self._stepGate.clear()
|
||||
|
||||
# self._curStep += 1 # off to a new step...
|
||||
|
||||
def tapStepGate(self): # give it a tap, release the thread waiting there
|
||||
self.verifyThreadAlive()
|
||||
self.verifyThreadMain() # only allowed for main thread
|
||||
|
||||
logger.debug("Tapping worker thread {}".format(self._tid))
|
||||
self._stepGate.set() # wake up!
|
||||
time.sleep(0) # let the released thread run a bit
|
||||
|
||||
def execSql(self, sql): # not "execute", since we are out side the DB context
|
||||
if ( gConfig.per_thread_db_connection ):
|
||||
return self._dbConn.execute(sql)
|
||||
else:
|
||||
return self._tc.getDbState().getDbConn().execute(sql)
|
||||
|
||||
def querySql(self, sql): # not "execute", since we are out side the DB context
|
||||
if ( gConfig.per_thread_db_connection ):
|
||||
return self._dbConn.query(sql)
|
||||
else:
|
||||
return self._tc.getDbState().getDbConn().query(sql)
|
||||
|
||||
class ThreadCoordinator:
|
||||
def __init__(self, pool, wd: WorkDispatcher, dbState):
|
||||
self._curStep = -1 # first step is 0
|
||||
self._pool = pool
|
||||
self._wd = wd
|
||||
self._te = None # prepare for every new step
|
||||
self._dbState = dbState
|
||||
self._executedTasks: List[Task] = [] # in a given step
|
||||
self._lock = threading.RLock() # sync access for a few things
|
||||
|
||||
self._stepBarrier = threading.Barrier(self._pool.numThreads + 1) # one barrier for all threads
|
||||
|
||||
def getTaskExecutor(self):
|
||||
return self._te
|
||||
|
||||
def getDbState(self) -> DbState :
|
||||
return self._dbState
|
||||
|
||||
def crossStepBarrier(self):
|
||||
self._stepBarrier.wait()
|
||||
|
||||
def run(self):
|
||||
self._pool.createAndStartThreads(self)
|
||||
|
||||
# Coordinate all threads step by step
|
||||
self._curStep = -1 # not started yet
|
||||
maxSteps = gConfig.max_steps # type: ignore
|
||||
while(self._curStep < maxSteps):
|
||||
print(".", end="", flush=True)
|
||||
logger.debug("Main thread going to sleep")
|
||||
|
||||
# Now ready to enter a step
|
||||
self.crossStepBarrier() # let other threads go past the pool barrier, but wait at the thread gate
|
||||
self._stepBarrier.reset() # Other worker threads should now be at the "gate"
|
||||
|
||||
# At this point, all threads should be pass the overall "barrier" and before the per-thread "gate"
|
||||
self._dbState.transition(self._executedTasks) # at end of step, transiton the DB state
|
||||
self.resetExecutedTasks() # clear the tasks after we are done
|
||||
|
||||
# Get ready for next step
|
||||
logger.info("<-- Step {} finished".format(self._curStep))
|
||||
self._curStep += 1 # we are about to get into next step. TODO: race condition here!
|
||||
logger.debug("\r\n--> Step {} starts with main thread waking up".format(self._curStep)) # Now not all threads had time to go to sleep
|
||||
|
||||
# A new TE for the new step
|
||||
self._te = TaskExecutor(self._curStep)
|
||||
|
||||
logger.debug("Main thread waking up at step {}, tapping worker threads".format(self._curStep)) # Now not all threads had time to go to sleep
|
||||
self.tapAllThreads()
|
||||
|
||||
logger.debug("Main thread ready to finish up...")
|
||||
self.crossStepBarrier() # Cross it one last time, after all threads finish
|
||||
self._stepBarrier.reset()
|
||||
logger.debug("Main thread in exclusive zone...")
|
||||
self._te = None # No more executor, time to end
|
||||
logger.debug("Main thread tapping all threads one last time...")
|
||||
self.tapAllThreads() # Let the threads run one last time
|
||||
logger.debug("Main thread joining all threads")
|
||||
self._pool.joinAll() # Get all threads to finish
|
||||
|
||||
logger.info("All threads finished")
|
||||
print("\r\nFinished")
|
||||
|
||||
def tapAllThreads(self): # in a deterministic manner
|
||||
wakeSeq = []
|
||||
for i in range(self._pool.numThreads): # generate a random sequence
|
||||
if Dice.throw(2) == 1 :
|
||||
wakeSeq.append(i)
|
||||
else:
|
||||
wakeSeq.insert(0, i)
|
||||
logger.info("Waking up threads: {}".format(str(wakeSeq)))
|
||||
# TODO: set dice seed to a deterministic value
|
||||
for i in wakeSeq:
|
||||
self._pool.threadList[i].tapStepGate() # TODO: maybe a bit too deep?!
|
||||
time.sleep(0) # yield
|
||||
|
||||
def isRunning(self):
|
||||
return self._te != None
|
||||
|
||||
def fetchTask(self) -> Task :
|
||||
if ( not self.isRunning() ): # no task
|
||||
raise RuntimeError("Cannot fetch task when not running")
|
||||
# return self._wd.pickTask()
|
||||
# Alternatively, let's ask the DbState for the appropriate task
|
||||
dbState = self.getDbState()
|
||||
tasks = dbState.getTasksAtState()
|
||||
i = Dice.throw(len(tasks))
|
||||
# return copy.copy(tasks[i]) # Needs a fresh copy, to save execution results, etc.
|
||||
return tasks[i].clone()
|
||||
|
||||
def resetExecutedTasks(self):
|
||||
self._executedTasks = [] # should be under single thread
|
||||
|
||||
def saveExecutedTask(self, task):
|
||||
with self._lock:
|
||||
self._executedTasks.append(task)
|
||||
|
||||
# We define a class to run a number of threads in lock step.
|
||||
class ThreadPool:
|
||||
def __init__(self, dbState, numThreads, maxSteps, funcSequencer):
|
||||
self.numThreads = numThreads
|
||||
self.maxSteps = maxSteps
|
||||
self.funcSequencer = funcSequencer
|
||||
# Internal class variables
|
||||
self.dispatcher = WorkDispatcher(dbState)
|
||||
self.curStep = 0
|
||||
self.threadList = []
|
||||
# self.stepGate = threading.Condition() # Gate to hold/sync all threads
|
||||
# self.numWaitingThreads = 0
|
||||
|
||||
# start running all the threads, in lock step
|
||||
def createAndStartThreads(self, tc: ThreadCoordinator):
|
||||
for tid in range(0, self.numThreads): # Create the threads
|
||||
workerThread = WorkerThread(self, tid, tc)
|
||||
self.threadList.append(workerThread)
|
||||
workerThread.start() # start, but should block immediately before step 0
|
||||
|
||||
def joinAll(self):
|
||||
for workerThread in self.threadList:
|
||||
logger.debug("Joining thread...")
|
||||
workerThread._thread.join()
|
||||
|
||||
# A queue of contiguous POSITIVE integers
|
||||
class LinearQueue():
|
||||
def __init__(self):
|
||||
self.firstIndex = 1 # 1st ever element
|
||||
self.lastIndex = 0
|
||||
self._lock = threading.RLock() # our functions may call each other
|
||||
self.inUse = set() # the indexes that are in use right now
|
||||
|
||||
def toText(self):
|
||||
return "[{}..{}], in use: {}".format(self.firstIndex, self.lastIndex, self.inUse)
|
||||
|
||||
# Push (add new element, largest) to the tail, and mark it in use
|
||||
def push(self):
|
||||
with self._lock:
|
||||
# if ( self.isEmpty() ):
|
||||
# self.lastIndex = self.firstIndex
|
||||
# return self.firstIndex
|
||||
# Otherwise we have something
|
||||
self.lastIndex += 1
|
||||
self.allocate(self.lastIndex)
|
||||
# self.inUse.add(self.lastIndex) # mark it in use immediately
|
||||
return self.lastIndex
|
||||
|
||||
def pop(self):
|
||||
with self._lock:
|
||||
if ( self.isEmpty() ):
|
||||
# raise RuntimeError("Cannot pop an empty queue")
|
||||
return False # TODO: None?
|
||||
|
||||
index = self.firstIndex
|
||||
if ( index in self.inUse ):
|
||||
return False
|
||||
|
||||
self.firstIndex += 1
|
||||
return index
|
||||
|
||||
def isEmpty(self):
|
||||
return self.firstIndex > self.lastIndex
|
||||
|
||||
def popIfNotEmpty(self):
|
||||
with self._lock:
|
||||
if (self.isEmpty()):
|
||||
return 0
|
||||
return self.pop()
|
||||
|
||||
def allocate(self, i):
|
||||
with self._lock:
|
||||
# logger.debug("LQ allocating item {}".format(i))
|
||||
if ( i in self.inUse ):
|
||||
raise RuntimeError("Cannot re-use same index in queue: {}".format(i))
|
||||
self.inUse.add(i)
|
||||
|
||||
def release(self, i):
|
||||
with self._lock:
|
||||
# logger.debug("LQ releasing item {}".format(i))
|
||||
self.inUse.remove(i) # KeyError possible, TODO: why?
|
||||
|
||||
def size(self):
|
||||
return self.lastIndex + 1 - self.firstIndex
|
||||
|
||||
def pickAndAllocate(self):
|
||||
if ( self.isEmpty() ):
|
||||
return None
|
||||
with self._lock:
|
||||
cnt = 0 # counting the iterations
|
||||
while True:
|
||||
cnt += 1
|
||||
if ( cnt > self.size()*10 ): # 10x iteration already
|
||||
# raise RuntimeError("Failed to allocate LinearQueue element")
|
||||
return None
|
||||
ret = Dice.throwRange(self.firstIndex, self.lastIndex+1)
|
||||
if ( not ret in self.inUse ):
|
||||
self.allocate(ret)
|
||||
return ret
|
||||
|
||||
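# Expected LinearQueue behaviour, as a quick illustration (hypothetical session):
#   q = LinearQueue()
#   a = q.push()        # -> 1, marked in use
#   b = q.push()        # -> 2, marked in use
#   q.release(a)        # index 1 is free again; pop() still refuses any index in use
#   q.toText()          # -> "[1..2], in use: {2}"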
class DbConn:
|
||||
def __init__(self):
|
||||
self._conn = None
|
||||
self._cursor = None
|
||||
self.isOpen = False
|
||||
|
||||
def open(self): # Open connection
|
||||
if ( self.isOpen ):
|
||||
raise RuntimeError("Cannot re-open an existing DB connection")
|
||||
|
||||
cfgPath = "../../build/test/cfg"
|
||||
self._conn = taos.connect(host="127.0.0.1", config=cfgPath) # TODO: make configurable
|
||||
self._cursor = self._conn.cursor()
|
||||
|
||||
# Get the connection/cursor ready
|
||||
self._cursor.execute('reset query cache')
|
||||
# self._cursor.execute('use db')
|
||||
|
||||
# Open connection
|
||||
self._tdSql = TDSql()
|
||||
self._tdSql.init(self._cursor)
|
||||
self.isOpen = True
|
||||
|
||||
def resetDb(self): # reset the whole database, etc.
|
||||
if ( not self.isOpen ):
|
||||
raise RuntimeError("Cannot reset database until connection is open")
|
||||
# self._tdSql.prepare() # Recreate database, etc.
|
||||
|
||||
self._cursor.execute('drop database if exists db')
|
||||
logger.debug("Resetting DB, dropped database")
|
||||
# self._cursor.execute('create database db')
|
||||
# self._cursor.execute('use db')
|
||||
|
||||
# tdSql.execute('show databases')
|
||||
|
||||
def close(self):
|
||||
if ( not self.isOpen ):
|
||||
raise RuntimeError("Cannot clean up database until connection is open")
|
||||
self._tdSql.close()
|
||||
self.isOpen = False
|
||||
|
||||
def execute(self, sql):
|
||||
if ( not self.isOpen ):
|
||||
raise RuntimeError("Cannot execute database commands until connection is open")
|
||||
return self._tdSql.execute(sql)
|
||||
|
||||
def query(self, sql) -> int : # return number of rows retrieved
|
||||
if ( not self.isOpen ):
|
||||
raise RuntimeError("Cannot query database until connection is open")
|
||||
return self._tdSql.query(sql)
|
||||
|
||||
|
||||
# State of the database as we believe it to be
|
||||
class DbState():
|
||||
STATE_INVALID = -1
|
||||
STATE_EMPTY = 1 # nothing there, not even a DB
|
||||
STATE_DB_ONLY = 2 # we have a DB, but nothing else
|
||||
STATE_TABLE_ONLY = 3 # we have a table, but totally empty
|
||||
STATE_HAS_DATA = 4 # we have some data in the table
|
||||
|
||||
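# The intended state machine, as implied by getTasksAtState() and transition() below:
#   STATE_EMPTY --CreateDbTask--> STATE_DB_ONLY --CreateFixedTableTask--> STATE_TABLE_ONLY
#   STATE_TABLE_ONLY --AddFixedDataTask--> STATE_HAS_DATA
#   STATE_DB_ONLY --DropDbTask--> STATE_EMPTY
#   STATE_TABLE_ONLY / STATE_HAS_DATA --DropFixedTableTask--> STATE_DB_ONLY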
def __init__(self):
|
||||
self.tableNumQueue = LinearQueue()
|
||||
self._lastTick = datetime.datetime(2019, 1, 1) # initial date time tick
|
||||
self._lastInt = 0 # next one is initial integer
|
||||
self._lock = threading.RLock()
|
||||
self._state = self.STATE_INVALID
|
||||
|
||||
# self.openDbServerConnection()
|
||||
self._dbConn = DbConn()
|
||||
try:
|
||||
self._dbConn.open() # may throw taos.error.ProgrammingError: disconnected
|
||||
except taos.error.ProgrammingError as err:
|
||||
# print("Error type: {}, msg: {}, value: {}".format(type(err), err.msg, err))
|
||||
if ( err.msg == 'disconnected' ): # cannot open DB connection
|
||||
print("Cannot establish DB connection, please re-run script without parameter, and follow the instructions.")
|
||||
sys.exit()
|
||||
else:
|
||||
raise
|
||||
except:
|
||||
print("[=]Unexpected exception")
|
||||
raise
|
||||
self._dbConn.resetDb() # drop and recreate DB
|
||||
self._state = self.STATE_EMPTY # initial state, the result of above
|
||||
|
||||
def getDbConn(self):
|
||||
return self._dbConn
|
||||
|
||||
def pickAndAllocateTable(self): # pick any table, and "use" it
|
||||
return self.tableNumQueue.pickAndAllocate()
|
||||
|
||||
def addTable(self):
|
||||
with self._lock:
|
||||
tIndex = self.tableNumQueue.push()
|
||||
return tIndex
|
||||
|
||||
def getFixedTableName(self):
|
||||
return "fixed_table"
|
||||
|
||||
def releaseTable(self, i): # return the table back, so others can use it
|
||||
self.tableNumQueue.release(i)
|
||||
|
||||
def getNextTick(self):
|
||||
with self._lock: # prevent duplicate tick
|
||||
self._lastTick += datetime.timedelta(0, 1) # add one second to it
|
||||
return self._lastTick
|
||||
|
||||
def getNextInt(self):
|
||||
with self._lock:
|
||||
self._lastInt += 1
|
||||
return self._lastInt
|
||||
|
||||
def getTableNameToDelete(self):
|
||||
tblNum = self.tableNumQueue.pop() # TODO: race condition!
|
||||
if ( not tblNum ): # maybe false
|
||||
return False
|
||||
|
||||
return "table_{}".format(tblNum)
|
||||
|
||||
def execSql(self, sql): # using the main DB connection
|
||||
return self._dbConn.execute(sql)
|
||||
|
||||
def cleanUp(self):
|
||||
self._dbConn.close()
|
||||
|
||||
def getTasksAtState(self):
|
||||
tasks = []
|
||||
tasks.append(ReadFixedDataTask(self)) # always
|
||||
if ( self._state == self.STATE_EMPTY ):
|
||||
tasks.append(CreateDbTask(self))
|
||||
tasks.append(CreateFixedTableTask(self))
|
||||
elif ( self._state == self.STATE_DB_ONLY ):
|
||||
tasks.append(DropDbTask(self))
|
||||
tasks.append(CreateFixedTableTask(self))
|
||||
tasks.append(AddFixedDataTask(self))
|
||||
elif ( self._state == self.STATE_TABLE_ONLY ):
|
||||
tasks.append(DropFixedTableTask(self))
|
||||
tasks.append(AddFixedDataTask(self))
|
||||
elif ( self._state == self.STATE_HAS_DATA ) : # same as above. TODO: adjust
|
||||
tasks.append(DropFixedTableTask(self))
|
||||
tasks.append(AddFixedDataTask(self))
|
||||
else:
|
||||
raise RuntimeError("Unexpected DbState state: {}".format(self._state))
|
||||
return tasks
|
||||
|
||||
def transition(self, tasks):
|
||||
if ( len(tasks) == 0 ): # before 1st step, or otherwise empty
|
||||
return # do nothing
|
||||
if ( self._state == self.STATE_EMPTY ):
|
||||
# self.assertNoSuccess(tasks, ReadFixedDataTask) # some read may be successful, since we might be creating a table
|
||||
if ( self.hasSuccess(tasks, CreateDbTask) ):
|
||||
self.assertAtMostOneSuccess(tasks, CreateDbTask) # param is class
|
||||
self._state = self.STATE_DB_ONLY
|
||||
if ( self.hasSuccess(tasks, CreateFixedTableTask )):
|
||||
self._state = self.STATE_TABLE_ONLY
|
||||
# else: # no successful table creation, not much we can say, as it is step 2
|
||||
else: # did not create db
|
||||
self.assertNoTask(tasks, CreateDbTask) # because we did not have such task
|
||||
# self.assertNoSuccess(tasks, CreateDbTask) # not necessary, since we just verified no such task
|
||||
self.assertNoSuccess(tasks, CreateFixedTableTask)
|
||||
|
||||
elif ( self._state == self.STATE_DB_ONLY ):
|
||||
self.assertAtMostOneSuccess(tasks, DropDbTask)
|
||||
self.assertIfExistThenSuccess(tasks, DropDbTask)
|
||||
self.assertAtMostOneSuccess(tasks, CreateFixedTableTask)
|
||||
# Nothing to be said about adding data task
|
||||
if ( self.hasSuccess(tasks, DropDbTask) ): # dropped the DB
|
||||
# self.assertHasTask(tasks, DropDbTask) # implied by hasSuccess
|
||||
self.assertAtMostOneSuccess(tasks, DropDbTask)
|
||||
self._state = self.STATE_EMPTY
|
||||
elif ( self.hasSuccess(tasks, CreateFixedTableTask) ): # did not drop db, create table success
|
||||
# self.assertHasTask(tasks, CreateFixedTableTask) # tried to create table
|
||||
self.assertAtMostOneSuccess(tasks, CreateFixedTableTask) # at most 1 attempt is successful
|
||||
self.assertNoTask(tasks, DropDbTask) # should not have tried
|
||||
if ( not self.hasSuccess(tasks, AddFixedDataTask) ): # just created table, no data yet
|
||||
# can't say there's add-data attempts, since they may all fail
|
||||
self._state = self.STATE_TABLE_ONLY
|
||||
else:
|
||||
self._state = self.STATE_HAS_DATA
|
||||
else: # neither dropped the DB nor created the fixed table successfully, which is not acceptable
|
||||
raise RuntimeError("Unexpected no-success scenario")
|
||||
|
||||
elif ( self._state == self.STATE_TABLE_ONLY ):
|
||||
if ( self.hasSuccess(tasks, DropFixedTableTask) ):
|
||||
self.assertAtMostOneSuccess(tasks, DropFixedTableTask)
|
||||
self._state = self.STATE_DB_ONLY
|
||||
elif ( self.hasSuccess(tasks, AddFixedDataTask) ): # no success dropping the table
|
||||
self.assertNoTask(tasks, DropFixedTableTask)
|
||||
self._state = self.STATE_HAS_DATA
|
||||
else: # did not drop table, did not insert data, that is impossible
|
||||
raise RuntimeError("Unexpected no-success scenarios")
|
||||
|
||||
elif ( self._state == self.STATE_HAS_DATA ): # Same as above, TODO: adjust
|
||||
if ( self.hasSuccess(tasks, DropFixedTableTask) ):
|
||||
self.assertAtMostOneSuccess(tasks, DropFixedTableTask)
|
||||
self._state = self.STATE_DB_ONLY
|
||||
elif ( self.hasSuccess(tasks, AddFixedDataTask) ): # no success dropping the table
|
||||
self.assertNoTask(tasks, DropFixedTableTask)
|
||||
self._state = self.STATE_HAS_DATA
|
||||
else: # did not drop table, did not insert data, that is impossible
|
||||
raise RuntimeError("Unexpected no-success scenarios")
|
||||
|
||||
else:
|
||||
raise RuntimeError("Unexpected DbState state: {}".format(self._state))
|
||||
logger.debug("New DB state is: {}".format(self._state))
|
||||
|
||||
def assertAtMostOneSuccess(self, tasks, cls):
|
||||
sCnt = 0
|
||||
for task in tasks :
|
||||
if not isinstance(task, cls):
|
||||
continue
|
||||
if task.isSuccess():
|
||||
task.logDebug("Task success found")
|
||||
sCnt += 1
|
||||
if ( sCnt >= 2 ):
|
||||
raise RuntimeError("Unexpected more than 1 success with task: {}".format(cls))
|
||||
|
||||
def assertIfExistThenSuccess(self, tasks, cls):
|
||||
sCnt = 0
|
||||
exists = False
|
||||
for task in tasks :
|
||||
if not isinstance(task, cls):
|
||||
continue
|
||||
exists = True # we have a valid instance
|
||||
if task.isSuccess():
|
||||
sCnt += 1
|
||||
if ( exists and sCnt <= 0 ):
|
||||
raise RuntimeError("Unexpected zero success for task: {}".format(cls))
|
||||
|
||||
def assertNoTask(self, tasks, cls):
|
||||
for task in tasks :
|
||||
if isinstance(task, cls):
|
||||
raise RuntimeError("Unexpected task: {}".format(cls))
|
||||
|
||||
def assertNoSuccess(self, tasks, cls):
|
||||
for task in tasks :
|
||||
if isinstance(task, cls):
|
||||
if task.isSuccess():
|
||||
raise RuntimeError("Unexpected successful task: {}".format(cls))
|
||||
|
||||
def hasSuccess(self, tasks, cls):
|
||||
for task in tasks :
|
||||
if not isinstance(task, cls):
|
||||
continue
|
||||
if task.isSuccess():
|
||||
return True
|
||||
return False
|
||||
|
||||
class TaskExecutor():
|
||||
def __init__(self, curStep):
|
||||
self._curStep = curStep
|
||||
|
||||
def getCurStep(self):
|
||||
return self._curStep
|
||||
|
||||
def execute(self, task: Task, wt: WorkerThread): # execute a task on a thread
|
||||
task.execute(wt)
|
||||
|
||||
# def logInfo(self, msg):
|
||||
# logger.info(" T[{}.x]: ".format(self._curStep) + msg)
|
||||
|
||||
# def logDebug(self, msg):
|
||||
# logger.debug(" T[{}.x]: ".format(self._curStep) + msg)
|
||||
|
||||
class Task():
|
||||
taskSn = 100
|
||||
|
||||
@classmethod
|
||||
def allocTaskNum(cls):
|
||||
cls.taskSn += 1
|
||||
return cls.taskSn
|
||||
|
||||
def __init__(self, dbState: DbState):
|
||||
self._dbState = dbState
|
||||
self._workerThread = None
|
||||
self._err = None
|
||||
self._curStep = None
|
||||
self._numRows = None # Number of rows affected
|
||||
|
||||
# Assign an incremental task serial number
|
||||
self._taskNum = self.allocTaskNum()
|
||||
|
||||
def isSuccess(self):
|
||||
return self._err == None
|
||||
|
||||
def clone(self):
|
||||
newTask = self.__class__(self._dbState)
|
||||
return newTask
|
||||
|
||||
def logDebug(self, msg):
|
||||
self._workerThread.logDebug("s[{}.{}] {}".format(self._curStep, self._taskNum, msg))
|
||||
|
||||
def logInfo(self, msg):
|
||||
self._workerThread.logInfo("s[{}.{}] {}".format(self._curStep, self._taskNum, msg))
|
||||
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
raise RuntimeError("To be implemeted by child classes, class name: {}".format(self.__class__.__name__))
|
||||
|
||||
def execute(self, wt: WorkerThread):
|
||||
wt.verifyThreadSelf()
|
||||
self._workerThread = wt # type: ignore
|
||||
|
||||
te = wt.getTaskExecutor()
|
||||
self._curStep = te.getCurStep()
|
||||
self.logDebug("[-] executing task {}...".format(self.__class__.__name__))
|
||||
|
||||
self._err = None
|
||||
try:
|
||||
self._executeInternal(te, wt) # TODO: no return value?
|
||||
except taos.error.ProgrammingError as err:
|
||||
self.logDebug("[=]Taos Execution exception: {0}".format(err))
|
||||
self._err = err
|
||||
except:
|
||||
self.logDebug("[=]Unexpected exception")
|
||||
raise
|
||||
|
||||
self.logDebug("[X] task execution completed, {}, status: {}".format(self.__class__.__name__, "Success" if self.isSuccess() else "Failure"))
|
||||
|
||||
def execSql(self, sql):
|
||||
return self._dbState.execSql(sql)
|
||||
|
||||
class CreateDbTask(Task):
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
wt.execSql("create database db")
|
||||
|
||||
class DropDbTask(Task):
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
wt.execSql("drop database db")
|
||||
|
||||
class CreateTableTask(Task):
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
tIndex = self._dbState.addTable()
|
||||
self.logDebug("Creating a table {} ...".format(tIndex))
|
||||
wt.execSql("create table db.table_{} (ts timestamp, speed int)".format(tIndex))
|
||||
self.logDebug("Table {} created.".format(tIndex))
|
||||
self._dbState.releaseTable(tIndex)
|
||||
|
||||
class CreateFixedTableTask(Task):
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
tblName = self._dbState.getFixedTableName()
|
||||
wt.execSql("create table db.{} (ts timestamp, speed int)".format(tblName))
|
||||
|
||||
class ReadFixedDataTask(Task):
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
tblName = self._dbState.getFixedTableName()
|
||||
self._numRows = wt.querySql("select * from db.{}".format(tblName)) # save the result for later
|
||||
# tdSql.query(" cars where tbname in ('carzero', 'carone')")
|
||||
|
||||
class DropTableTask(Task):
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
tableName = self._dbState.getTableNameToDelete()
|
||||
if ( not tableName ): # May be "False"
|
||||
self.logInfo("Cannot generate a table to delete, skipping...")
|
||||
return
|
||||
self.logInfo("Dropping a table db.{} ...".format(tableName))
|
||||
wt.execSql("drop table db.{}".format(tableName))
|
||||
|
||||
class DropFixedTableTask(Task):
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
tblName = self._dbState.getFixedTableName()
|
||||
wt.execSql("drop table db.{}".format(tblName))
|
||||
|
||||
class AddDataTask(Task):
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
ds = self._dbState
|
||||
self.logInfo("Adding some data... numQueue={}".format(ds.tableNumQueue.toText()))
|
||||
tIndex = ds.pickAndAllocateTable()
|
||||
if ( tIndex == None ):
|
||||
self.logInfo("No table found to add data, skipping...")
|
||||
return
|
||||
sql = "insert into db.table_{} values ('{}', {});".format(tIndex, ds.getNextTick(), ds.getNextInt())
|
||||
self.logDebug("Executing SQL: {}".format(sql))
|
||||
wt.execSql(sql)
|
||||
ds.releaseTable(tIndex)
|
||||
self.logDebug("Finished adding data")
|
||||
|
||||
class AddFixedDataTask(Task):
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
ds = self._dbState
|
||||
sql = "insert into db.table_{} values ('{}', {});".format(ds.getFixedTableName(), ds.getNextTick(), ds.getNextInt())
|
||||
wt.execSql(sql)
|
||||
|
||||
# Deterministic random number generator
|
||||
class Dice():
|
||||
seeded = False # static, uninitialized
|
||||
|
||||
@classmethod
|
||||
def seed(cls, s): # static
|
||||
if (cls.seeded):
|
||||
raise RuntimeError("Cannot seed the random generator more than once")
|
||||
cls.verifyRNG()
|
||||
random.seed(s)
|
||||
cls.seeded = True # TODO: protect against multi-threading
|
||||
|
||||
@classmethod
|
||||
def verifyRNG(cls): # Verify that the RNG is deterministic
|
||||
random.seed(0)
|
||||
x1 = random.randrange(0, 1000)
|
||||
x2 = random.randrange(0, 1000)
|
||||
x3 = random.randrange(0, 1000)
|
||||
if ( x1 != 864 or x2!=394 or x3!=776 ):
|
||||
raise RuntimeError("System RNG is not deterministic")
|
||||
|
||||
@classmethod
|
||||
def throw(cls, stop): # get 0 to stop-1
|
||||
return cls.throwRange(0, stop)
|
||||
|
||||
@classmethod
|
||||
def throwRange(cls, start, stop): # up to stop-1
|
||||
if ( not cls.seeded ):
|
||||
raise RuntimeError("Cannot throw dice before seeding it")
|
||||
return random.randrange(start, stop)
|
||||
|
||||
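# Typical Dice usage (illustrative): seed exactly once, after which every throw is reproducible.
#   Dice.seed(0)
#   Dice.throw(6)          # deterministic value in [0, 6)
#   Dice.throwRange(3, 9)  # deterministic value in [3, 9)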
|
||||
# Anyone needing to carry out work should simply come here
|
||||
class WorkDispatcher():
|
||||
def __init__(self, dbState):
|
||||
# self.totalNumMethods = 2
|
||||
self.tasks = [
|
||||
CreateTableTask(dbState),
|
||||
DropTableTask(dbState),
|
||||
AddDataTask(dbState),
|
||||
]
|
||||
|
||||
def throwDice(self):
|
||||
max = len(self.tasks) - 1
|
||||
dRes = random.randint(0, max)
|
||||
# logger.debug("Threw the dice in range [{},{}], and got: {}".format(0,max,dRes))
|
||||
return dRes
|
||||
|
||||
def pickTask(self):
|
||||
dice = self.throwDice()
|
||||
return self.tasks[dice]
|
||||
|
||||
def doWork(self, workerThread):
|
||||
task = self.pickTask()
|
||||
task.execute(workerThread)
|
||||
|
||||
def main():
|
||||
# Super cool Python argument library: https://docs.python.org/3/library/argparse.html
|
||||
parser = argparse.ArgumentParser(
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
description=textwrap.dedent('''\
|
||||
TDengine Auto Crash Generator (PLEASE NOTICE the Prerequisites Below)
|
||||
---------------------------------------------------------------------
|
||||
1. You build TDengine in the top level ./build directory, as described in official docs
|
||||
2. You run the server there before this script: ./build/bin/taosd -c test/cfg
|
||||
|
||||
'''))
|
||||
parser.add_argument('-p', '--per-thread-db-connection', action='store_true',
|
||||
help='Use a per-thread DB connection instead of a shared one (default: false)')
|
||||
parser.add_argument('-d', '--debug', action='store_true',
|
||||
help='Turn on DEBUG mode for more logging (default: false)')
|
||||
parser.add_argument('-s', '--max-steps', action='store', default=100, type=int,
|
||||
help='Maximum number of steps to run (default: 100)')
|
||||
parser.add_argument('-t', '--num-threads', action='store', default=10, type=int,
|
||||
help='Number of threads to run (default: 10)')
|
||||
|
||||
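# Illustrative invocations (flag names as defined above; the wrapper script name comes from
# the shell launcher elsewhere in this change set):
#   ./crash_gen_0519.py -d -s 50 -t 5   # debug logging, 50 steps, 5 worker threads
#   ./crash_gen_0519.py -p              # each worker thread gets its own DB connection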
global gConfig
|
||||
gConfig = parser.parse_args()
|
||||
if len(sys.argv) == 1:
|
||||
parser.print_help()
|
||||
sys.exit()
|
||||
|
||||
global logger
|
||||
logger = logging.getLogger('myApp')
|
||||
if ( gConfig.debug ):
|
||||
logger.setLevel(logging.DEBUG) # default seems to be INFO
|
||||
ch = logging.StreamHandler()
|
||||
logger.addHandler(ch)
|
||||
|
||||
dbState = DbState()
|
||||
Dice.seed(0) # initial seeding of dice
|
||||
tc = ThreadCoordinator(
|
||||
ThreadPool(dbState, gConfig.num_threads, gConfig.max_steps, 0),
|
||||
WorkDispatcher(dbState),
|
||||
dbState
|
||||
)
|
||||
tc.run()
|
||||
dbState.cleanUp()
|
||||
logger.info("Finished running thread pool")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -1,41 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This is the script for us to try to cause the TDengine server or client to crash
|
||||
#
|
||||
# PREPARATION
|
||||
#
|
||||
# 1. Build and compile the TDengine source code that comes with this script, in the same directory tree
|
||||
# 2. Please follow the direction in our README.md, and build TDengine in the build/ directory
|
||||
# 3. Adjust the configuration file if needed under build/test/cfg/taos.cfg
|
||||
# 4. Run the TDengine server instance: cd build; ./build/bin/taosd -c test/cfg
|
||||
# 5. Make sure you have a working Python3 environment: run /usr/bin/python3 --version, and you should get 3.6 or above
|
||||
# 6. Make sure you have the proper Python packages: # sudo apt install python3-setuptools python3-pip python3-distutils
|
||||
#
|
||||
# RUNNING THIS SCRIPT
|
||||
#
|
||||
# This script assumes the source code directory is intact, and that the binaries have been built in the
|
||||
# build/ directory; as such, it will load the Python libraries in the directory tree, and also load
|
||||
# the TDengine client shared library (.so) file in the build/ directory, as evidenced in the env
|
||||
# variables below.
|
||||
#
|
||||
# Running the script is simple, no parameter is needed (for now, but will change in the future).
|
||||
#
|
||||
# Happy Crashing...
|
||||
|
||||
|
||||
# Due to the heavy path name assumptions/usage, let us require that the user be in the current directory
|
||||
EXEC_DIR=`dirname "$0"`
|
||||
if [[ $EXEC_DIR != "." ]]
|
||||
then
|
||||
echo "ERROR: Please execute `basename "$0"` in its own directory (for now anyway, pardon the dust)"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
|
||||
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3
|
||||
|
||||
# Then let us set up the library path so that our compiled SO file can be loaded by Python
|
||||
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/../../build/build/lib
|
||||
|
||||
# Now we are all set; let's see if we can find a crash. Note we pass all params
|
||||
./crash_gen_0519.py $@
|
|
@ -131,6 +131,6 @@ python3 ./test.py -f user/pass_len.py
|
|||
|
||||
#query
|
||||
python3 ./test.py -f query/filter.py
|
||||
python3 ./test.py $1 -f query/filterCombo.py
|
||||
python3 ./test.py $1 -f query/queryNormal.py
|
||||
python3 ./test.py $1 -f query/queryError.py
|
||||
python3 ./test.py -f query/filterCombo.py
|
||||
python3 ./test.py -f query/queryNormal.py
|
||||
python3 ./test.py -f query/queryError.py
|
||||
|
|
|
@ -0,0 +1,281 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import random
|
||||
import threading
|
||||
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
|
||||
last_tb = ""
|
||||
last_stb = ""
|
||||
written = 0
|
||||
|
||||
|
||||
class Test (threading.Thread):
|
||||
def __init__(self, threadId, name):
|
||||
threading.Thread.__init__(self)
|
||||
self.threadId = threadId
|
||||
self.name = name
|
||||
|
||||
self.threadLock = threading.Lock()
|
||||
|
||||
def create_table(self):
|
||||
tdLog.info("create_table")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
current_tb = "tb%d" % int(round(time.time() * 1000))
|
||||
|
||||
if (current_tb == last_tb):
|
||||
return
|
||||
else:
|
||||
tdLog.info("will create table %s" % current_tb)
|
||||
|
||||
try:
|
||||
tdSql.execute(
|
||||
'create table %s (ts timestamp, speed int)' %
|
||||
current_tb)
|
||||
last_tb = current_tb
|
||||
written = 0
|
||||
except Exception as e:
|
||||
tdLog.info(repr(e))
|
||||
|
||||
def insert_data(self):
|
||||
tdLog.info("insert_data")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
if (last_tb == ""):
|
||||
tdLog.info("no table, create first")
|
||||
self.create_table()
|
||||
|
||||
tdLog.info("will insert data to table")
|
||||
for i in range(0, 10):
|
||||
insertRows = 1000
|
||||
tdLog.info("insert %d rows to %s" % (insertRows, last_tb))
|
||||
|
||||
for j in range(0, insertRows):
|
||||
ret = tdSql.execute(
|
||||
'insert into %s values (now + %dm, %d)' %
|
||||
(last_tb, j, j))
|
||||
written = written + 1
|
||||
|
||||
def query_data(self):
|
||||
tdLog.info("query_data")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
if (written > 0):
|
||||
tdLog.info("query data from table")
|
||||
tdSql.query("select * from %s" % last_tb)
|
||||
tdSql.checkRows(written)
|
||||
|
||||
def create_stable(self):
|
||||
tdLog.info("create_stable")
|
||||
global last_tb
|
||||
global last_stb
|
||||
global written
|
||||
|
||||
current_stb = "stb%d" % int(round(time.time() * 1000))
|
||||
|
||||
if (current_stb == last_stb):
|
||||
return
|
||||
else:
|
||||
tdLog.info("will create stable %s" % current_stb)
|
||||
tdSql.execute(
|
||||
'create table %s(ts timestamp, c1 int, c2 nchar(10)) tags (t1 int, t2 nchar(10))' %
|
||||
current_stb)
|
||||
last_stb = current_stb
|
||||
|
||||
current_tb = "tb%d" % int(round(time.time() * 1000))
|
||||
tdSql.execute(
|
||||
"create table %s using %s tags (1, '表1')" %
|
||||
(current_tb, last_stb))
|
||||
last_tb = current_tb
|
||||
tdSql.execute(
|
||||
"insert into %s values (now, 27, '我是nchar字符串')" %
|
||||
last_tb)
|
||||
written = written + 1
|
||||
|
||||
|
||||
def drop_stable(self):
|
||||
tdLog.info("drop_stable")
|
||||
global last_stb
|
||||
|
||||
if (last_stb == ""):
|
||||
tdLog.info("no super table")
|
||||
return
|
||||
else:
|
||||
tdLog.info("will drop last super table")
|
||||
tdSql.execute('drop table %s' % last_stb)
|
||||
last_stb = ""
|
||||
|
||||
def restart_database(self):
|
||||
tdLog.info("restart_database")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.start(1)
|
||||
|
||||
def force_restart_database(self):
|
||||
tdLog.info("force_restart_database")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
tdDnodes.forcestop(1)
|
||||
tdDnodes.start(1)
|
||||
|
||||
def drop_table(self):
|
||||
tdLog.info("drop_table")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
for i in range(0, 10):
|
||||
if (last_tb != ""):
|
||||
tdLog.info("drop last_tb %s" % last_tb)
|
||||
tdSql.execute("drop table %s" % last_tb)
|
||||
last_tb = ""
|
||||
written = 0
|
||||
|
||||
|
||||
def query_data_from_stable(self):
|
||||
tdLog.info("query_data_from_stable")
|
||||
global last_stb
|
||||
|
||||
if (last_stb == ""):
|
||||
tdLog.info("no super table")
|
||||
return
|
||||
else:
|
||||
tdLog.info("will query data from super table")
|
||||
tdSql.execute('select * from %s' % last_stb)
|
||||
|
||||
|
||||
def reset_query_cache(self):
|
||||
tdLog.info("reset_query_cache")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
tdLog.info("reset query cache")
|
||||
tdSql.execute("reset query cache")
|
||||
tdLog.sleep(1)
|
||||
|
||||
def reset_database(self):
|
||||
tdLog.info("reset_database")
|
||||
global last_tb
|
||||
global last_stb
|
||||
global written
|
||||
|
||||
tdDnodes.forcestop(1)
|
||||
tdDnodes.deploy(1)
|
||||
last_tb = ""
|
||||
last_stb = ""
|
||||
written = 0
|
||||
tdDnodes.start(1)
|
||||
tdSql.prepare()
|
||||
|
||||
def delete_datafiles(self):
|
||||
tdLog.info("delete_data_files")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
dnodesDir = tdDnodes.getDnodesRootDir()
|
||||
dataDir = dnodesDir + '/dnode1/*'
|
||||
deleteCmd = 'rm -rf %s' % dataDir
|
||||
os.system(deleteCmd)
|
||||
|
||||
last_tb = ""
|
||||
written = 0
|
||||
tdDnodes.start(1)
|
||||
tdSql.prepare()
|
||||
|
||||
def run(self):
|
||||
dataOp = {
|
||||
1: self.insert_data,
|
||||
2: self.query_data,
|
||||
3: self.query_data_from_stable,
|
||||
}
|
||||
|
||||
dbOp = {
|
||||
1: self.create_table,
|
||||
2: self.create_stable,
|
||||
3: self.restart_database,
|
||||
4: self.force_restart_database,
|
||||
5: self.drop_table,
|
||||
6: self.reset_query_cache,
|
||||
7: self.reset_database,
|
||||
8: self.delete_datafiles,
|
||||
9: self.drop_stable,
|
||||
}
|
||||
|
||||
queryOp = {
|
||||
1: self.query_data,
|
||||
2: self.query_data_from_stable,
|
||||
}
|
||||
|
||||
if (self.threadId == 1):
|
||||
while True:
|
||||
self.threadLock.acquire()
|
||||
tdLog.notice("first thread")
|
||||
randDataOp = random.randint(1, 3)
|
||||
dataOp.get(randDataOp , lambda: "ERROR")()
|
||||
self.threadLock.release()
|
||||
|
||||
elif (self.threadId == 2):
|
||||
while True:
|
||||
tdLog.notice("second thread")
|
||||
self.threadLock.acquire()
|
||||
randDbOp = random.randint(1, 9)
|
||||
dbOp.get(randDbOp, lambda: "ERROR")()
|
||||
self.threadLock.release()
|
||||
elif (self.threadId == 3):
|
||||
while True:
|
||||
tdLog.notice("third thread")
|
||||
self.threadLock.acquire()
|
||||
randQueryOp = random.randint(1, 2)
|
||||
queryOp.get(randQueryOp, lambda: "ERROR")()
|
||||
self.threadLock.release()
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
test1 = Test(1, "data operation")
|
||||
test2 = Test(2, "db operation")
|
||||
test2 = Test(3, "query operation")
|
||||
|
||||
test1.start()
|
||||
test2.start()
|
||||
test3.start()
|
||||
test1.join()
|
||||
test2.join()
|
||||
test3.join()
|
||||
|
||||
tdLog.info("end of test")
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -20,63 +20,62 @@ from util.cases import *
|
|||
from util.sql import *
|
||||
from util.dnodes import *
|
||||
|
||||
current_tb = ""
|
||||
last_tb = ""
|
||||
last_stb = ""
|
||||
written = 0
|
||||
|
||||
|
||||
class Test (threading.Thread):
|
||||
def __init__(self, threadId, name, sleepTime):
|
||||
def __init__(self, threadId, name):
|
||||
threading.Thread.__init__(self)
|
||||
self.threadId = threadId
|
||||
self.name = name
|
||||
self.sleepTime = sleepTime
|
||||
|
||||
self.threadLock = threading.Lock()
|
||||
|
||||
def create_table(self):
|
||||
global current_tb
|
||||
tdLog.info("create_table")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
tdLog.info("create a table")
|
||||
current_tb = "tb%d" % int(round(time.time() * 1000))
|
||||
tdLog.info("current table %s" % current_tb)
|
||||
|
||||
if (current_tb == last_tb):
|
||||
return
|
||||
else:
|
||||
tdSql.execute(
|
||||
'create table %s (ts timestamp, speed int)' %
|
||||
current_tb)
|
||||
last_tb = current_tb
|
||||
written = 0
|
||||
tdLog.info("will create table %s" % current_tb)
|
||||
|
||||
try:
|
||||
tdSql.execute(
|
||||
'create table %s (ts timestamp, speed int)' %
|
||||
current_tb)
|
||||
last_tb = current_tb
|
||||
written = 0
|
||||
except Exception as e:
|
||||
tdLog.info(repr(e))
|
||||
|
||||
def insert_data(self):
|
||||
global current_tb
|
||||
tdLog.info("insert_data")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
tdLog.info("will insert data to table")
|
||||
if (current_tb == ""):
|
||||
if (last_tb == ""):
|
||||
tdLog.info("no table, create first")
|
||||
self.create_table()
|
||||
|
||||
tdLog.info("insert data to table")
|
||||
tdLog.info("will insert data to table")
|
||||
for i in range(0, 10):
|
||||
self.threadLock.acquire()
|
||||
insertRows = 1000
|
||||
tdLog.info("insert %d rows to %s" % (insertRows, current_tb))
|
||||
tdLog.info("insert %d rows to %s" % (insertRows, last_tb))
|
||||
|
||||
for j in range(0, insertRows):
|
||||
ret = tdSql.execute(
|
||||
'insert into %s values (now + %dm, %d)' %
|
||||
(current_tb, j, j))
|
||||
(last_tb, j, j))
|
||||
written = written + 1
|
||||
self.threadLock.release()
|
||||
|
||||
def query_data(self):
|
||||
global current_tb
|
||||
tdLog.info("query_data")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
|
@ -86,53 +85,90 @@ class Test (threading.Thread):
|
|||
tdSql.checkRows(written)
|
||||
|
||||
def create_stable(self):
|
||||
global current_tb
|
||||
tdLog.info("create_stable")
|
||||
global last_tb
|
||||
global last_stb
|
||||
global written
|
||||
|
||||
tdLog.info("create a super table")
|
||||
current_stb = "stb%d" % int(round(time.time() * 1000))
|
||||
|
||||
if (current_stb == last_stb):
|
||||
return
|
||||
else:
|
||||
tdLog.info("will create stable %s" % current_stb)
|
||||
tdSql.execute(
|
||||
'create table %s(ts timestamp, c1 int, c2 nchar(10)) tags (t1 int, t2 nchar(10))' %
|
||||
current_stb)
|
||||
last_stb = current_stb
|
||||
|
||||
current_tb = "tb%d" % int(round(time.time() * 1000))
|
||||
tdSql.execute(
|
||||
"create table %s using %s tags (1, '表1')" %
|
||||
(current_tb, last_stb))
|
||||
last_tb = current_tb
|
||||
tdSql.execute(
|
||||
"insert into %s values (now, 27, '我是nchar字符串')" %
|
||||
last_tb)
|
||||
written = written + 1
|
||||
|
||||
|
||||
def drop_stable(self):
|
||||
tdLog.info("drop_stable")
|
||||
global last_stb
|
||||
|
||||
if (last_stb == ""):
|
||||
tdLog.info("no super table")
|
||||
return
|
||||
else:
|
||||
tdLog.info("will drop last super table")
|
||||
tdSql.execute('drop table %s' % last_stb)
|
||||
last_stb = ""
|
||||
|
||||
def restart_database(self):
|
||||
global current_tb
|
||||
tdLog.info("restart_database")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
tdLog.info("restart databae")
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.start(1)
|
||||
tdLog.sleep(5)
|
||||
|
||||
def force_restart(self):
|
||||
global current_tb
|
||||
def force_restart_database(self):
|
||||
tdLog.info("force_restart_database")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
tdLog.info("force restart database")
|
||||
tdDnodes.forcestop(1)
|
||||
tdDnodes.start(1)
|
||||
tdLog.sleep(5)
|
||||
tdLog.sleep(10)
|
||||
|
||||
def drop_table(self):
|
||||
global current_tb
|
||||
tdLog.info("drop_table")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
for i in range(0, 10):
|
||||
self.threadLock.acquire()
|
||||
|
||||
tdLog.info("current_tb %s" % current_tb)
|
||||
|
||||
if (current_tb != ""):
|
||||
tdLog.info("drop current tb %s" % current_tb)
|
||||
tdSql.execute("drop table %s" % current_tb)
|
||||
current_tb = ""
|
||||
if (last_tb != ""):
|
||||
tdLog.info("drop last_tb %s" % last_tb)
|
||||
tdSql.execute("drop table %s" % last_tb)
|
||||
last_tb = ""
|
||||
written = 0
|
||||
tdLog.sleep(self.sleepTime)
|
||||
self.threadLock.release()
|
||||
|
||||
|
||||
def query_data_from_stable(self):
|
||||
tdLog.info("query_data_from_stable")
|
||||
global last_stb
|
||||
|
||||
if (last_stb == ""):
|
||||
tdLog.info("no super table")
|
||||
return
|
||||
else:
|
||||
tdLog.info("will query data from super table")
|
||||
tdSql.execute('select * from %s' % last_stb)
|
||||
|
||||
|
||||
def reset_query_cache(self):
|
||||
global current_tb
|
||||
tdLog.info("reset_query_cache")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
|
@ -141,51 +177,69 @@ class Test (threading.Thread):
|
|||
tdLog.sleep(1)
|
||||
|
||||
def reset_database(self):
|
||||
global current_tb
|
||||
tdLog.info("reset_database")
|
||||
global last_tb
|
||||
global last_stb
|
||||
global written
|
||||
|
||||
tdLog.info("reset database")
|
||||
tdDnodes.forcestop(1)
|
||||
tdDnodes.deploy(1)
|
||||
current_tb = ""
|
||||
last_tb = ""
|
||||
last_stb = ""
|
||||
written = 0
|
||||
tdDnodes.start(1)
|
||||
tdSql.prepare()
|
||||
|
||||
def delete_datafiles(self):
|
||||
global current_tb
|
||||
tdLog.info("delete_data_files")
|
||||
global last_tb
|
||||
global written
|
||||
|
||||
tdLog.info("delete data files")
|
||||
dnodesDir = tdDnodes.getDnodesRootDir()
|
||||
dataDir = dnodesDir + '/dnode1/*'
|
||||
deleteCmd = 'rm -rf %s' % dataDir
|
||||
os.system(deleteCmd)
|
||||
|
||||
current_tb = ""
|
||||
last_tb = ""
|
||||
written = 0
|
||||
tdDnodes.start(1)
|
||||
tdLog.sleep(10)
|
||||
tdSql.prepare()
|
||||
|
||||
def run(self):
|
||||
switch = {
|
||||
1: self.create_table,
|
||||
2: self.insert_data,
|
||||
3: self.query_data,
|
||||
4: self.create_stable,
|
||||
5: self.restart_database,
|
||||
6: self.force_restart,
|
||||
7: self.drop_table,
|
||||
8: self.reset_query_cache,
|
||||
9: self.reset_database,
|
||||
10: self.delete_datafiles,
|
||||
dataOp = {
|
||||
1: self.insert_data,
|
||||
2: self.query_data,
|
||||
3: self.query_data_from_stable,
|
||||
}
|
||||
|
||||
switch.get(self.threadId, lambda: "ERROR")()
|
||||
dbOp = {
|
||||
1: self.create_table,
|
||||
2: self.create_stable,
|
||||
3: self.restart_database,
|
||||
4: self.force_restart_database,
|
||||
5: self.drop_table,
|
||||
6: self.reset_query_cache,
|
||||
7: self.reset_database,
|
||||
8: self.delete_datafiles,
|
||||
9: self.drop_stable,
|
||||
}
|
||||
|
||||
if (self.threadId == 1):
|
||||
while True:
|
||||
self.threadLock.acquire()
|
||||
tdLog.notice("first thread")
|
||||
randDataOp = random.randint(1, 3)
|
||||
dataOp.get(randDataOp , lambda: "ERROR")()
|
||||
self.threadLock.release()
|
||||
|
||||
elif (self.threadId == 2):
|
||||
while True:
|
||||
tdLog.notice("second thread")
|
||||
self.threadLock.acquire()
|
||||
randDbOp = random.randint(1, 9)
|
||||
dbOp.get(randDbOp, lambda: "ERROR")()
|
||||
self.threadLock.release()
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
|
@ -196,8 +250,8 @@ class TDTestCase:
|
|||
def run(self):
|
||||
tdSql.prepare()
|
||||
|
||||
test1 = Test(2, "insert_data", 1)
|
||||
test2 = Test(7, "drop_table", 2)
|
||||
test1 = Test(1, "data operation")
|
||||
test2 = Test(2, "db operation")
|
||||
|
||||
test1.start()
|
||||
test2.start()
|
||||
|
|
|
@ -21,104 +21,157 @@ from util.dnodes import *
|
|||
|
||||
class Test:
|
||||
def __init__(self):
|
||||
self.current_tb = ""
|
||||
self.last_tb = ""
|
||||
self.last_stb = ""
|
||||
self.written = 0
|
||||
|
||||
def create_table(self):
|
||||
tdLog.info("create a table")
|
||||
self.current_tb = "tb%d" % int(round(time.time() * 1000))
|
||||
tdLog.info("current table %s" % self.current_tb)
|
||||
tdLog.info("create_table")
|
||||
current_tb = "tb%d" % int(round(time.time() * 1000))
|
||||
|
||||
if (self.current_tb == self.last_tb):
|
||||
if (current_tb == self.last_tb):
|
||||
return
|
||||
else:
|
||||
tdLog.info("will create table %s" % current_tb)
|
||||
tdSql.execute(
|
||||
'create table %s (ts timestamp, speed int)' %
|
||||
self.current_tb)
|
||||
self.last_tb = self.current_tb
|
||||
'create table %s (ts timestamp, c1 int, c2 nchar(10))' %
|
||||
current_tb)
|
||||
self.last_tb = current_tb
|
||||
self.written = 0
|
||||
|
||||
def insert_data(self):
|
||||
tdLog.info("will insert data to table")
|
||||
if (self.current_tb == ""):
|
||||
tdLog.info("insert_data")
|
||||
if (self.last_tb == ""):
|
||||
tdLog.info("no table, create first")
|
||||
self.create_table()
|
||||
|
||||
tdLog.info("insert data to table")
|
||||
tdLog.info("will insert data to table")
|
||||
insertRows = 10
|
||||
tdLog.info("insert %d rows to %s" % (insertRows, self.last_tb))
|
||||
for i in range(0, insertRows):
|
||||
ret = tdSql.execute(
|
||||
'insert into %s values (now + %dm, %d)' %
|
||||
(self.last_tb, i, i))
|
||||
'insert into %s values (now + %dm, %d, "%s")' %
|
||||
(self.last_tb, i, i, "->" + str(i)))
|
||||
self.written = self.written + 1
|
||||
|
||||
tdLog.info("insert earlier data")
|
||||
tdSql.execute('insert into %s values (now - 5m , 10)' % self.last_tb)
|
||||
tdSql.execute(
|
||||
'insert into %s values (now - 5m , 10, " - 5m")' %
|
||||
self.last_tb)
|
||||
self.written = self.written + 1
|
||||
tdSql.execute('insert into %s values (now - 6m , 10)' % self.last_tb)
|
||||
tdSql.execute(
|
||||
'insert into %s values (now - 6m , 10, " - 6m")' %
|
||||
self.last_tb)
|
||||
self.written = self.written + 1
|
||||
tdSql.execute('insert into %s values (now - 7m , 10)' % self.last_tb)
|
||||
tdSql.execute(
|
||||
'insert into %s values (now - 7m , 10, " - 7m")' %
|
||||
self.last_tb)
|
||||
self.written = self.written + 1
|
||||
tdSql.execute('insert into %s values (now - 8m , 10)' % self.last_tb)
|
||||
tdSql.execute(
|
||||
'insert into %s values (now - 8m , 10, " - 8m")' %
|
||||
self.last_tb)
|
||||
self.written = self.written + 1
|
||||
|
||||
def query_data(self):
|
||||
tdLog.info("query_data")
|
||||
if (self.written > 0):
|
||||
tdLog.info("query data from table")
|
||||
tdSql.query("select * from %s" % self.last_tb)
|
||||
tdSql.checkRows(self.written)
|
||||
|
||||
def create_stable(self):
|
||||
tdLog.info("create a super table")
|
||||
tdLog.info("create_stable")
|
||||
current_stb = "stb%d" % int(round(time.time() * 1000))
|
||||
|
||||
if (current_stb == self.last_stb):
|
||||
return
|
||||
else:
|
||||
tdLog.info("will create stable %s" % current_stb)
|
||||
tdSql.execute(
|
||||
'create table %s(ts timestamp, c1 int, c2 nchar(10)) tags (t1 int, t2 nchar(10))' %
|
||||
current_stb)
|
||||
self.last_stb = current_stb
|
||||
|
||||
current_tb = "tb%d" % int(round(time.time() * 1000))
|
||||
tdSql.execute(
|
||||
"create table %s using %s tags (1, '表1')" %
|
||||
(current_tb, self.last_stb))
|
||||
self.last_tb = current_tb
|
||||
tdSql.execute(
|
||||
"insert into %s values (now, 27, '我是nchar字符串')" %
|
||||
self.last_tb)
|
||||
self.written = self.written + 1
|
||||
|
||||
def drop_stable(self):
|
||||
tdLog.info("drop_stable")
|
||||
if (self.last_stb == ""):
|
||||
tdLog.info("no super table")
|
||||
return
|
||||
else:
|
||||
tdLog.info("will drop last super table")
|
||||
tdSql.execute('drop table %s' % self.last_stb)
|
||||
self.last_stb = ""
|
||||
|
||||
|
||||
def query_data_from_stable(self):
|
||||
tdLog.info("query_data_from_stable")
|
||||
if (self.last_stb == ""):
|
||||
tdLog.info("no super table")
|
||||
return
|
||||
else:
|
||||
tdLog.info("will query data from super table")
|
||||
tdSql.execute('select * from %s' % self.last_stb)
|
||||
|
||||
|
||||
def restart_database(self):
|
||||
tdLog.info("restart databae")
|
||||
tdLog.info("restart_databae")
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.start(1)
|
||||
tdLog.sleep(5)
|
||||
|
||||
def force_restart(self):
|
||||
tdLog.info("force restart database")
|
||||
|
||||
def force_restart_database(self):
|
||||
tdLog.info("force_restart_database")
|
||||
tdDnodes.forcestop(1)
|
||||
tdDnodes.start(1)
|
||||
tdLog.sleep(5)
|
||||
tdSql.prepare()
|
||||
|
||||
def drop_table(self):
|
||||
if (self.current_tb != ""):
|
||||
tdLog.info("drop current tb %s" % self.current_tb)
|
||||
tdSql.execute("drop table %s" % self.current_tb)
|
||||
self.current_tb = ""
|
||||
tdLog.info("drop_table")
|
||||
if (self.last_tb != ""):
|
||||
tdLog.info("drop last tb %s" % self.last_tb)
|
||||
tdSql.execute("drop table %s" % self.last_tb)
|
||||
self.last_tb = ""
|
||||
self.written = 0
|
||||
|
||||
def reset_query_cache(self):
|
||||
tdLog.info("reset query cache")
|
||||
tdLog.info("reset_query_cache")
|
||||
tdSql.execute("reset query cache")
|
||||
tdLog.sleep(1)
|
||||
|
||||
def reset_database(self):
|
||||
tdLog.info("reset database")
|
||||
tdLog.info("reset_database")
|
||||
tdDnodes.forcestop(1)
|
||||
tdDnodes.deploy(1)
|
||||
self.current_tb = ""
|
||||
self.last_tb = ""
|
||||
self.written = 0
|
||||
tdDnodes.start(1)
|
||||
tdLog.sleep(5)
|
||||
tdSql.prepare()
|
||||
|
||||
def delete_datafiles(self):
|
||||
tdLog.info("delete data files")
|
||||
tdLog.info("delete_datafiles")
|
||||
dnodesDir = tdDnodes.getDnodesRootDir()
|
||||
dataDir = dnodesDir + '/dnode1/*'
|
||||
deleteCmd = 'rm -rf %s' % dataDir
|
||||
os.system(deleteCmd)
|
||||
|
||||
self.current_tb = ""
|
||||
self.last_tb = ""
|
||||
self.last_stb = ""
|
||||
self.written = 0
|
||||
tdDnodes.start(1)
|
||||
tdLog.sleep(10)
|
||||
tdSql.prepare()
|
||||
|
||||
|
||||
|
@ -138,15 +191,17 @@ class TDTestCase:
|
|||
3: test.query_data,
|
||||
4: test.create_stable,
|
||||
5: test.restart_database,
|
||||
6: test.force_restart,
|
||||
6: test.force_restart_database,
|
||||
7: test.drop_table,
|
||||
8: test.reset_query_cache,
|
||||
9: test.reset_database,
|
||||
10: test.delete_datafiles,
|
||||
11: test.query_data_from_stable,
|
||||
12: test.drop_stable,
|
||||
}
|
||||
|
||||
for x in range(1, 100):
|
||||
r = random.randint(1, 10)
|
||||
for x in range(1, 1000):
|
||||
r = random.randint(1, 12)
|
||||
tdLog.notice("iteration %d run func %d" % (x, r))
|
||||
switch.get(r, lambda: "ERROR")()
|
||||
|
||||
|
|
|
@ -60,17 +60,10 @@ if $rows != 75 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = '1' group by tgcol -x step13
|
||||
return -1
|
||||
step13:
|
||||
|
||||
sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tbcol = '1' group by tgcol -x step14
|
||||
return -1
|
||||
step14:
|
||||
|
||||
sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = '1' interval(1d) group by tgcol -x step15
|
||||
return -1
|
||||
step15:
|
||||
print select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = '1'
|
||||
sql_error select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = '1' group by tgcol
|
||||
sql_error select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tbcol = '1' group by tgcol
|
||||
sql_error select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = '1' interval(1d) group by tgcol
|
||||
|
||||
#can't filter binary fields
|
||||
|
||||
|
|
|
@ -133,6 +133,7 @@ $limit = $rowNum
|
|||
$offset = $limit / 2
|
||||
sql select max(c1), min(c2), sum(c3), avg(c4), stddev(c5), spread(c6), first(c7), last(c8), first(c9) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2) limit $limit offset $offset
|
||||
if $rows != $limit then
|
||||
print expect $limit, actual $rows
|
||||
return -1
|
||||
endi
|
||||
if $data01 != 0 then
|
||||
|
@ -147,6 +148,7 @@ $limit = $rowNum
|
|||
$offset = $limit / 2
|
||||
sql select max(c1), min(c2), sum(c3), avg(c4), stddev(c5), spread(c6), first(c7), last(c8), first(c9) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(linear) limit $limit offset $offset
|
||||
if $rows != $limit then
|
||||
print expect $limit, actual $rows
|
||||
return -1
|
||||
endi
|
||||
if $data01 != 0 then
|
||||
|
@ -182,6 +184,7 @@ $limit = $rowNum
|
|||
$offset = $limit / 2
|
||||
sql select max(c1), min(c2), sum(c3), avg(c4), stddev(c5), spread(c6), first(c7), last(c8), first(c9) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(prev) limit $limit offset $offset
|
||||
if $rows != $limit then
|
||||
print expect $limit, actual: $rows
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|