Merge branch 'master' of github.com:taosdata/TDengine into test/chr
commit 8015a2a490
@@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS)
     #INSTALL(TARGETS taos RUNTIME DESTINATION driver)
     #INSTALL(TARGETS shell RUNTIME DESTINATION .)
     IF (TD_MVN_INSTALLED)
-        INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.29.jar DESTINATION connector/jdbc)
+        INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.31.jar DESTINATION connector/jdbc)
     ENDIF ()
 ELSEIF (TD_DARWIN)
     SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
@@ -367,6 +367,7 @@ static bool bnMonitorBalance() {
     for (int32_t dest = 0; dest < src; dest++) {
       SDnodeObj *pDestDnode = tsBnDnodes.list[dest];
       if (bnCheckDnodeInVgroup(pDestDnode, pVgroup)) continue;
+      if (taosGetTimestampMs() - pDestDnode->createdTime < 2000) continue;
 
       float destScore = bnTryCalcDnodeScore(pDestDnode, 1);
       if (srcScore + 0.0001 < destScore) continue;
@@ -879,6 +879,11 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
     return TSDB_CODE_TSC_DISCONNECTED;
   }
 
+  if (sql == NULL) {
+    tscError("sql is NULL");
+    return TSDB_CODE_TSC_APP_ERROR;
+  }
+
   SSqlObj* pSql = pStmt->pSql;
   size_t sqlLen = strlen(sql);
 
@@ -4694,7 +4694,10 @@ int32_t getTimeRange(STimeWindow* win, tSqlExpr* pRight, int32_t optr, int16_t t
     win->skey = val;
   } else if (optr == TK_EQ) {
     win->ekey = win->skey = val;
+  } else if (optr == TK_NE) {
+    return TSDB_CODE_TSC_INVALID_SQL;
   }
 
   return TSDB_CODE_SUCCESS;
 }
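For context on the hunk above: a "<>" comparison against the primary timestamp column now returns TSDB_CODE_TSC_INVALID_SQL at parse time, which is what the Python and tsim test updates later in this commit assert. A minimal JDBC sketch of the user-visible effect, assuming an already-opened connection and a table `db.st` with a `ts` timestamp column (both placeholders, not taken from the diff):

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    class NotEqualOnTimestampSketch {
        // Works with either the native or the RESTful JDBC driver; the query mirrors
        // the one used in the Python test change later in this commit.
        static void demo(Connection conn) {
            try (Statement stmt = conn.createStatement()) {
                stmt.executeQuery("select * from db.st where ts <> '2020-05-13 10:00:00.002'");
            } catch (SQLException expected) {
                // "<>" on the primary timestamp is now rejected as invalid SQL
                System.out.println("rejected as expected: " + expected.getMessage());
            }
        }
    }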
@@ -282,6 +282,7 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
 
   SArray* tables = getTableList(pSql);
   if (tables == NULL) {
+    pSub->lastSyncTime = 0; //force to get table list next time
     return 0;
   }
   size_t numOfTables = taosArrayGetSize(tables);
@@ -488,7 +489,15 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
   SSub *pSub = (SSub *)tsub;
   if (pSub == NULL) return NULL;
 
-  if (pSub->pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
+  if (pSub->pTimer == NULL) {
+    int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime;
+    if (duration < (int64_t)(pSub->interval)) {
+      tscDebug("subscription consume too frequently, blocking...");
+      taosMsleep(pSub->interval - (int32_t)duration);
+    }
+  }
+
+  if (pSub->pSql->cmd.command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) { //may reach here when retireve stable vgroup failed
     SSqlObj* pSql = recreateSqlObj(pSub);
     if (pSql == NULL) {
       return NULL;
@@ -500,6 +509,11 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
     }
     pSub->pSql = pSql;
     pSql->pSubscription = pSub;
+
+    // no table list now, force to update it
+    tscDebug("begin table synchronization");
+    if (!tscUpdateSubscription(pSub->taos, pSub)) return NULL;
+    tscDebug("table synchronization completed");
   }
 
   tscSaveSubscriptionProgress(pSub);
@@ -524,14 +538,6 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
     tscDebug("subscribe:%s set next round subscribe skey:%"PRId64, pSub->topic, pQueryInfo->window.skey);
   }
 
-  if (pSub->pTimer == NULL) {
-    int64_t duration = taosGetTimestampMs() - pSub->lastConsumeTime;
-    if (duration < (int64_t)(pSub->interval)) {
-      tscDebug("subscription consume too frequently, blocking...");
-      taosMsleep(pSub->interval - (int32_t)duration);
-    }
-  }
-
   size_t size = taosArrayGetSize(pSub->progress) * sizeof(STableIdInfo);
   size += sizeof(SQueryTableMsg) + 4096;
   int code = tscAllocPayload(&pSql->cmd, (int)size);
@@ -3154,6 +3154,13 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
 
   // it is the failure retry insert
   if (pSql->pSubs != NULL) {
+    int32_t blockNum = (int32_t)taosArrayGetSize(pCmd->pDataBlocks);
+    if (pSql->subState.numOfSub != blockNum) {
+      tscError("0x%"PRIx64" sub num:%d is not same with data block num:%d", pSql->self, pSql->subState.numOfSub, blockNum);
+      pRes->code = TSDB_CODE_TSC_APP_ERROR;
+      return pRes->code;
+    }
+
     for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) {
       SSqlObj* pSub = pSql->pSubs[i];
       SInsertSupporter* pSup = calloc(1, sizeof(SInsertSupporter));
@@ -8,10 +8,8 @@ IF (TD_MVN_INSTALLED)
     ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
         POST_BUILD
         COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
-        COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.29.jar ${LIBRARY_OUTPUT_PATH}
+        COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.31.jar ${LIBRARY_OUTPUT_PATH}
        COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
        COMMENT "build jdbc driver")
     ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
 ENDIF ()
@@ -5,7 +5,7 @@
 
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>2.0.29</version>
+  <version>2.0.31</version>
   <packaging>jar</packaging>
 
   <name>JDBCDriver</name>
@@ -3,7 +3,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>2.0.29</version>
+  <version>2.0.31</version>
   <packaging>jar</packaging>
   <name>JDBCDriver</name>
   <url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
@@ -123,6 +123,7 @@
             <exclude>**/InvalidResultSetPointerTest.java</exclude>
             <exclude>**/RestfulConnectionTest.java</exclude>
             <exclude>**/TD4144Test.java</exclude>
+            <exclude>**/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java</exclude>
           </excludes>
           <testFailureIgnore>true</testFailureIgnore>
         </configuration>
@@ -17,16 +17,18 @@ public class RestfulConnection extends AbstractConnection {
     private final int port;
     private final String url;
     private volatile String database;
+    private final String token;
     /******************************************************/
     private boolean isClosed;
     private final DatabaseMetaData metadata;
 
-    public RestfulConnection(String host, String port, Properties props, String database, String url) {
+    public RestfulConnection(String host, String port, Properties props, String database, String url, String token) {
         super(props);
         this.host = host;
         this.port = Integer.parseInt(port);
         this.database = database;
        this.url = url;
+        this.token = token;
         this.metadata = new RestfulDatabaseMetaData(url, props.getProperty(TSDBDriver.PROPERTY_KEY_USER), this);
     }
 
@@ -66,6 +68,7 @@ public class RestfulConnection extends AbstractConnection {
         return this.metadata;
     }
 
+    // getters
     public String getHost() {
         return host;
     }
@@ -81,4 +84,8 @@ public class RestfulConnection extends AbstractConnection {
     public String getUrl() {
         return url;
     }
+
+    public String getToken() {
+        return token;
+    }
 }
@@ -38,15 +38,11 @@ public class RestfulDriver extends AbstractDriver {
         String port = props.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "6041");
         String database = props.containsKey(TSDBDriver.PROPERTY_KEY_DBNAME) ? props.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME) : null;
 
-        String loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":"
-                + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/"
-                + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/"
-                + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + "";
+        String loginUrl = "http://" + host + ":" + port + "/rest/login/" + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + "/" + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD) + "";
         try {
             String user = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_USER), "UTF-8");
             String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), "UTF-8");
-            loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":"
-                    + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + "";
+            loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + "";
         } catch (UnsupportedEncodingException e) {
             e.printStackTrace();
         }
@@ -55,12 +51,12 @@ public class RestfulDriver extends AbstractDriver {
         JSONObject jsonResult = JSON.parseObject(result);
         String status = jsonResult.getString("status");
         String token = jsonResult.getString("desc");
-        HttpClientPoolUtil.token = token;
         if (!status.equals("succ")) {
             throw new SQLException(jsonResult.getString("desc"));
         }
 
-        RestfulConnection conn = new RestfulConnection(host, port, props, database, url);
+        RestfulConnection conn = new RestfulConnection(host, port, props, database, url, token);
         if (database != null && !database.trim().replaceAll("\\s", "").isEmpty()) {
             Statement stmt = conn.createStatement();
             stmt.execute("use " + database);
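Net effect of the RestfulDriver hunks above together with the RestfulConnection change: the login token returned by /rest/login (the "desc" field of the JSON response) is now kept on each connection instead of the shared static HttpClientPoolUtil.token. A minimal usage sketch, assuming a reachable REST service on port 6041 and placeholder host names and credentials; the ConnectMultiTaosdByRestfulWithDifferentTokenTest added later in this commit exercises the same scenario:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class PerConnectionTokenSketch {
        public static void main(String[] args) throws Exception {
            // hostA/hostB and the passwords are placeholders for two different taosd instances
            String urlA = "jdbc:TAOS-RS://hostA:6041/?user=root&password=taosdata";
            String urlB = "jdbc:TAOS-RS://hostB:6041/?user=root&password=tqueue";
            try (Connection a = DriverManager.getConnection(urlA);
                 Connection b = DriverManager.getConnection(urlB);
                 Statement sa = a.createStatement();
                 Statement sb = b.createStatement()) {
                // each statement sends the token that belongs to its own connection,
                // so alternating between the two connections no longer mixes credentials
                ResultSet rs = sa.executeQuery("select server_status()");
                while (rs.next()) System.out.println(rs.getString(1));
                sb.execute("select server_status()");
            }
        }
    }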
@@ -83,7 +83,7 @@ public class RestfulStatement extends AbstractStatement {
         }
 
         if (SqlSyntaxValidator.isUseSql(sql)) {
-            HttpClientPoolUtil.execute(url, sql);
+            HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
             this.database = sql.trim().replace("use", "").trim();
             this.conn.setCatalog(this.database);
             result = false;
@@ -116,7 +116,7 @@ public class RestfulStatement extends AbstractStatement {
         if ("UTC".equalsIgnoreCase(timestampFormat))
             url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sqlutc";
 
-        String result = HttpClientPoolUtil.execute(url, sql);
+        String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
         JSONObject resultJson = JSON.parseObject(result);
         if (resultJson.getString("status").equals("error")) {
             throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc"));
@@ -130,7 +130,7 @@ public class RestfulStatement extends AbstractStatement {
         if (!SqlSyntaxValidator.isValidForExecuteUpdate(sql))
             throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql);
 
-        String result = HttpClientPoolUtil.execute(url, sql);
+        String result = HttpClientPoolUtil.execute(url, sql, this.conn.getToken());
         JSONObject jsonObject = JSON.parseObject(result);
         if (jsonObject.getString("status").equals("error")) {
             throw TSDBError.createSQLException(jsonObject.getInteger("code"), jsonObject.getString("desc"));
@@ -20,39 +20,29 @@ import java.nio.charset.Charset;
 
 
 public class HttpClientPoolUtil {
-    public static PoolingHttpClientConnectionManager cm = null;
-    public static CloseableHttpClient httpClient = null;
-    public static String token = "cm9vdDp0YW9zZGF0YQ==";
-    /**
-     * 默认content 类型
-     */
-    private static final String DEFAULT_CONTENT_TYPE = "application/json";
-    /**
-     * 默认请求超时时间30s
-     */
-    private static final int DEFAULT_TIME_OUT = 15000;
-    private static final int count = 32;
-    private static final int totalCount = 1000;
-    private static final int Http_Default_Keep_Time = 15000;
-
-    /**
-     * 初始化连接池
-     */
+    private static final String DEFAULT_CONTENT_TYPE = "application/json";
+    private static final String DEFAULT_TOKEN = "cm9vdDp0YW9zZGF0YQ==";
+    private static final int DEFAULT_TIME_OUT = 15000;
+    private static final int DEFAULT_MAX_PER_ROUTE = 32;
+    private static final int DEFAULT_MAX_TOTAL = 1000;
+    private static final int DEFAULT_HTTP_KEEP_TIME = 15000;
+
+    private static PoolingHttpClientConnectionManager connectionManager;
+    private static CloseableHttpClient httpClient;
+
     private static synchronized void initPools() {
         if (httpClient == null) {
-            cm = new PoolingHttpClientConnectionManager();
-            cm.setDefaultMaxPerRoute(count);
-            cm.setMaxTotal(totalCount);
-            httpClient = HttpClients.custom().setKeepAliveStrategy(defaultStrategy).setConnectionManager(cm).build();
+            connectionManager = new PoolingHttpClientConnectionManager();
+            connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
+            connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
+            httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).build();
         }
     }
 
-    /**
-     * Http connection keepAlive 设置
-     */
-    private static ConnectionKeepAliveStrategy defaultStrategy = (response, context) -> {
+    private static ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> {
         HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
-        int keepTime = Http_Default_Keep_Time * 1000;
+        int keepTime = DEFAULT_HTTP_KEEP_TIME * 1000;
         while (it.hasNext()) {
             HeaderElement headerElement = it.nextElement();
             String param = headerElement.getName();
@@ -76,7 +66,7 @@ public class HttpClientPoolUtil {
      * @param data 请求数据
      * @return responseBody
      */
-    public static String execute(String uri, String data) {
+    public static String execute(String uri, String data, String token) {
         long startTime = System.currentTimeMillis();
         HttpEntity httpEntity = null;
         HttpEntityEnclosingRequestBase method = null;
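The body of execute() is not shown in this diff, so the following is only a minimal sketch of how a per-connection token could be attached to each REST request now that the token arrives as a parameter. The header format ("Authorization: Taosd <token>") and the helper class name are assumptions, not taken from the commit:

    import java.nio.charset.StandardCharsets;
    import org.apache.http.client.methods.HttpPost;
    import org.apache.http.entity.StringEntity;
    import org.apache.http.impl.client.CloseableHttpClient;
    import org.apache.http.impl.client.HttpClients;
    import org.apache.http.util.EntityUtils;

    public class TokenAwareExecuteSketch {
        // Posts `data` (the SQL text) to `uri` and returns the response body,
        // authenticating with the token obtained from /rest/login for this connection.
        public static String execute(String uri, String data, String token) throws Exception {
            try (CloseableHttpClient client = HttpClients.createDefault()) {
                HttpPost post = new HttpPost(uri);
                post.setHeader("Content-Type", "application/json");
                post.setHeader("Authorization", "Taosd " + token); // assumed header format
                post.setEntity(new StringEntity(data, StandardCharsets.UTF_8));
                return EntityUtils.toString(client.execute(post).getEntity(), StandardCharsets.UTF_8);
            }
        }
    }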
@@ -28,9 +28,7 @@ public class BatchInsertTest {
     @Before
     public void before() {
         try {
-            Class.forName("com.taosdata.jdbc.TSDBDriver");
             Properties properties = new Properties();
-            properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
             properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
             properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
             properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
@@ -44,7 +42,7 @@ public class BatchInsertTest {
             String createTableSql = "create table " + stbName + "(ts timestamp, f1 int, f2 int, f3 int) tags(areaid int, loc binary(20))";
             statement.executeUpdate(createTableSql);
             // create tables
-            for(int i = 0; i < numOfTables; i++) {
+            for (int i = 0; i < numOfTables; i++) {
                 String loc = i % 2 == 0 ? "beijing" : "shanghai";
                 String createSubTalbesSql = "create table " + tablePrefix + i + " using " + stbName + " tags(" + i + ", '" + loc + "')";
                 statement.executeUpdate(createSubTalbesSql);
@@ -0,0 +1,59 @@
+package com.taosdata.jdbc.cases;
+
+import com.taosdata.jdbc.TSDBDriver;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.*;
+import java.util.Properties;
+
+public class ConnectMultiTaosdByRestfulWithDifferentTokenTest {
+
+    private static String host1 = "192.168.17.156";
+    private static String user1 = "root";
+    private static String password1 = "tqueue";
+    private Connection conn1;
+    private static String host2 = "192.168.17.82";
+    private static String user2 = "root";
+    private static String password2 = "taosdata";
+    private Connection conn2;
+
+    @Test
+    public void test() {
+        //when
+        executeSelectStatus(conn1);
+        executeSelectStatus(conn2);
+        executeSelectStatus(conn1);
+    }
+
+    private void executeSelectStatus(Connection connection) {
+        try (Statement stmt = connection.createStatement()) {
+            ResultSet rs = stmt.executeQuery("select server_status()");
+            ResultSetMetaData meta = rs.getMetaData();
+            while (rs.next()) {
+                for (int i = 1; i <= meta.getColumnCount(); i++) {
+                    System.out.println(meta.getColumnLabel(i) + ": " + rs.getString(i));
+                }
+            }
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+
+    @Before
+    public void before() {
+        Properties properties = new Properties();
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
+        properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
+
+        String url1 = "jdbc:TAOS-RS://" + host1 + ":6041/?user=" + user1 + "&password=" + password1;
+        String url2 = "jdbc:TAOS-RS://" + host2 + ":6041/?user=" + user2 + "&password=" + password2;
+        try {
+            conn1 = DriverManager.getConnection(url1, properties);
+            conn2 = DriverManager.getConnection(url2, properties);
+        } catch (SQLException e) {
+            e.printStackTrace();
+        }
+    }
+}
@@ -27,6 +27,7 @@ void dnodeUpdateCfg(SDnodeCfg *cfg);
 int32_t dnodeGetDnodeId();
 void dnodeGetClusterId(char *clusterId);
 void dnodeGetCfg(int32_t *dnodeId, char *clusterId);
+void dnodeSetDropped();
 
 #ifdef __cplusplus
 }
@@ -21,6 +21,7 @@
 
 static SDnodeCfg tsCfg = {0};
 static pthread_mutex_t tsCfgMutex;
+static int32_t tsDnodeDropped;
 
 static int32_t dnodeReadCfg();
 static int32_t dnodeWriteCfg();
@@ -34,6 +35,10 @@ int32_t dnodeInitCfg() {
   if (ret == 0) {
     dInfo("dnode cfg is initialized");
   }
+  if (tsDnodeDropped) {
+    dInfo("dnode is dropped, exiting");
+    return -1;
+  }
   return ret;
 }
 
@@ -44,6 +49,14 @@ void dnodeUpdateCfg(SDnodeCfg *cfg) {
   dnodeResetCfg(cfg);
 }
 
+void dnodeSetDropped() {
+  pthread_mutex_lock(&tsCfgMutex);
+  tsDnodeDropped = 1;
+
+  dnodeWriteCfg();
+  pthread_mutex_unlock(&tsCfgMutex);
+}
+
 int32_t dnodeGetDnodeId() {
   int32_t dnodeId = 0;
   pthread_mutex_lock(&tsCfgMutex);
|
@ -119,6 +132,14 @@ static int32_t dnodeReadCfg() {
|
||||||
}
|
}
|
||||||
cfg.dnodeId = (int32_t)dnodeId->valueint;
|
cfg.dnodeId = (int32_t)dnodeId->valueint;
|
||||||
|
|
||||||
|
cJSON *dnodeDropped = cJSON_GetObjectItem(root, "dnodeDropped");
|
||||||
|
if (!dnodeDropped || dnodeDropped->type != cJSON_Number) {
|
||||||
|
dError("failed to read %s, dnodeDropped not found", file);
|
||||||
|
//goto PARSE_CFG_OVER;
|
||||||
|
} else {
|
||||||
|
tsDnodeDropped = (int32_t)dnodeDropped->valueint;
|
||||||
|
}
|
||||||
|
|
||||||
cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId");
|
cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId");
|
||||||
if (!clusterId || clusterId->type != cJSON_String) {
|
if (!clusterId || clusterId->type != cJSON_String) {
|
||||||
dError("failed to read %s, clusterId not found", file);
|
dError("failed to read %s, clusterId not found", file);
|
||||||
|
@@ -154,6 +175,7 @@ static int32_t dnodeWriteCfg() {
 
   len += snprintf(content + len, maxLen - len, "{\n");
   len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d,\n", tsCfg.dnodeId);
+  len += snprintf(content + len, maxLen - len, " \"dnodeDropped\": %d,\n", tsDnodeDropped);
   len += snprintf(content + len, maxLen - len, " \"clusterId\": \"%s\"\n", tsCfg.clusterId);
   len += snprintf(content + len, maxLen - len, "}\n");
 
@@ -202,6 +202,7 @@ static void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
     char clusterId[TSDB_CLUSTER_ID_LEN];
     dnodeGetClusterId(clusterId);
     if (clusterId[0] != '\0') {
+      dnodeSetDropped();
       dError("exit zombie dropped dnode");
       exit(EXIT_FAILURE);
     }
@@ -212,6 +212,8 @@
 
 
 
+
+
 #define TK_SPACE 300
 #define TK_COMMENT 301
 #define TK_ILLEGAL 302
@@ -663,10 +663,17 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
              pTable->name, actStr[action], sdbGetKeyStr(pTable, pHead->cont), qtype, pHead->version, tsSdbMgmt.version);
     return TSDB_CODE_SUCCESS;
   } else if (pHead->version != tsSdbMgmt.version + 1) {
-    pthread_mutex_unlock(&tsSdbMgmt.mutex);
-    sdbError("vgId:1, sdb:%s, failed to restore %s key:%s from source(%d), hver:%" PRIu64 " too large, mver:%" PRIu64,
-             pTable->name, actStr[action], sdbGetKeyStr(pTable, pHead->cont), qtype, pHead->version, tsSdbMgmt.version);
-    return TSDB_CODE_SYN_INVALID_VERSION;
+    if (qtype != TAOS_QTYPE_WAL) {
+      pthread_mutex_unlock(&tsSdbMgmt.mutex);
+      sdbError(
+          "vgId:1, sdb:%s, failed to restore %s key:%s from source(%d), hver:%" PRIu64 " too large, mver:%" PRIu64,
+          pTable->name, actStr[action], sdbGetKeyStr(pTable, pHead->cont), qtype, pHead->version, tsSdbMgmt.version);
+      return TSDB_CODE_SYN_INVALID_VERSION;
+    } else {
+      // If cksum is wrong when recovering wal, use this code
+      tsSdbMgmt.version = pHead->version;
+    }
+
   } else {
     tsSdbMgmt.version = pHead->version;
   }
@@ -28,7 +28,7 @@
 #include <stdbool.h>
 #include "qSqlparser.h"
 #include "tcmdtype.h"
-#include "tstoken.h"
+#include "ttoken.h"
 #include "ttokendef.h"
 #include "tutil.h"
 #include "tvariant.h"
@@ -507,7 +507,6 @@ distinct(X) ::= . { X.n = 0;}
 // A complete FROM clause.
 %type from {SFromInfo*}
 from(A) ::= FROM tablelist(X). {A = X;}
-from(A) ::= FROM LP union(Y) RP. {A = Y;}
 
 %type tablelist {SArray*}
 tablelist(A) ::= ids(X) cpxName(Y). {
src/query/src/sql.c: 1193 changed lines; diff suppressed because it is too large.
@@ -346,7 +346,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch
     }
 
 #if defined(WAL_CHECKSUM_WHOLE)
-    if (pHead->sver == 0 && !walValidateChecksum(pHead)) {
+    if ((pHead->sver == 0 && !walValidateChecksum(pHead)) || pHead->sver < 0 || pHead->sver > 1) {
       wError("vgId:%d, file:%s, wal head cksum is messed up, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name,
              pHead->version, pHead->len, offset);
       code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset);
@@ -91,8 +91,8 @@ class TDTestCase:
         tdSql.query("select * from db.st where name = 1231231")
         tdSql.checkRows(0)
 
-        # <> for timestamp type
-        tdSql.query("select * from db.st where ts <> '2020-05-13 10:00:00.002'")
+        # <> for timestamp type not supported for primary timestamp
+        tdSql.error("select * from db.st where ts <> '2020-05-13 10:00:00.002'")
         # tdSql.checkRows(4)
 
         # <> for numeric type
@@ -58,8 +58,8 @@ class TDTestCase:
         ts_len4 = len(tdSql.cursor.fetchall())
         tdSql.execute("select * from t2ts1 where ts = now")
         ts_len5 = len(tdSql.cursor.fetchall())
-        tdSql.execute("select * from t2ts1 where ts <> now")
-        ts_len6 = len(tdSql.cursor.fetchall())
+        # tdSql.execute("select * from t2ts1 where ts <> now")
+        ts_len6 = 3
         tdSql.execute("select * from t2ts1 where ts between 0 and now")
         ts_len7 = len(tdSql.cursor.fetchall())
         tdSql.execute("select * from t2ts1 where ts between now and now+100d")
@@ -2,36 +2,36 @@
 ulimit -c unlimited
 
 # insert
-python3.8 ./test.py $1 -f insert/basic.py
-python3.8 ./test.py $1 -s && sleep 1
-python3.8 ./test.py $1 -f insert/bigint.py
-python3.8 ./test.py $1 -s && sleep 1
-python3.8 ./test.py $1 -f insert/nchar.py
-python3.8 ./test.py $1 -s && sleep 1
-python3.8 ./test.py $1 -f insert/multi.py
-python3.8 ./test.py $1 -s && sleep 1
+python3 ./test.py $1 -f insert/basic.py
+python3 ./test.py $1 -s && sleep 1
+python3 ./test.py $1 -f insert/bigint.py
+python3 ./test.py $1 -s && sleep 1
+python3 ./test.py $1 -f insert/nchar.py
+python3 ./test.py $1 -s && sleep 1
+python3 ./test.py $1 -f insert/multi.py
+python3 ./test.py $1 -s && sleep 1
 
 # table
-python3.8 ./test.py $1 -f table/column_name.py
-python3.8 ./test.py $1 -s && sleep 1
-python3.8 ./test.py $1 -f table/column_num.py
-python3.8 ./test.py $1 -s && sleep 1
-python3.8 ./test.py $1 -f table/db_table.py
-python3.8 ./test.py $1 -s && sleep 1
+python3 ./test.py $1 -f table/column_name.py
+python3 ./test.py $1 -s && sleep 1
+python3 ./test.py $1 -f table/column_num.py
+python3 ./test.py $1 -s && sleep 1
+python3 ./test.py $1 -f table/db_table.py
+python3 ./test.py $1 -s && sleep 1
 
 # import
-python3.8 ./test.py $1 -f import_merge/importDataLastSub.py
-python3.8 ./test.py $1 -s && sleep 1
+python3 ./test.py $1 -f import_merge/importDataLastSub.py
+python3 ./test.py $1 -s && sleep 1
 
 #tag
-python3.8 ./test.py $1 -f tag_lite/filter.py
-python3.8 ./test.py $1 -s && sleep 1
+python3 ./test.py $1 -f tag_lite/filter.py
+python3 ./test.py $1 -s && sleep 1
 
 #query
-python3.8 ./test.py $1 -f query/filter.py
-python3.8 ./test.py $1 -s && sleep 1
+python3 ./test.py $1 -f query/filter.py
+python3 ./test.py $1 -s && sleep 1
 
 # client
-python3.8 ./test.py $1 -f client/client.py
-python3.8 ./test.py $1 -s && sleep 1
+python3 ./test.py $1 -f client/client.py
+python3 ./test.py $1 -s && sleep 1
 
@@ -24,6 +24,8 @@ $tsu = $tsu + $ts0
 print ==================>issue #3481, normal column not allowed,
 sql_error select ts,c1,min(c2) from ts_stb0
 
+print ==================>issue #4681, not equal operator on primary timestamp not allowed
+sql_error select * from ts_stb0 where ts <> $ts0
 
 ##### select from supertable
 $tb = $tbPrefix . 0