Merge branch 'develop' into feature/TD-2200

huili 2021-01-06 09:28:25 +08:00 committed by GitHub
commit 8ee706dfef
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
390 changed files with 21620 additions and 8281 deletions


@@ -146,7 +146,7 @@ matrix:
branch_pattern: coverity_scan
- os: linux
dist: trusty
dist: xenial
language: c
git:
- depth: 1
@@ -157,7 +157,7 @@ matrix:
- build-essential
- cmake
env:
- DESC="trusty/gcc-4.8 build"
- DESC="xenial build"
before_script:
- export TZ=Asia/Harbin
@@ -227,7 +227,7 @@ matrix:
- os: linux
arch: arm64
dist: trusty
dist: xenial
language: c
git:
- depth: 1
@@ -238,7 +238,7 @@ matrix:
- build-essential
- cmake
env:
- DESC="trusty/gcc-4.8 build"
- DESC="xenial build"
before_script:
- export TZ=Asia/Harbin

Jenkinsfile (vendored, 24 changes)

@@ -41,12 +41,15 @@ def pre_test(){
cd ${WKC}
git checkout develop
git reset --hard HEAD~10
git pull
git fetch
git checkout ${CHANGE_BRANCH}
git reset --hard HEAD~10
git pull
git merge develop
cd ${WK}
git reset --hard
git reset --hard HEAD~10
git checkout develop
git pull
cd ${WK}
@@ -79,14 +82,27 @@ pipeline {
changeRequest()
}
parallel {
stage('python') {
agent{label 'pytest'}
stage('python_1') {
agent{label 'p1'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
./test-all.sh pytestfq
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p1
date'''
}
}
stage('python_2') {
agent{label 'p2'}
steps {
pre_test()
sh '''
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p2
date'''
}
}


@@ -5,7 +5,7 @@ go 1.14
require (
github.com/jmoiron/sqlx v1.2.0
github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/taosdata/driver-go v0.0.0-20200727182616-1a3b1941c206
github.com/taosdata/driver-go v0.0.0-20201113094317-050667e5b4d0
go.uber.org/zap v1.14.1
google.golang.org/appengine v1.6.5 // indirect
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c


@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.11.0")
SET(TD_VER_NUMBER "2.0.13.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

deps/libcurl/include/curl/curl.h (vendored, new file, 2415 lines)

File diff suppressed because it is too large.

deps/libcurl/include/curl/curlbuild.h (vendored, new file, 198 lines)

@@ -0,0 +1,198 @@
/* include/curl/curlbuild.h. Generated from curlbuild.h.in by configure. */
#ifndef __CURL_CURLBUILD_H
#define __CURL_CURLBUILD_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* ================================================================ */
/* NOTES FOR CONFIGURE CAPABLE SYSTEMS */
/* ================================================================ */
/*
* NOTE 1:
* -------
*
* Nothing in this file is intended to be modified or adjusted by the
* curl library user nor by the curl library builder.
*
* If you think that something actually needs to be changed, adjusted
* or fixed in this file, then, report it on the libcurl development
* mailing list: http://cool.haxx.se/mailman/listinfo/curl-library/
*
* This header file shall only export symbols which are 'curl' or 'CURL'
* prefixed, otherwise public name space would be polluted.
*
* NOTE 2:
* -------
*
* Right now you might be staring at file include/curl/curlbuild.h.in or
* at file include/curl/curlbuild.h, this is due to the following reason:
*
* On systems capable of running the configure script, the configure process
* will overwrite the distributed include/curl/curlbuild.h file with one that
* is suitable and specific to the library being configured and built, which
* is generated from the include/curl/curlbuild.h.in template file.
*
*/
/* ================================================================ */
/* DEFINITION OF THESE SYMBOLS SHALL NOT TAKE PLACE ANYWHERE ELSE */
/* ================================================================ */
#ifdef CURL_SIZEOF_LONG
#error "CURL_SIZEOF_LONG shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SIZEOF_LONG_already_defined
#endif
#ifdef CURL_TYPEOF_CURL_SOCKLEN_T
#error "CURL_TYPEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_already_defined
#endif
#ifdef CURL_SIZEOF_CURL_SOCKLEN_T
#error "CURL_SIZEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_already_defined
#endif
#ifdef CURL_TYPEOF_CURL_OFF_T
#error "CURL_TYPEOF_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_already_defined
#endif
#ifdef CURL_FORMAT_CURL_OFF_T
#error "CURL_FORMAT_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_already_defined
#endif
#ifdef CURL_FORMAT_CURL_OFF_TU
#error "CURL_FORMAT_CURL_OFF_TU shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_already_defined
#endif
#ifdef CURL_FORMAT_OFF_T
#error "CURL_FORMAT_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_FORMAT_OFF_T_already_defined
#endif
#ifdef CURL_SIZEOF_CURL_OFF_T
#error "CURL_SIZEOF_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_already_defined
#endif
#ifdef CURL_SUFFIX_CURL_OFF_T
#error "CURL_SUFFIX_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_already_defined
#endif
#ifdef CURL_SUFFIX_CURL_OFF_TU
#error "CURL_SUFFIX_CURL_OFF_TU shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_already_defined
#endif
/* ================================================================ */
/* EXTERNAL INTERFACE SETTINGS FOR CONFIGURE CAPABLE SYSTEMS ONLY */
/* ================================================================ */
/* Configure process defines this to 1 when it finds out that system */
/* header file ws2tcpip.h must be included by the external interface. */
/* #undef CURL_PULL_WS2TCPIP_H */
#ifdef CURL_PULL_WS2TCPIP_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# include <winsock2.h>
# include <ws2tcpip.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file sys/types.h must be included by the external interface. */
#define CURL_PULL_SYS_TYPES_H 1
#ifdef CURL_PULL_SYS_TYPES_H
# include <sys/types.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file stdint.h must be included by the external interface. */
/* #undef CURL_PULL_STDINT_H */
#ifdef CURL_PULL_STDINT_H
# include <stdint.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file inttypes.h must be included by the external interface. */
/* #undef CURL_PULL_INTTYPES_H */
#ifdef CURL_PULL_INTTYPES_H
# include <inttypes.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file sys/socket.h must be included by the external interface. */
#define CURL_PULL_SYS_SOCKET_H 1
#ifdef CURL_PULL_SYS_SOCKET_H
# include <sys/socket.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file sys/poll.h must be included by the external interface. */
/* #undef CURL_PULL_SYS_POLL_H */
#ifdef CURL_PULL_SYS_POLL_H
# include <sys/poll.h>
#endif
/* The size of `long', as computed by sizeof. */
#define CURL_SIZEOF_LONG 8
/* Integral data type used for curl_socklen_t. */
#define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t
/* The size of `curl_socklen_t', as computed by sizeof. */
#define CURL_SIZEOF_CURL_SOCKLEN_T 4
/* Data type definition of curl_socklen_t. */
typedef CURL_TYPEOF_CURL_SOCKLEN_T curl_socklen_t;
/* Signed integral data type used for curl_off_t. */
#define CURL_TYPEOF_CURL_OFF_T long
/* Data type definition of curl_off_t. */
typedef CURL_TYPEOF_CURL_OFF_T curl_off_t;
/* curl_off_t formatting string directive without "%" conversion specifier. */
#define CURL_FORMAT_CURL_OFF_T "ld"
/* unsigned curl_off_t formatting string without "%" conversion specifier. */
#define CURL_FORMAT_CURL_OFF_TU "lu"
/* curl_off_t formatting string directive with "%" conversion specifier. */
#define CURL_FORMAT_OFF_T "%ld"
/* The size of `curl_off_t', as computed by sizeof. */
#define CURL_SIZEOF_CURL_OFF_T 8
/* curl_off_t constant suffix. */
#define CURL_SUFFIX_CURL_OFF_T L
/* unsigned curl_off_t constant suffix. */
#define CURL_SUFFIX_CURL_OFF_TU UL
#endif /* __CURL_CURLBUILD_H */
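A minimal usage sketch (not part of this commit) for the macros defined above, assuming an application built against this curlbuild.h that includes <curl/curl.h>; the variable name is illustrative only:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
    /* curl_off_t is 'long' (8 bytes) per the defines above; the format
     * macro carries no "%", so it is spliced into the format string. */
    curl_off_t downloaded = 1234567890;
    printf("downloaded %" CURL_FORMAT_CURL_OFF_T " bytes\n", downloaded);
    return 0;
}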

deps/libcurl/include/curl/curlrules.h (vendored, new file, 262 lines)

@@ -0,0 +1,262 @@
#ifndef __CURL_CURLRULES_H
#define __CURL_CURLRULES_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* ================================================================ */
/* COMPILE TIME SANITY CHECKS */
/* ================================================================ */
/*
* NOTE 1:
* -------
*
* All checks done in this file are intentionally placed in a public
* header file which is pulled by curl/curl.h when an application is
* being built using an already built libcurl library. Additionally
* this file is also included and used when building the library.
*
* If compilation fails on this file, it is almost certain that the
* problem is elsewhere. It could be a problem in the curlbuild.h
* header file, or simply that you are using different compilation
* settings than those used to build the library.
*
* Nothing in this file is intended to be modified or adjusted by the
* curl library user nor by the curl library builder.
*
* Do not deactivate any check, these are done to make sure that the
* library is properly built and used.
*
* You can find further help on the libcurl development mailing list:
* http://cool.haxx.se/mailman/listinfo/curl-library/
*
* NOTE 2
* ------
*
* Some of the following compile time checks are based on the fact
* that the dimension of a constant array can not be a negative one.
* In this way if the compile time verification fails, the compilation
* will fail issuing an error. The error description wording is compiler
* dependent but it will be quite similar to one of the following:
*
* "negative subscript or subscript is too large"
* "array must have at least one element"
* "-1 is an illegal array size"
* "size of array is negative"
*
* If you are building an application which tries to use an already
* built libcurl library and you are getting this kind of errors on
* this file, it is a clear indication that there is a mismatch between
* how the library was built and how you are trying to use it for your
* application. Your already compiled or binary library provider is the
* only one who can give you the details you need to properly use it.
*/
/*
* Verify that some macros are actually defined.
*/
#ifndef CURL_SIZEOF_LONG
# error "CURL_SIZEOF_LONG definition is missing!"
Error Compilation_aborted_CURL_SIZEOF_LONG_is_missing
#endif
#ifndef CURL_TYPEOF_CURL_SOCKLEN_T
# error "CURL_TYPEOF_CURL_SOCKLEN_T definition is missing!"
Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_is_missing
#endif
#ifndef CURL_SIZEOF_CURL_SOCKLEN_T
# error "CURL_SIZEOF_CURL_SOCKLEN_T definition is missing!"
Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_is_missing
#endif
#ifndef CURL_TYPEOF_CURL_OFF_T
# error "CURL_TYPEOF_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_is_missing
#endif
#ifndef CURL_FORMAT_CURL_OFF_T
# error "CURL_FORMAT_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_is_missing
#endif
#ifndef CURL_FORMAT_CURL_OFF_TU
# error "CURL_FORMAT_CURL_OFF_TU definition is missing!"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_is_missing
#endif
#ifndef CURL_FORMAT_OFF_T
# error "CURL_FORMAT_OFF_T definition is missing!"
Error Compilation_aborted_CURL_FORMAT_OFF_T_is_missing
#endif
#ifndef CURL_SIZEOF_CURL_OFF_T
# error "CURL_SIZEOF_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_is_missing
#endif
#ifndef CURL_SUFFIX_CURL_OFF_T
# error "CURL_SUFFIX_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_is_missing
#endif
#ifndef CURL_SUFFIX_CURL_OFF_TU
# error "CURL_SUFFIX_CURL_OFF_TU definition is missing!"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_is_missing
#endif
/*
* Macros private to this header file.
*/
#define CurlchkszEQ(t, s) sizeof(t) == s ? 1 : -1
#define CurlchkszGE(t1, t2) sizeof(t1) >= sizeof(t2) ? 1 : -1
/*
* Verify that the size previously defined and expected for long
* is the same as the one reported by sizeof() at compile time.
*/
typedef char
__curl_rule_01__
[CurlchkszEQ(long, CURL_SIZEOF_LONG)];
/*
* Verify that the size previously defined and expected for
* curl_off_t is actually the same as the one reported
* by sizeof() at compile time.
*/
typedef char
__curl_rule_02__
[CurlchkszEQ(curl_off_t, CURL_SIZEOF_CURL_OFF_T)];
/*
* Verify at compile time that the size of curl_off_t as reported
* by sizeof() is greater than or equal to the one reported for long
* for the current compilation.
*/
typedef char
__curl_rule_03__
[CurlchkszGE(curl_off_t, long)];
/*
* Verify that the size previously defined and expected for
* curl_socklen_t is actually the same as the one reported
* by sizeof() at compile time.
*/
typedef char
__curl_rule_04__
[CurlchkszEQ(curl_socklen_t, CURL_SIZEOF_CURL_SOCKLEN_T)];
/*
* Verify at compile time that the size of curl_socklen_t as reported
* by sizeof() is greater than or equal to the one reported for int for
* the current compilation.
*/
typedef char
__curl_rule_05__
[CurlchkszGE(curl_socklen_t, int)];
/* ================================================================ */
/* EXTERNALLY AND INTERNALLY VISIBLE DEFINITIONS */
/* ================================================================ */
/*
* CURL_ISOCPP and CURL_OFF_T_C definitions are done here in order to allow
* these to be visible and exported by the external libcurl interface API,
* while also making them visible to the library internals, simply including
* curl_setup.h, without actually needing to include curl.h internally.
* If some day this section would grow big enough, all this should be moved
* to its own header file.
*/
/*
* Figure out if we can use the ## preprocessor operator, which is supported
* by ISO/ANSI C and C++. Some compilers support it without setting __STDC__
* or __cplusplus so we need to carefully check for them too.
*/
#if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) || \
defined(__HP_aCC) || defined(__BORLANDC__) || defined(__LCC__) || \
defined(__POCC__) || defined(__SALFORDC__) || defined(__HIGHC__) || \
defined(__ILEC400__)
/* This compiler is believed to have an ISO compatible preprocessor */
#define CURL_ISOCPP
#else
/* This compiler is believed NOT to have an ISO compatible preprocessor */
#undef CURL_ISOCPP
#endif
/*
* Macros for minimum-width signed and unsigned curl_off_t integer constants.
*/
#if defined(__BORLANDC__) && (__BORLANDC__ == 0x0551)
# define __CURL_OFF_T_C_HLPR2(x) x
# define __CURL_OFF_T_C_HLPR1(x) __CURL_OFF_T_C_HLPR2(x)
# define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \
__CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_T)
# define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \
__CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_TU)
#else
# ifdef CURL_ISOCPP
# define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val ## Suffix
# else
# define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val/**/Suffix
# endif
# define __CURL_OFF_T_C_HLPR1(Val,Suffix) __CURL_OFF_T_C_HLPR2(Val,Suffix)
# define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_T)
# define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_TU)
#endif
/*
* Get rid of macros private to this header file.
*/
#undef CurlchkszEQ
#undef CurlchkszGE
/*
* Get rid of macros not intended to exist beyond this point.
*/
#undef CURL_PULL_WS2TCPIP_H
#undef CURL_PULL_SYS_TYPES_H
#undef CURL_PULL_SYS_SOCKET_H
#undef CURL_PULL_SYS_POLL_H
#undef CURL_PULL_STDINT_H
#undef CURL_PULL_INTTYPES_H
#undef CURL_TYPEOF_CURL_SOCKLEN_T
#undef CURL_TYPEOF_CURL_OFF_T
#ifdef CURL_NO_OLDIES
#undef CURL_FORMAT_OFF_T /* not required since 7.19.0 - obsoleted in 7.20.0 */
#endif
#endif /* __CURL_CURLRULES_H */
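A short sketch (not from this commit) of the two techniques this header relies on, assuming <curl/curl.h> is included; the typedef and constant names are illustrative, not libcurl identifiers:

#include <curl/curl.h>

/* Compile-time size check in the same style as __curl_rule_02__ above:
 * the array dimension is 1 when the sizes agree and -1 otherwise, so a
 * mismatch turns into a "negative array size" build error. */
typedef char app_off_t_size_check
  [sizeof(curl_off_t) == CURL_SIZEOF_CURL_OFF_T ? 1 : -1];

/* CURL_OFF_T_C pastes the platform suffix onto the literal, producing a
 * curl_off_t constant of the matching width (4000000000L on this build). */
static const curl_off_t four_billion = CURL_OFF_T_C(4000000000);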

deps/libcurl/include/curl/curlver.h (vendored, new file, 77 lines)

@@ -0,0 +1,77 @@
#ifndef __CURL_CURLVER_H
#define __CURL_CURLVER_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* This header file contains nothing but libcurl version info, generated by
a script at release-time. This was made its own header file in 7.11.2 */
/* This is the global package copyright */
#define LIBCURL_COPYRIGHT "1996 - 2015 Daniel Stenberg, <daniel@haxx.se>."
/* This is the version number of the libcurl package from which this header
file origins: */
#define LIBCURL_VERSION "7.47.0"
/* The numeric version number is also available "in parts" by using these
defines: */
#define LIBCURL_VERSION_MAJOR 7
#define LIBCURL_VERSION_MINOR 47
#define LIBCURL_VERSION_PATCH 0
/* This is the numeric version of the libcurl version number, meant for easier
parsing and comparisons by programs. The LIBCURL_VERSION_NUM define will
always follow this syntax:
0xXXYYZZ
Where XX, YY and ZZ are the main version, release and patch numbers in
hexadecimal (using 8 bits each). All three numbers are always represented
using two digits. 1.2 would appear as "0x010200" while version 9.11.7
appears as "0x090b07".
This 6-digit (24 bits) hexadecimal number does not show pre-release number,
and it is always a greater number in a more recent release. It makes
comparisons with greater than and less than work.
Note: This define is the full hex number and _does not_ use the
CURL_VERSION_BITS() macro since curl's own configure script greps for it
and needs it to contain the full number.
*/
#define LIBCURL_VERSION_NUM 0x072f00
/*
* This is the date and time when the full source package was created. The
* timestamp is not stored in git, as the timestamp is properly set in the
* tarballs by the maketgz script.
*
* The format of the date should follow this template:
*
* "Mon Feb 12 11:35:33 UTC 2007"
*/
#define LIBCURL_TIMESTAMP "Wed Jan 27 07:32:44 UTC 2016"
#define CURL_VERSION_BITS(x,y,z) ((x)<<16|(y)<<8|z)
#define CURL_AT_LEAST_VERSION(x,y,z) \
(LIBCURL_VERSION_NUM >= CURL_VERSION_BITS(x, y, z))
#endif /* __CURL_CURLVER_H */
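A sketch (not from this commit) of compile-time version gating with the macros above, assuming a translation unit that includes <curl/curl.h>; the gated feature, CURLOPT_UNIX_SOCKET_PATH from 7.40.0, is only an example:

#include <curl/curl.h>

#if CURL_AT_LEAST_VERSION(7, 40, 0)
  /* Options introduced in 7.40.0 or later may be referenced here. */
  #define APP_HAVE_UNIX_SOCKETS 1
#else
  #define APP_HAVE_UNIX_SOCKETS 0
#endif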

deps/libcurl/include/curl/easy.h (vendored, new file, 102 lines)

@@ -0,0 +1,102 @@
#ifndef __CURL_EASY_H
#define __CURL_EASY_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#ifdef __cplusplus
extern "C" {
#endif
CURL_EXTERN CURL *curl_easy_init(void);
CURL_EXTERN CURLcode curl_easy_setopt(CURL *curl, CURLoption option, ...);
CURL_EXTERN CURLcode curl_easy_perform(CURL *curl);
CURL_EXTERN void curl_easy_cleanup(CURL *curl);
/*
* NAME curl_easy_getinfo()
*
* DESCRIPTION
*
* Request internal information from the curl session with this function. The
* third argument MUST be a pointer to a long, a pointer to a char * or a
* pointer to a double (as the documentation describes elsewhere). The data
* pointed to will be filled in accordingly and can be relied upon only if the
* function returns CURLE_OK. This function is intended to get used *AFTER* a
* performed transfer, all results from this function are undefined until the
* transfer is completed.
*/
CURL_EXTERN CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...);
/*
* NAME curl_easy_duphandle()
*
* DESCRIPTION
*
* Creates a new curl session handle with the same options set for the handle
* passed in. Duplicating a handle could only be a matter of cloning data and
* options; internal state info and things like persistent connections cannot
* be transferred. It is useful in multithreaded applications when you can run
* curl_easy_duphandle() for each new thread to avoid a series of identical
* curl_easy_setopt() invokes in every thread.
*/
CURL_EXTERN CURL* curl_easy_duphandle(CURL *curl);
/*
* NAME curl_easy_reset()
*
* DESCRIPTION
*
* Re-initializes a CURL handle to the default values. This puts back the
* handle to the same state as it was in when it was just created.
*
* It does keep: live connections, the Session ID cache, the DNS cache and the
* cookies.
*/
CURL_EXTERN void curl_easy_reset(CURL *curl);
/*
* NAME curl_easy_recv()
*
* DESCRIPTION
*
* Receives data from the connected socket. Use after successful
* curl_easy_perform() with CURLOPT_CONNECT_ONLY option.
*/
CURL_EXTERN CURLcode curl_easy_recv(CURL *curl, void *buffer, size_t buflen,
size_t *n);
/*
* NAME curl_easy_send()
*
* DESCRIPTION
*
* Sends data over the connected socket. Use after successful
* curl_easy_perform() with CURLOPT_CONNECT_ONLY option.
*/
CURL_EXTERN CURLcode curl_easy_send(CURL *curl, const void *buffer,
size_t buflen, size_t *n);
#ifdef __cplusplus
}
#endif
#endif
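A minimal sketch (not from this commit) of the easy-interface flow declared above: init, set options, perform, read back info, clean up. It assumes <curl/curl.h> and a reachable URL, with error handling trimmed:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
    curl_global_init(CURL_GLOBAL_DEFAULT);
    CURL *curl = curl_easy_init();
    if(curl) {
        curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
        if(curl_easy_perform(curl) == CURLE_OK) {
            long status = 0;
            /* per the curl_easy_getinfo() note above, this is only
             * meaningful after the transfer has completed */
            curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
            printf("HTTP status: %ld\n", status);
        }
        curl_easy_cleanup(curl);
    }
    curl_global_cleanup();
    return 0;
}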

deps/libcurl/include/curl/mprintf.h (vendored, new file, 74 lines)

@@ -0,0 +1,74 @@
#ifndef __CURL_MPRINTF_H
#define __CURL_MPRINTF_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include <stdarg.h>
#include <stdio.h> /* needed for FILE */
#include "curl.h"
#ifdef __cplusplus
extern "C" {
#endif
CURL_EXTERN int curl_mprintf(const char *format, ...);
CURL_EXTERN int curl_mfprintf(FILE *fd, const char *format, ...);
CURL_EXTERN int curl_msprintf(char *buffer, const char *format, ...);
CURL_EXTERN int curl_msnprintf(char *buffer, size_t maxlength,
const char *format, ...);
CURL_EXTERN int curl_mvprintf(const char *format, va_list args);
CURL_EXTERN int curl_mvfprintf(FILE *fd, const char *format, va_list args);
CURL_EXTERN int curl_mvsprintf(char *buffer, const char *format, va_list args);
CURL_EXTERN int curl_mvsnprintf(char *buffer, size_t maxlength,
const char *format, va_list args);
CURL_EXTERN char *curl_maprintf(const char *format, ...);
CURL_EXTERN char *curl_mvaprintf(const char *format, va_list args);
#ifdef _MPRINTF_REPLACE
# undef printf
# undef fprintf
# undef sprintf
# undef vsprintf
# undef snprintf
# undef vprintf
# undef vfprintf
# undef vsnprintf
# undef aprintf
# undef vaprintf
# define printf curl_mprintf
# define fprintf curl_mfprintf
# define sprintf curl_msprintf
# define vsprintf curl_mvsprintf
# define snprintf curl_msnprintf
# define vprintf curl_mvprintf
# define vfprintf curl_mvfprintf
# define vsnprintf curl_mvsnprintf
# define aprintf curl_maprintf
# define vaprintf curl_mvaprintf
#endif
#ifdef __cplusplus
}
#endif
#endif /* __CURL_MPRINTF_H */
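A sketch (not from this commit) of the allocating printf clone declared above; it assumes the returned string is heap memory released with curl_free(), which pairs with libcurl's allocator:

#include <stdio.h>
#include <curl/curl.h>
#include <curl/mprintf.h>

int main(void)
{
    char *msg = curl_maprintf("retry %s in %d ms", "example.com", 250);
    if(msg) {
        puts(msg);
        curl_free(msg);  /* release the string allocated by curl_maprintf() */
    }
    return 0;
}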

deps/libcurl/include/curl/multi.h (vendored, new file, 435 lines)

@@ -0,0 +1,435 @@
#ifndef __CURL_MULTI_H
#define __CURL_MULTI_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/*
This is an "external" header file. Don't give away any internals here!
GOALS
o Enable a "pull" interface. The application that uses libcurl decides where
and when to ask libcurl to get/send data.
o Enable multiple simultaneous transfers in the same thread without making it
complicated for the application.
o Enable the application to select() on its own file descriptors and curl's
file descriptors simultaneously and easily.
*/
/*
* This header file should not really need to include "curl.h" since curl.h
* itself includes this file and we expect user applications to do #include
* <curl/curl.h> without the need for especially including multi.h.
*
* For some reason we added this include here at one point, and rather than to
* break existing (wrongly written) libcurl applications, we leave it as-is
* but with this warning attached.
*/
#include "curl.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void CURLM;
typedef enum {
CURLM_CALL_MULTI_PERFORM = -1, /* please call curl_multi_perform() or
curl_multi_socket*() soon */
CURLM_OK,
CURLM_BAD_HANDLE, /* the passed-in handle is not a valid CURLM handle */
CURLM_BAD_EASY_HANDLE, /* an easy handle was not good/valid */
CURLM_OUT_OF_MEMORY, /* if you ever get this, you're in deep sh*t */
CURLM_INTERNAL_ERROR, /* this is a libcurl bug */
CURLM_BAD_SOCKET, /* the passed in socket argument did not match */
CURLM_UNKNOWN_OPTION, /* curl_multi_setopt() with unsupported option */
CURLM_ADDED_ALREADY, /* an easy handle already added to a multi handle was
attempted to get added - again */
CURLM_LAST
} CURLMcode;
/* just to make code nicer when using curl_multi_socket() you can now check
for CURLM_CALL_MULTI_SOCKET too in the same style it works for
curl_multi_perform() and CURLM_CALL_MULTI_PERFORM */
#define CURLM_CALL_MULTI_SOCKET CURLM_CALL_MULTI_PERFORM
/* bitmask bits for CURLMOPT_PIPELINING */
#define CURLPIPE_NOTHING 0L
#define CURLPIPE_HTTP1 1L
#define CURLPIPE_MULTIPLEX 2L
typedef enum {
CURLMSG_NONE, /* first, not used */
CURLMSG_DONE, /* This easy handle has completed. 'result' contains
the CURLcode of the transfer */
CURLMSG_LAST /* last, not used */
} CURLMSG;
struct CURLMsg {
CURLMSG msg; /* what this message means */
CURL *easy_handle; /* the handle it concerns */
union {
void *whatever; /* message-specific data */
CURLcode result; /* return code for transfer */
} data;
};
typedef struct CURLMsg CURLMsg;
/* Based on poll(2) structure and values.
* We don't use pollfd and POLL* constants explicitly
* to cover platforms without poll(). */
#define CURL_WAIT_POLLIN 0x0001
#define CURL_WAIT_POLLPRI 0x0002
#define CURL_WAIT_POLLOUT 0x0004
struct curl_waitfd {
curl_socket_t fd;
short events;
short revents; /* not supported yet */
};
/*
* Name: curl_multi_init()
*
* Desc: initialize multi-style curl usage
*
* Returns: a new CURLM handle to use in all 'curl_multi' functions.
*/
CURL_EXTERN CURLM *curl_multi_init(void);
/*
* Name: curl_multi_add_handle()
*
* Desc: add a standard curl handle to the multi stack
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_add_handle(CURLM *multi_handle,
CURL *curl_handle);
/*
* Name: curl_multi_remove_handle()
*
* Desc: removes a curl handle from the multi stack again
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_remove_handle(CURLM *multi_handle,
CURL *curl_handle);
/*
* Name: curl_multi_fdset()
*
* Desc: Ask curl for its fd_set sets. The app can use these to select() or
* poll() on. We want curl_multi_perform() called as soon as one of
* them are ready.
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_fdset(CURLM *multi_handle,
fd_set *read_fd_set,
fd_set *write_fd_set,
fd_set *exc_fd_set,
int *max_fd);
/*
* Name: curl_multi_wait()
*
* Desc: Poll on all fds within a CURLM set as well as any
* additional fds passed to the function.
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_wait(CURLM *multi_handle,
struct curl_waitfd extra_fds[],
unsigned int extra_nfds,
int timeout_ms,
int *ret);
/*
* Name: curl_multi_perform()
*
* Desc: When the app thinks there's data available for curl it calls this
* function to read/write whatever there is right now. This returns
* as soon as the reads and writes are done. This function does not
* require that there actually is data available for reading or that
* data can be written, it can be called just in case. It returns
* the number of handles that still transfer data in the second
* argument's integer-pointer.
*
* Returns: CURLMcode type, general multi error code. *NOTE* that this only
* returns errors etc regarding the whole multi stack. There might
* still have occurred problems on individual transfers even when this
* returns OK.
*/
CURL_EXTERN CURLMcode curl_multi_perform(CURLM *multi_handle,
int *running_handles);
/*
* Name: curl_multi_cleanup()
*
* Desc: Cleans up and removes a whole multi stack. It does not free or
* touch any individual easy handles in any way. We need to define
* in what state those handles will be if this function is called
* in the middle of a transfer.
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_cleanup(CURLM *multi_handle);
/*
* Name: curl_multi_info_read()
*
* Desc: Ask the multi handle if there's any messages/informationals from
* the individual transfers. Messages include informationals such as
* error code from the transfer or just the fact that a transfer is
* completed. More details on these should be written down as well.
*
* Repeated calls to this function will return a new struct each
* time, until a special "end of msgs" struct is returned as a signal
* that there is no more to get at this point.
*
* The data the returned pointer points to will not survive calling
* curl_multi_cleanup().
*
* The 'CURLMsg' struct is meant to be very simple and only contain
* very basic information. If more involved information is wanted,
* we will provide the particular "transfer handle" in that struct
* and that should/could/would be used in subsequent
* curl_easy_getinfo() calls (or similar). The point being that we
* must never expose complex structs to applications, as then we'll
* undoubtedly get backwards compatibility problems in the future.
*
* Returns: A pointer to a filled-in struct, or NULL if it failed or ran out
* of structs. It also writes the number of messages left in the
* queue (after this read) in the integer the second argument points
* to.
*/
CURL_EXTERN CURLMsg *curl_multi_info_read(CURLM *multi_handle,
int *msgs_in_queue);
/*
* Name: curl_multi_strerror()
*
* Desc: The curl_multi_strerror function may be used to turn a CURLMcode
* value into the equivalent human readable error string. This is
* useful for printing meaningful error messages.
*
* Returns: A pointer to a zero-terminated error message.
*/
CURL_EXTERN const char *curl_multi_strerror(CURLMcode);
/*
* Name: curl_multi_socket() and
* curl_multi_socket_all()
*
* Desc: An alternative version of curl_multi_perform() that allows the
* application to pass in one of the file descriptors that have been
* detected to have "action" on them and let libcurl perform.
* See man page for details.
*/
#define CURL_POLL_NONE 0
#define CURL_POLL_IN 1
#define CURL_POLL_OUT 2
#define CURL_POLL_INOUT 3
#define CURL_POLL_REMOVE 4
#define CURL_SOCKET_TIMEOUT CURL_SOCKET_BAD
#define CURL_CSELECT_IN 0x01
#define CURL_CSELECT_OUT 0x02
#define CURL_CSELECT_ERR 0x04
typedef int (*curl_socket_callback)(CURL *easy, /* easy handle */
curl_socket_t s, /* socket */
int what, /* see above */
void *userp, /* private callback
pointer */
void *socketp); /* private socket
pointer */
/*
* Name: curl_multi_timer_callback
*
* Desc: Called by libcurl whenever the library detects a change in the
* maximum number of milliseconds the app is allowed to wait before
* curl_multi_socket() or curl_multi_perform() must be called
* (to allow libcurl's timed events to take place).
*
* Returns: The callback should return zero.
*/
typedef int (*curl_multi_timer_callback)(CURLM *multi, /* multi handle */
long timeout_ms, /* see above */
void *userp); /* private callback
pointer */
CURL_EXTERN CURLMcode curl_multi_socket(CURLM *multi_handle, curl_socket_t s,
int *running_handles);
CURL_EXTERN CURLMcode curl_multi_socket_action(CURLM *multi_handle,
curl_socket_t s,
int ev_bitmask,
int *running_handles);
CURL_EXTERN CURLMcode curl_multi_socket_all(CURLM *multi_handle,
int *running_handles);
#ifndef CURL_ALLOW_OLD_MULTI_SOCKET
/* This macro below was added in 7.16.3 to push users who recompile to use
the new curl_multi_socket_action() instead of the old curl_multi_socket()
*/
#define curl_multi_socket(x,y,z) curl_multi_socket_action(x,y,0,z)
#endif
/*
* Name: curl_multi_timeout()
*
* Desc: Returns the maximum number of milliseconds the app is allowed to
* wait before curl_multi_socket() or curl_multi_perform() must be
* called (to allow libcurl's timed events to take place).
*
* Returns: CURLM error code.
*/
CURL_EXTERN CURLMcode curl_multi_timeout(CURLM *multi_handle,
long *milliseconds);
#undef CINIT /* re-using the same name as in curl.h */
#ifdef CURL_ISOCPP
#define CINIT(name,type,num) CURLMOPT_ ## name = CURLOPTTYPE_ ## type + num
#else
/* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. */
#define LONG CURLOPTTYPE_LONG
#define OBJECTPOINT CURLOPTTYPE_OBJECTPOINT
#define FUNCTIONPOINT CURLOPTTYPE_FUNCTIONPOINT
#define OFF_T CURLOPTTYPE_OFF_T
#define CINIT(name,type,number) CURLMOPT_/**/name = type + number
#endif
typedef enum {
/* This is the socket callback function pointer */
CINIT(SOCKETFUNCTION, FUNCTIONPOINT, 1),
/* This is the argument passed to the socket callback */
CINIT(SOCKETDATA, OBJECTPOINT, 2),
/* set to 1 to enable pipelining for this multi handle */
CINIT(PIPELINING, LONG, 3),
/* This is the timer callback function pointer */
CINIT(TIMERFUNCTION, FUNCTIONPOINT, 4),
/* This is the argument passed to the timer callback */
CINIT(TIMERDATA, OBJECTPOINT, 5),
/* maximum number of entries in the connection cache */
CINIT(MAXCONNECTS, LONG, 6),
/* maximum number of (pipelining) connections to one host */
CINIT(MAX_HOST_CONNECTIONS, LONG, 7),
/* maximum number of requests in a pipeline */
CINIT(MAX_PIPELINE_LENGTH, LONG, 8),
/* a connection with a content-length longer than this
will not be considered for pipelining */
CINIT(CONTENT_LENGTH_PENALTY_SIZE, OFF_T, 9),
/* a connection with a chunk length longer than this
will not be considered for pipelining */
CINIT(CHUNK_LENGTH_PENALTY_SIZE, OFF_T, 10),
/* a list of site names(+port) that are blacklisted from
pipelining */
CINIT(PIPELINING_SITE_BL, OBJECTPOINT, 11),
/* a list of server types that are blacklisted from
pipelining */
CINIT(PIPELINING_SERVER_BL, OBJECTPOINT, 12),
/* maximum number of open connections in total */
CINIT(MAX_TOTAL_CONNECTIONS, LONG, 13),
/* This is the server push callback function pointer */
CINIT(PUSHFUNCTION, FUNCTIONPOINT, 14),
/* This is the argument passed to the server push callback */
CINIT(PUSHDATA, OBJECTPOINT, 15),
CURLMOPT_LASTENTRY /* the last unused */
} CURLMoption;
/*
* Name: curl_multi_setopt()
*
* Desc: Sets options for the multi handle.
*
* Returns: CURLM error code.
*/
CURL_EXTERN CURLMcode curl_multi_setopt(CURLM *multi_handle,
CURLMoption option, ...);
/*
* Name: curl_multi_assign()
*
* Desc: This function sets an association in the multi handle between the
* given socket and a private pointer of the application. This is
* (only) useful for curl_multi_socket uses.
*
* Returns: CURLM error code.
*/
CURL_EXTERN CURLMcode curl_multi_assign(CURLM *multi_handle,
curl_socket_t sockfd, void *sockp);
/*
* Name: curl_push_callback
*
* Desc: This callback gets called when a new stream is being pushed by the
* server. It approves or denies the new stream.
*
* Returns: CURL_PUSH_OK or CURL_PUSH_DENY.
*/
#define CURL_PUSH_OK 0
#define CURL_PUSH_DENY 1
struct curl_pushheaders; /* forward declaration only */
CURL_EXTERN char *curl_pushheader_bynum(struct curl_pushheaders *h,
size_t num);
CURL_EXTERN char *curl_pushheader_byname(struct curl_pushheaders *h,
const char *name);
typedef int (*curl_push_callback)(CURL *parent,
CURL *easy,
size_t num_headers,
struct curl_pushheaders *headers,
void *userp);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif
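A sketch (not from this commit) of the "pull" interface described in the comments above: one easy handle driven through a multi handle with curl_multi_wait()/curl_multi_perform(), the result harvested via curl_multi_info_read(). Assumes <curl/curl.h>; most error checking omitted:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
    curl_global_init(CURL_GLOBAL_DEFAULT);

    CURL  *easy  = curl_easy_init();
    CURLM *multi = curl_multi_init();
    curl_easy_setopt(easy, CURLOPT_URL, "http://example.com/");
    curl_multi_add_handle(multi, easy);

    int still_running = 1;
    while(still_running) {
        int numfds = 0;
        curl_multi_wait(multi, NULL, 0, 1000, &numfds);  /* wait up to 1s */
        curl_multi_perform(multi, &still_running);
    }

    int msgs_left = 0;
    CURLMsg *m;
    while((m = curl_multi_info_read(multi, &msgs_left)) != NULL) {
        if(m->msg == CURLMSG_DONE)
            printf("transfer done: %s\n", curl_easy_strerror(m->data.result));
    }

    curl_multi_remove_handle(multi, easy);
    curl_easy_cleanup(easy);
    curl_multi_cleanup(multi);
    curl_global_cleanup();
    return 0;
}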

deps/libcurl/include/curl/stdcheaders.h (vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
#ifndef __STDC_HEADERS_H
#define __STDC_HEADERS_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2010, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include <sys/types.h>
size_t fread (void *, size_t, size_t, FILE *);
size_t fwrite (const void *, size_t, size_t, FILE *);
int strcasecmp(const char *, const char *);
int strncasecmp(const char *, const char *, size_t);
#endif /* __STDC_HEADERS_H */


@@ -0,0 +1,622 @@
#ifndef __CURL_TYPECHECK_GCC_H
#define __CURL_TYPECHECK_GCC_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* wraps curl_easy_setopt() with typechecking */
/* To add a new kind of warning, add an
* if(_curl_is_sometype_option(_curl_opt))
* if(!_curl_is_sometype(value))
* _curl_easy_setopt_err_sometype();
* block and define _curl_is_sometype_option, _curl_is_sometype and
* _curl_easy_setopt_err_sometype below
*
* NOTE: We use two nested 'if' statements here instead of the && operator, in
* order to work around gcc bug #32061. It affects only gcc 4.3.x/4.4.x
* when compiling with -Wlogical-op.
*
* To add an option that uses the same type as an existing option, you'll just
* need to extend the appropriate _curl_*_option macro
*/
#define curl_easy_setopt(handle, option, value) \
__extension__ ({ \
__typeof__ (option) _curl_opt = option; \
if(__builtin_constant_p(_curl_opt)) { \
if(_curl_is_long_option(_curl_opt)) \
if(!_curl_is_long(value)) \
_curl_easy_setopt_err_long(); \
if(_curl_is_off_t_option(_curl_opt)) \
if(!_curl_is_off_t(value)) \
_curl_easy_setopt_err_curl_off_t(); \
if(_curl_is_string_option(_curl_opt)) \
if(!_curl_is_string(value)) \
_curl_easy_setopt_err_string(); \
if(_curl_is_write_cb_option(_curl_opt)) \
if(!_curl_is_write_cb(value)) \
_curl_easy_setopt_err_write_callback(); \
if((_curl_opt) == CURLOPT_READFUNCTION) \
if(!_curl_is_read_cb(value)) \
_curl_easy_setopt_err_read_cb(); \
if((_curl_opt) == CURLOPT_IOCTLFUNCTION) \
if(!_curl_is_ioctl_cb(value)) \
_curl_easy_setopt_err_ioctl_cb(); \
if((_curl_opt) == CURLOPT_SOCKOPTFUNCTION) \
if(!_curl_is_sockopt_cb(value)) \
_curl_easy_setopt_err_sockopt_cb(); \
if((_curl_opt) == CURLOPT_OPENSOCKETFUNCTION) \
if(!_curl_is_opensocket_cb(value)) \
_curl_easy_setopt_err_opensocket_cb(); \
if((_curl_opt) == CURLOPT_PROGRESSFUNCTION) \
if(!_curl_is_progress_cb(value)) \
_curl_easy_setopt_err_progress_cb(); \
if((_curl_opt) == CURLOPT_DEBUGFUNCTION) \
if(!_curl_is_debug_cb(value)) \
_curl_easy_setopt_err_debug_cb(); \
if((_curl_opt) == CURLOPT_SSL_CTX_FUNCTION) \
if(!_curl_is_ssl_ctx_cb(value)) \
_curl_easy_setopt_err_ssl_ctx_cb(); \
if(_curl_is_conv_cb_option(_curl_opt)) \
if(!_curl_is_conv_cb(value)) \
_curl_easy_setopt_err_conv_cb(); \
if((_curl_opt) == CURLOPT_SEEKFUNCTION) \
if(!_curl_is_seek_cb(value)) \
_curl_easy_setopt_err_seek_cb(); \
if(_curl_is_cb_data_option(_curl_opt)) \
if(!_curl_is_cb_data(value)) \
_curl_easy_setopt_err_cb_data(); \
if((_curl_opt) == CURLOPT_ERRORBUFFER) \
if(!_curl_is_error_buffer(value)) \
_curl_easy_setopt_err_error_buffer(); \
if((_curl_opt) == CURLOPT_STDERR) \
if(!_curl_is_FILE(value)) \
_curl_easy_setopt_err_FILE(); \
if(_curl_is_postfields_option(_curl_opt)) \
if(!_curl_is_postfields(value)) \
_curl_easy_setopt_err_postfields(); \
if((_curl_opt) == CURLOPT_HTTPPOST) \
if(!_curl_is_arr((value), struct curl_httppost)) \
_curl_easy_setopt_err_curl_httpost(); \
if(_curl_is_slist_option(_curl_opt)) \
if(!_curl_is_arr((value), struct curl_slist)) \
_curl_easy_setopt_err_curl_slist(); \
if((_curl_opt) == CURLOPT_SHARE) \
if(!_curl_is_ptr((value), CURLSH)) \
_curl_easy_setopt_err_CURLSH(); \
} \
curl_easy_setopt(handle, _curl_opt, value); \
})
/* wraps curl_easy_getinfo() with typechecking */
/* FIXME: don't allow const pointers */
#define curl_easy_getinfo(handle, info, arg) \
__extension__ ({ \
__typeof__ (info) _curl_info = info; \
if(__builtin_constant_p(_curl_info)) { \
if(_curl_is_string_info(_curl_info)) \
if(!_curl_is_arr((arg), char *)) \
_curl_easy_getinfo_err_string(); \
if(_curl_is_long_info(_curl_info)) \
if(!_curl_is_arr((arg), long)) \
_curl_easy_getinfo_err_long(); \
if(_curl_is_double_info(_curl_info)) \
if(!_curl_is_arr((arg), double)) \
_curl_easy_getinfo_err_double(); \
if(_curl_is_slist_info(_curl_info)) \
if(!_curl_is_arr((arg), struct curl_slist *)) \
_curl_easy_getinfo_err_curl_slist(); \
} \
curl_easy_getinfo(handle, _curl_info, arg); \
})
/* TODO: typechecking for curl_share_setopt() and curl_multi_setopt(),
* for now just make sure that the functions are called with three
* arguments
*/
#define curl_share_setopt(share,opt,param) curl_share_setopt(share,opt,param)
#define curl_multi_setopt(handle,opt,param) curl_multi_setopt(handle,opt,param)
/* the actual warnings, triggered by calling the _curl_easy_setopt_err*
* functions */
/* To define a new warning, use _CURL_WARNING(identifier, "message") */
#define _CURL_WARNING(id, message) \
static void __attribute__((__warning__(message))) \
__attribute__((__unused__)) __attribute__((__noinline__)) \
id(void) { __asm__(""); }
_CURL_WARNING(_curl_easy_setopt_err_long,
"curl_easy_setopt expects a long argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_curl_off_t,
"curl_easy_setopt expects a curl_off_t argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_string,
"curl_easy_setopt expects a "
"string (char* or char[]) argument for this option"
)
_CURL_WARNING(_curl_easy_setopt_err_write_callback,
"curl_easy_setopt expects a curl_write_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_read_cb,
"curl_easy_setopt expects a curl_read_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_ioctl_cb,
"curl_easy_setopt expects a curl_ioctl_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_sockopt_cb,
"curl_easy_setopt expects a curl_sockopt_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_opensocket_cb,
"curl_easy_setopt expects a "
"curl_opensocket_callback argument for this option"
)
_CURL_WARNING(_curl_easy_setopt_err_progress_cb,
"curl_easy_setopt expects a curl_progress_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_debug_cb,
"curl_easy_setopt expects a curl_debug_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_ssl_ctx_cb,
"curl_easy_setopt expects a curl_ssl_ctx_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_conv_cb,
"curl_easy_setopt expects a curl_conv_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_seek_cb,
"curl_easy_setopt expects a curl_seek_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_cb_data,
"curl_easy_setopt expects a "
"private data pointer as argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_error_buffer,
"curl_easy_setopt expects a "
"char buffer of CURL_ERROR_SIZE as argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_FILE,
"curl_easy_setopt expects a FILE* argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_postfields,
"curl_easy_setopt expects a void* or char* argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_curl_httpost,
"curl_easy_setopt expects a struct curl_httppost* argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_curl_slist,
"curl_easy_setopt expects a struct curl_slist* argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_CURLSH,
"curl_easy_setopt expects a CURLSH* argument for this option")
_CURL_WARNING(_curl_easy_getinfo_err_string,
"curl_easy_getinfo expects a pointer to char * for this info")
_CURL_WARNING(_curl_easy_getinfo_err_long,
"curl_easy_getinfo expects a pointer to long for this info")
_CURL_WARNING(_curl_easy_getinfo_err_double,
"curl_easy_getinfo expects a pointer to double for this info")
_CURL_WARNING(_curl_easy_getinfo_err_curl_slist,
"curl_easy_getinfo expects a pointer to struct curl_slist * for this info")
/* groups of curl_easy_setops options that take the same type of argument */
/* To add a new option to one of the groups, just add
* (option) == CURLOPT_SOMETHING
* to the or-expression. If the option takes a long or curl_off_t, you don't
* have to do anything
*/
/* evaluates to true if option takes a long argument */
#define _curl_is_long_option(option) \
(0 < (option) && (option) < CURLOPTTYPE_OBJECTPOINT)
#define _curl_is_off_t_option(option) \
((option) > CURLOPTTYPE_OFF_T)
/* evaluates to true if option takes a char* argument */
#define _curl_is_string_option(option) \
((option) == CURLOPT_ACCEPT_ENCODING || \
(option) == CURLOPT_CAINFO || \
(option) == CURLOPT_CAPATH || \
(option) == CURLOPT_COOKIE || \
(option) == CURLOPT_COOKIEFILE || \
(option) == CURLOPT_COOKIEJAR || \
(option) == CURLOPT_COOKIELIST || \
(option) == CURLOPT_CRLFILE || \
(option) == CURLOPT_CUSTOMREQUEST || \
(option) == CURLOPT_DEFAULT_PROTOCOL || \
(option) == CURLOPT_DNS_INTERFACE || \
(option) == CURLOPT_DNS_LOCAL_IP4 || \
(option) == CURLOPT_DNS_LOCAL_IP6 || \
(option) == CURLOPT_DNS_SERVERS || \
(option) == CURLOPT_EGDSOCKET || \
(option) == CURLOPT_FTPPORT || \
(option) == CURLOPT_FTP_ACCOUNT || \
(option) == CURLOPT_FTP_ALTERNATIVE_TO_USER || \
(option) == CURLOPT_INTERFACE || \
(option) == CURLOPT_ISSUERCERT || \
(option) == CURLOPT_KEYPASSWD || \
(option) == CURLOPT_KRBLEVEL || \
(option) == CURLOPT_LOGIN_OPTIONS || \
(option) == CURLOPT_MAIL_AUTH || \
(option) == CURLOPT_MAIL_FROM || \
(option) == CURLOPT_NETRC_FILE || \
(option) == CURLOPT_NOPROXY || \
(option) == CURLOPT_PASSWORD || \
(option) == CURLOPT_PINNEDPUBLICKEY || \
(option) == CURLOPT_PROXY || \
(option) == CURLOPT_PROXYPASSWORD || \
(option) == CURLOPT_PROXYUSERNAME || \
(option) == CURLOPT_PROXYUSERPWD || \
(option) == CURLOPT_PROXY_SERVICE_NAME || \
(option) == CURLOPT_RANDOM_FILE || \
(option) == CURLOPT_RANGE || \
(option) == CURLOPT_REFERER || \
(option) == CURLOPT_RTSP_SESSION_ID || \
(option) == CURLOPT_RTSP_STREAM_URI || \
(option) == CURLOPT_RTSP_TRANSPORT || \
(option) == CURLOPT_SERVICE_NAME || \
(option) == CURLOPT_SOCKS5_GSSAPI_SERVICE || \
(option) == CURLOPT_SSH_HOST_PUBLIC_KEY_MD5 || \
(option) == CURLOPT_SSH_KNOWNHOSTS || \
(option) == CURLOPT_SSH_PRIVATE_KEYFILE || \
(option) == CURLOPT_SSH_PUBLIC_KEYFILE || \
(option) == CURLOPT_SSLCERT || \
(option) == CURLOPT_SSLCERTTYPE || \
(option) == CURLOPT_SSLENGINE || \
(option) == CURLOPT_SSLKEY || \
(option) == CURLOPT_SSLKEYTYPE || \
(option) == CURLOPT_SSL_CIPHER_LIST || \
(option) == CURLOPT_TLSAUTH_PASSWORD || \
(option) == CURLOPT_TLSAUTH_TYPE || \
(option) == CURLOPT_TLSAUTH_USERNAME || \
(option) == CURLOPT_UNIX_SOCKET_PATH || \
(option) == CURLOPT_URL || \
(option) == CURLOPT_USERAGENT || \
(option) == CURLOPT_USERNAME || \
(option) == CURLOPT_USERPWD || \
(option) == CURLOPT_XOAUTH2_BEARER || \
0)
/* evaluates to true if option takes a curl_write_callback argument */
#define _curl_is_write_cb_option(option) \
((option) == CURLOPT_HEADERFUNCTION || \
(option) == CURLOPT_WRITEFUNCTION)
/* evaluates to true if option takes a curl_conv_callback argument */
#define _curl_is_conv_cb_option(option) \
((option) == CURLOPT_CONV_TO_NETWORK_FUNCTION || \
(option) == CURLOPT_CONV_FROM_NETWORK_FUNCTION || \
(option) == CURLOPT_CONV_FROM_UTF8_FUNCTION)
/* evaluates to true if option takes a data argument to pass to a callback */
#define _curl_is_cb_data_option(option) \
((option) == CURLOPT_CHUNK_DATA || \
(option) == CURLOPT_CLOSESOCKETDATA || \
(option) == CURLOPT_DEBUGDATA || \
(option) == CURLOPT_FNMATCH_DATA || \
(option) == CURLOPT_HEADERDATA || \
(option) == CURLOPT_INTERLEAVEDATA || \
(option) == CURLOPT_IOCTLDATA || \
(option) == CURLOPT_OPENSOCKETDATA || \
(option) == CURLOPT_PRIVATE || \
(option) == CURLOPT_PROGRESSDATA || \
(option) == CURLOPT_READDATA || \
(option) == CURLOPT_SEEKDATA || \
(option) == CURLOPT_SOCKOPTDATA || \
(option) == CURLOPT_SSH_KEYDATA || \
(option) == CURLOPT_SSL_CTX_DATA || \
(option) == CURLOPT_WRITEDATA || \
0)
/* evaluates to true if option takes a POST data argument (void* or char*) */
#define _curl_is_postfields_option(option) \
((option) == CURLOPT_POSTFIELDS || \
(option) == CURLOPT_COPYPOSTFIELDS || \
0)
/* evaluates to true if option takes a struct curl_slist * argument */
#define _curl_is_slist_option(option) \
((option) == CURLOPT_HTTP200ALIASES || \
(option) == CURLOPT_HTTPHEADER || \
(option) == CURLOPT_MAIL_RCPT || \
(option) == CURLOPT_POSTQUOTE || \
(option) == CURLOPT_PREQUOTE || \
(option) == CURLOPT_PROXYHEADER || \
(option) == CURLOPT_QUOTE || \
(option) == CURLOPT_RESOLVE || \
(option) == CURLOPT_TELNETOPTIONS || \
0)
/* groups of curl_easy_getinfo infos that take the same type of argument */
/* evaluates to true if info expects a pointer to char * argument */
#define _curl_is_string_info(info) \
(CURLINFO_STRING < (info) && (info) < CURLINFO_LONG)
/* evaluates to true if info expects a pointer to long argument */
#define _curl_is_long_info(info) \
(CURLINFO_LONG < (info) && (info) < CURLINFO_DOUBLE)
/* evaluates to true if info expects a pointer to double argument */
#define _curl_is_double_info(info) \
(CURLINFO_DOUBLE < (info) && (info) < CURLINFO_SLIST)
/* true if info expects a pointer to struct curl_slist * argument */
#define _curl_is_slist_info(info) \
(CURLINFO_SLIST < (info))
/* typecheck helpers -- check whether given expression has requested type*/
/* For pointers, you can use the _curl_is_ptr/_curl_is_arr macros,
* otherwise define a new macro. Search for __builtin_types_compatible_p
* in the GCC manual.
* NOTE: these macros MUST NOT EVALUATE their arguments! The argument is
* the actual expression passed to the curl_easy_setopt macro. This
* means that you can only apply the sizeof and __typeof__ operators, no
* == or whatsoever.
*/
/* XXX: should evaluate to true iff expr is a pointer */
#define _curl_is_any_ptr(expr) \
(sizeof(expr) == sizeof(void*))
/* evaluates to true if expr is NULL */
/* XXX: must not evaluate expr, so this check is not accurate */
#define _curl_is_NULL(expr) \
(__builtin_types_compatible_p(__typeof__(expr), __typeof__(NULL)))
/* evaluates to true if expr is type*, const type* or NULL */
#define _curl_is_ptr(expr, type) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), type *) || \
__builtin_types_compatible_p(__typeof__(expr), const type *))
/* evaluates to true if expr is one of type[], type*, NULL or const type* */
#define _curl_is_arr(expr, type) \
(_curl_is_ptr((expr), type) || \
__builtin_types_compatible_p(__typeof__(expr), type []))
/* evaluates to true if expr is a string */
#define _curl_is_string(expr) \
(_curl_is_arr((expr), char) || \
_curl_is_arr((expr), signed char) || \
_curl_is_arr((expr), unsigned char))
/* evaluates to true if expr is a long (no matter the signedness)
* XXX: for now, int is also accepted (and therefore short and char, which
* are promoted to int when passed to a variadic function) */
#define _curl_is_long(expr) \
(__builtin_types_compatible_p(__typeof__(expr), long) || \
__builtin_types_compatible_p(__typeof__(expr), signed long) || \
__builtin_types_compatible_p(__typeof__(expr), unsigned long) || \
__builtin_types_compatible_p(__typeof__(expr), int) || \
__builtin_types_compatible_p(__typeof__(expr), signed int) || \
__builtin_types_compatible_p(__typeof__(expr), unsigned int) || \
__builtin_types_compatible_p(__typeof__(expr), short) || \
__builtin_types_compatible_p(__typeof__(expr), signed short) || \
__builtin_types_compatible_p(__typeof__(expr), unsigned short) || \
__builtin_types_compatible_p(__typeof__(expr), char) || \
__builtin_types_compatible_p(__typeof__(expr), signed char) || \
__builtin_types_compatible_p(__typeof__(expr), unsigned char))
/* evaluates to true if expr is of type curl_off_t */
#define _curl_is_off_t(expr) \
(__builtin_types_compatible_p(__typeof__(expr), curl_off_t))
/* evaluates to true if expr is a buffer suitable for CURLOPT_ERRORBUFFER */
/* XXX: also check size of a char[] array? */
#define _curl_is_error_buffer(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), char *) || \
__builtin_types_compatible_p(__typeof__(expr), char[]))
/* evaluates to true if expr is of type (const) void* or (const) FILE* */
#if 0
#define _curl_is_cb_data(expr) \
(_curl_is_ptr((expr), void) || \
_curl_is_ptr((expr), FILE))
#else /* be less strict */
#define _curl_is_cb_data(expr) \
_curl_is_any_ptr(expr)
#endif
/* evaluates to true if expr is of type FILE* */
#define _curl_is_FILE(expr) \
(__builtin_types_compatible_p(__typeof__(expr), FILE *))
/* evaluates to true if expr can be passed as POST data (void* or char*) */
#define _curl_is_postfields(expr) \
(_curl_is_ptr((expr), void) || \
_curl_is_arr((expr), char))
/* FIXME: the whole callback checking is messy...
* The idea is to tolerate char vs. void and const vs. not const
* pointers in arguments at least
*/
/* helper: __builtin_types_compatible_p distinguishes between functions and
* function pointers, hide it */
#define _curl_callback_compatible(func, type) \
(__builtin_types_compatible_p(__typeof__(func), type) || \
__builtin_types_compatible_p(__typeof__(func), type*))
/* evaluates to true if expr is of type curl_read_callback or "similar" */
#define _curl_is_read_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), __typeof__(fread)) || \
__builtin_types_compatible_p(__typeof__(expr), curl_read_callback) || \
_curl_callback_compatible((expr), _curl_read_callback1) || \
_curl_callback_compatible((expr), _curl_read_callback2) || \
_curl_callback_compatible((expr), _curl_read_callback3) || \
_curl_callback_compatible((expr), _curl_read_callback4) || \
_curl_callback_compatible((expr), _curl_read_callback5) || \
_curl_callback_compatible((expr), _curl_read_callback6))
typedef size_t (_curl_read_callback1)(char *, size_t, size_t, void*);
typedef size_t (_curl_read_callback2)(char *, size_t, size_t, const void*);
typedef size_t (_curl_read_callback3)(char *, size_t, size_t, FILE*);
typedef size_t (_curl_read_callback4)(void *, size_t, size_t, void*);
typedef size_t (_curl_read_callback5)(void *, size_t, size_t, const void*);
typedef size_t (_curl_read_callback6)(void *, size_t, size_t, FILE*);
/* evaluates to true if expr is of type curl_write_callback or "similar" */
#define _curl_is_write_cb(expr) \
(_curl_is_read_cb(expr) || \
__builtin_types_compatible_p(__typeof__(expr), __typeof__(fwrite)) || \
__builtin_types_compatible_p(__typeof__(expr), curl_write_callback) || \
_curl_callback_compatible((expr), _curl_write_callback1) || \
_curl_callback_compatible((expr), _curl_write_callback2) || \
_curl_callback_compatible((expr), _curl_write_callback3) || \
_curl_callback_compatible((expr), _curl_write_callback4) || \
_curl_callback_compatible((expr), _curl_write_callback5) || \
_curl_callback_compatible((expr), _curl_write_callback6))
typedef size_t (_curl_write_callback1)(const char *, size_t, size_t, void*);
typedef size_t (_curl_write_callback2)(const char *, size_t, size_t,
const void*);
typedef size_t (_curl_write_callback3)(const char *, size_t, size_t, FILE*);
typedef size_t (_curl_write_callback4)(const void *, size_t, size_t, void*);
typedef size_t (_curl_write_callback5)(const void *, size_t, size_t,
const void*);
typedef size_t (_curl_write_callback6)(const void *, size_t, size_t, FILE*);
/* evaluates to true if expr is of type curl_ioctl_callback or "similar" */
#define _curl_is_ioctl_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_ioctl_callback) || \
_curl_callback_compatible((expr), _curl_ioctl_callback1) || \
_curl_callback_compatible((expr), _curl_ioctl_callback2) || \
_curl_callback_compatible((expr), _curl_ioctl_callback3) || \
_curl_callback_compatible((expr), _curl_ioctl_callback4))
typedef curlioerr (_curl_ioctl_callback1)(CURL *, int, void*);
typedef curlioerr (_curl_ioctl_callback2)(CURL *, int, const void*);
typedef curlioerr (_curl_ioctl_callback3)(CURL *, curliocmd, void*);
typedef curlioerr (_curl_ioctl_callback4)(CURL *, curliocmd, const void*);
/* evaluates to true if expr is of type curl_sockopt_callback or "similar" */
#define _curl_is_sockopt_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_sockopt_callback) || \
_curl_callback_compatible((expr), _curl_sockopt_callback1) || \
_curl_callback_compatible((expr), _curl_sockopt_callback2))
typedef int (_curl_sockopt_callback1)(void *, curl_socket_t, curlsocktype);
typedef int (_curl_sockopt_callback2)(const void *, curl_socket_t,
curlsocktype);
/* evaluates to true if expr is of type curl_opensocket_callback or
"similar" */
#define _curl_is_opensocket_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_opensocket_callback) ||\
_curl_callback_compatible((expr), _curl_opensocket_callback1) || \
_curl_callback_compatible((expr), _curl_opensocket_callback2) || \
_curl_callback_compatible((expr), _curl_opensocket_callback3) || \
_curl_callback_compatible((expr), _curl_opensocket_callback4))
typedef curl_socket_t (_curl_opensocket_callback1)
(void *, curlsocktype, struct curl_sockaddr *);
typedef curl_socket_t (_curl_opensocket_callback2)
(void *, curlsocktype, const struct curl_sockaddr *);
typedef curl_socket_t (_curl_opensocket_callback3)
(const void *, curlsocktype, struct curl_sockaddr *);
typedef curl_socket_t (_curl_opensocket_callback4)
(const void *, curlsocktype, const struct curl_sockaddr *);
/* evaluates to true if expr is of type curl_progress_callback or "similar" */
#define _curl_is_progress_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_progress_callback) || \
_curl_callback_compatible((expr), _curl_progress_callback1) || \
_curl_callback_compatible((expr), _curl_progress_callback2))
typedef int (_curl_progress_callback1)(void *,
double, double, double, double);
typedef int (_curl_progress_callback2)(const void *,
double, double, double, double);
/* evaluates to true if expr is of type curl_debug_callback or "similar" */
#define _curl_is_debug_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_debug_callback) || \
_curl_callback_compatible((expr), _curl_debug_callback1) || \
_curl_callback_compatible((expr), _curl_debug_callback2) || \
_curl_callback_compatible((expr), _curl_debug_callback3) || \
_curl_callback_compatible((expr), _curl_debug_callback4) || \
_curl_callback_compatible((expr), _curl_debug_callback5) || \
_curl_callback_compatible((expr), _curl_debug_callback6) || \
_curl_callback_compatible((expr), _curl_debug_callback7) || \
_curl_callback_compatible((expr), _curl_debug_callback8))
typedef int (_curl_debug_callback1) (CURL *,
curl_infotype, char *, size_t, void *);
typedef int (_curl_debug_callback2) (CURL *,
curl_infotype, char *, size_t, const void *);
typedef int (_curl_debug_callback3) (CURL *,
curl_infotype, const char *, size_t, void *);
typedef int (_curl_debug_callback4) (CURL *,
curl_infotype, const char *, size_t, const void *);
typedef int (_curl_debug_callback5) (CURL *,
curl_infotype, unsigned char *, size_t, void *);
typedef int (_curl_debug_callback6) (CURL *,
curl_infotype, unsigned char *, size_t, const void *);
typedef int (_curl_debug_callback7) (CURL *,
curl_infotype, const unsigned char *, size_t, void *);
typedef int (_curl_debug_callback8) (CURL *,
curl_infotype, const unsigned char *, size_t, const void *);
/* evaluates to true if expr is of type curl_ssl_ctx_callback or "similar" */
/* this is getting even messier... */
#define _curl_is_ssl_ctx_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_ssl_ctx_callback) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback1) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback2) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback3) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback4) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback5) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback6) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback7) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback8))
typedef CURLcode (_curl_ssl_ctx_callback1)(CURL *, void *, void *);
typedef CURLcode (_curl_ssl_ctx_callback2)(CURL *, void *, const void *);
typedef CURLcode (_curl_ssl_ctx_callback3)(CURL *, const void *, void *);
typedef CURLcode (_curl_ssl_ctx_callback4)(CURL *, const void *, const void *);
#ifdef HEADER_SSL_H
/* hack: if we included OpenSSL's ssl.h, we know about SSL_CTX
* this will of course break if we're included before OpenSSL headers...
*/
typedef CURLcode (_curl_ssl_ctx_callback5)(CURL *, SSL_CTX, void *);
typedef CURLcode (_curl_ssl_ctx_callback6)(CURL *, SSL_CTX, const void *);
typedef CURLcode (_curl_ssl_ctx_callback7)(CURL *, const SSL_CTX, void *);
typedef CURLcode (_curl_ssl_ctx_callback8)(CURL *, const SSL_CTX,
const void *);
#else
typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback5;
typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback6;
typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback7;
typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback8;
#endif
/* evaluates to true if expr is of type curl_conv_callback or "similar" */
#define _curl_is_conv_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_conv_callback) || \
_curl_callback_compatible((expr), _curl_conv_callback1) || \
_curl_callback_compatible((expr), _curl_conv_callback2) || \
_curl_callback_compatible((expr), _curl_conv_callback3) || \
_curl_callback_compatible((expr), _curl_conv_callback4))
typedef CURLcode (*_curl_conv_callback1)(char *, size_t length);
typedef CURLcode (*_curl_conv_callback2)(const char *, size_t length);
typedef CURLcode (*_curl_conv_callback3)(void *, size_t length);
typedef CURLcode (*_curl_conv_callback4)(const void *, size_t length);
/* evaluates to true if expr is of type curl_seek_callback or "similar" */
#define _curl_is_seek_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_seek_callback) || \
_curl_callback_compatible((expr), _curl_seek_callback1) || \
_curl_callback_compatible((expr), _curl_seek_callback2))
typedef CURLcode (*_curl_seek_callback1)(void *, curl_off_t, int);
typedef CURLcode (*_curl_seek_callback2)(const void *, curl_off_t, int);
#endif /* __CURL_TYPECHECK_GCC_H */
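Taken together, these helpers back libcurl's GCC-only compile-time typechecking of `curl_easy_setopt` and `curl_easy_getinfo` arguments. A minimal sketch of the effect, as an editor's illustration assuming a libcurl build with the typecheck header enabled (the option names are standard libcurl, the rest is illustrative):

```c
#include <curl/curl.h>

int main(void) {
  CURL *h = curl_easy_init();
  if (h) {
    long port = 443;
    /* string option with a char* argument: passes _curl_is_string() */
    curl_easy_setopt(h, CURLOPT_URL, "https://example.com");
    /* long option with a long argument: passes _curl_is_long() */
    curl_easy_setopt(h, CURLOPT_PORT, port);
    /* a mismatch such as curl_easy_setopt(h, CURLOPT_URL, port) would
     * trigger a compile-time warning instead of failing silently at run time */
    curl_easy_cleanup(h);
  }
  return 0;
}
```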

BIN
deps/libcurl/lib/libcurl.a vendored Normal file

Binary file not shown.

View File

@ -128,5 +128,18 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台
## [Training and FAQ](https://www.taosdata.com/cn/faq)
- [FAQ](https://www.taosdata.com/cn/documentation20/faq): frequently asked questions and answers
- [Use cases](https://www.taosdata.com/cn/blog/?categories=4): examples that show how TDengine is used
<ul>
<li><a href="https://www.taosdata.com/blog/2020/12/25/2126.html">Open technical course: an in-depth look at the internals of TDengine, an open-source, high-efficiency IoT big data platform</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1941.html">TDengine video tutorial - Getting Started</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1945.html">TDengine video tutorial - Data Modeling</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1961.html">TDengine video tutorial - Cluster Setup</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1951.html">TDengine video tutorial - Go Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1955.html">TDengine video tutorial - JDBC Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1957.html">TDengine video tutorial - NodeJS Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1963.html">TDengine video tutorial - Python Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1965.html">TDengine video tutorial - RESTful Connector</a></li>
<li><a href="https://www.taosdata.com/blog/2020/11/11/1959.html">TDengine video tutorial - "Zero"-code Operations Monitoring</a></li>
<li><a href="https://www.taosdata.com/cn/documentation20/faq">FAQ: frequently asked questions and answers</a></li>
<li><a href="https://www.taosdata.com/cn/blog/?categories=4">Use cases: examples that show how TDengine is used</a></li>
</ul>

View File

@ -20,7 +20,7 @@ TDengine的安装非常简单从下载到安装成功仅仅只要几秒钟。
- TDengine-server-2.0.10.0-Linux-x64.deb (2.7M)
- TDengine-server-2.0.10.0-Linux-x64.tar.gz (4.5M)
For the detailed installation procedure, see <a href="https://www.taosdata.com/blog/2019/08/09/566.html">Install and Uninstall TDengine with Different Packages</a>.
For the detailed installation procedure, see <a href="https://www.taosdata.com/blog/2019/08/09/566.html">Install and Uninstall TDengine with Different Packages</a> as well as the <a href="https://www.taosdata.com/blog/2020/11/11/1941.html">video tutorial</a>.
## 轻松启动

View File

@ -4,6 +4,8 @@
TDengine uses a relational data model, so databases and tables have to be created. For a concrete application scenario you therefore need to think about the design of the database, the super tables (STables) and the regular tables. This section does not cover detailed syntax rules, only the concepts.
For data modeling, please also refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1945.html">video tutorial</a>.
## Creating a Database
Different kinds of data collection points often have different data characteristics: collection frequency, retention period, number of replicas, data block size, whether updates are allowed, and so on. So that TDengine can work at maximum efficiency in every scenario, it is recommended to put tables with different data characteristics into different databases, because each database can be configured with its own storage policy. When creating a database, besides the standard SQL options, the application can specify the retention period, the number of replicas, the number of memory blocks, the time precision, the maximum and minimum numbers of records per file block, whether to compress the data, the number of days covered by a single data file, and many other parameters. For example:
@ -59,3 +61,4 @@ INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 21
TDengine supports the multi-column model: as long as the metrics are collected at the same time by one data collection point (i.e. share one timestamp), they can be stored as different columns of a single super table. There is also an extreme design, the single-column model, in which every collected metric gets its own table, so a separate super table is created per metric type; for current, voltage and phase, for instance, three super tables would be created.
TDengine recommends using the multi-column model whenever possible, because insert and storage efficiency are higher. In some scenarios, however, the set of metrics collected at a point changes frequently; with the multi-column model this would require frequent changes to the super table schema and complicate the application, and in that case the single-column model is simpler.

View File

@ -6,6 +6,8 @@
Cluster management in TDengine is extremely simple: apart from adding and removing nodes, which require manual intervention, everything else is done automatically, keeping the operations workload to a minimum. This chapter describes the cluster management operations in detail.
For cluster setup, please also refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1961.html">video tutorial</a>.
## Preparation
**Step 0**: Plan the FQDNs of all physical nodes in the cluster, add the planned FQDN to /etc/hostname on each physical node, and add the IP-to-FQDN mapping of every cluster node to /etc/hosts on every node. [If DNS is deployed, contact your network administrator to configure it on the DNS server instead.]
@ -226,3 +228,4 @@ SHOW MNODES;
If the number of replicas is even, a master cannot be elected when half of the vnodes in a vnode group are not working; likewise, a master mnode cannot be elected when half of the mnodes are down, because of the "split brain" problem. To solve this, TDengine introduces the concept of an arbitrator. The arbitrator behaves like a working vnode or mnode, but it only maintains the network connection and does not process any data insertion or access. As long as more than half of the vnodes or mnodes, including the arbitrator, are working, the vnode group or mnode group can still provide data insertion and query services. For example, with two replicas, if node A is offline but node B works normally and can connect to the arbitrator, node B can keep working.
TDengine provides an executable program, tarbitrator; simply run it on any Linux server. Go to [Download packages](https://www.taosdata.com/cn/all-downloads/), choose a suitable version in the "TDengine Arbitrator Linux" section, then download and install it. The program needs almost no system resources, only a network connection. Its command-line parameter `-p` specifies the service port, 6042 by default. When configuring each taosd instance, set the parameter arbitrator in taos.cfg to the arbitrator's End Point. If this parameter is configured and the number of replicas is even, the system automatically connects to the configured arbitrator; if the number of replicas is odd, no connection is established even if an arbitrator is configured.

View File

@ -187,7 +187,7 @@ C/C++的API类似于MySQL的C API。应用程序使用时需要包含TDengine
- pass: the password
- db: the database name. If not provided, the connection can still be established and the user can create databases through it; if a database name is provided, that database must already exist and is used as the default database
- port: the port number
An empty (NULL) return value indicates failure. The application needs to save the returned value for subsequent API calls.
- `char *taos_get_server_info(TAOS *taos)`
@ -336,7 +336,7 @@ TDengine的异步API均采用非阻塞调用模式。应用程序可以用多线
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
Gets the result set of the statement. The result set is used the same way as in a non-parameterized call; when done, call `taos_free_result` on it to release the resources.
- `int taos_stmt_close(TAOS_STMT *stmt)`
Finishes execution and releases all resources.
@ -354,7 +354,7 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
* stime is the time at which the stream computation starts. 0 means start from now; a non-zero value means start from the specified time (UTC time in milliseconds since 1970/1/1)
* param is a parameter supplied by the application for the callback; it is passed back to the application when the callback is invoked
* callback: the second callback function, invoked when the continuous query stops automatically.
A NULL return value indicates that creation failed; a non-NULL return value indicates success.
- `void taos_close_stream (TAOS_STREAM *tstr)`
@ -394,6 +394,8 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时
## Python Connector
For the Python connector, please also refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1963.html">video tutorial</a>.
### Installation Prerequisites
* For installing the application driver, see <a href="https://www.taosdata.com/cn/documentation/connector/#安装连接器驱动步骤">Steps to install the connector driver</a>.
* Python 2.7 or >= 3.4 is installed
@ -433,7 +435,7 @@ python -m pip install python3\
* Import the TDengine client module
```python
import taos
import taos
```
* Get the connection and a cursor object
```python
@ -445,7 +447,7 @@ c1 = conn.cursor()
* Insert data
```python
import datetime
# create a database
c1.execute('create database db')
c1.execute('use db')
@ -473,7 +475,7 @@ numOfRows = c1.rowcount
numOfCols = len(c1.description)
for irow in range(numOfRows):
print("Row%d: ts=%s, temperature=%d, humidity=%f" %(irow, data[irow][0], data[irow][1],data[irow][2]))
# or iterate over the cursor directly to fetch the query results
c1.execute('select * from tb')
for data in c1:
@ -534,7 +536,7 @@ conn.close()
## RESTful Connector
To support development on all kinds of platforms, TDengine provides an API that follows the REST design principles, the RESTful API. To keep the learning cost as low as possible, and unlike the RESTful API designs of other databases, TDengine operates on the database directly through the SQL statement contained in the BODY of an HTTP POST request; only a single URL is needed.
To support development on all kinds of platforms, TDengine provides an API that follows the REST design principles, the RESTful API. To keep the learning cost as low as possible, and unlike the RESTful API designs of other databases, TDengine operates on the database directly through the SQL statement contained in the BODY of an HTTP POST request; only a single URL is needed. For the RESTful connector, please also refer to the <a href=https://www.taosdata.com/blog/2020/11/11/1965.html>video tutorial</a>.
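As an editor's sketch of this single-URL, SQL-in-the-body idea, here is a minimal libcurl example; the port 6041, the `/rest/sql` path and the `root:taosdata` credentials are assumed defaults for illustration only and should be checked against the HTTP request format described below.

```c
#include <stdio.h>
#include <curl/curl.h>

int main(void) {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *curl = curl_easy_init();
  if (curl) {
    /* one URL, the SQL statement travels in the request body */
    curl_easy_setopt(curl, CURLOPT_URL, "http://localhost:6041/rest/sql");
    curl_easy_setopt(curl, CURLOPT_USERPWD, "root:taosdata");       /* HTTP Basic auth */
    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "show databases;");  /* SQL in the BODY */
    CURLcode rc = curl_easy_perform(curl);
    if (rc != CURLE_OK) {
      fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));
    }
    curl_easy_cleanup(curl);
  }
  curl_global_cleanup();
  return 0;
}
```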
### HTTP Request Format
@ -779,7 +781,7 @@ https://www.taosdata.com/blog/2020/11/02/1901.html
TDengine provides the Go driver `taosSql`, which implements Go's built-in `database/sql/driver` interface. By simply importing the package as shown below, applications can access TDengine; see `https://github.com/taosdata/driver-go/blob/develop/taosSql/driver_test.go` for details.
For sample code that uses the Go connector, see https://github.com/taosdata/TDengine/tree/develop/tests/examples/go.
For sample code that uses the Go connector, see https://github.com/taosdata/TDengine/tree/develop/tests/examples/go as well as the <a href="https://www.taosdata.com/blog/2020/11/11/1951.html">video tutorial</a>.
```Go
import (
@ -796,7 +798,7 @@ go env -w GOPROXY=https://goproxy.io,direct
### Common APIs
- sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
- `sql.Open(DRIVER_NAME string, dataSourceName string) *DB`
This API opens a DB and returns an object of type *DB. Normally DRIVER_NAME is set to the string `taosSql` and dataSourceName is set to a string of the form `user:password@/tcp(host:port)/dbname`. If a client wants to access TDengine concurrently from multiple goroutines, each goroutine should create its own sql.Open object and use that object to access TDengine.
@ -835,6 +837,8 @@ Node.js连接器支持的系统有
| **OS**        | Linux | Win64 | Win32 | Linux | Linux |
| **Supported** | **Yes** | **Yes** | **Yes** | **Yes** | **Yes** |
For the Node.js connector, please also refer to the <a href="https://www.taosdata.com/blog/2020/11/11/1957.html">video tutorial</a>.
### Installation Prerequisites
* For installing the application driver, see <a href="https://www.taosdata.com/cn/documentation/connector/#安装连接器驱动步骤">Steps to install the connector driver</a>.
@ -909,7 +913,7 @@ node nodejsChecker.js host=localhost
#### Establishing a Connection
When using the node.js connector you must first <em>require</em> ```td2.0-connector``` and then use the ```taos.connect``` function. The only required parameter of ```taos.connect``` is ```host```; the other parameters fall back to the default values below when not provided. Finally a ```cursor``` has to be initialized to communicate with the TDengine server.
When using the node.js connector you must first <em>require</em> `td2.0-connector` and then use the `taos.connect` function. The only required parameter of `taos.connect` is `host`; the other parameters fall back to the default values below when not provided. Finally a `cursor` has to be initialized to communicate with the TDengine server.
```javascript
const taos = require('td2.0-connector');
@ -945,13 +949,13 @@ TDengine目前还不支持update和delete语句。
#### Query
You can query the database with the ```cursor.query``` function.
You can query the database with the `cursor.query` function.
```javascript
var query = cursor.query('show databases;')
```
The query result can be fetched and printed with the ```query.execute()``` function:
The query result can be fetched and printed with the `query.execute()` function:
```javascript
var promise = query.execute();
@ -959,7 +963,7 @@ promise.then(function(result) {
result.pretty();
});
```
Query statements can also be parameterized with the ```bind``` method of ```query```. As in the example below, ```query``` automatically fills the provided values into the ```?``` placeholders of the statement.
Query statements can also be parameterized with the `bind` method of `query`. As in the example below, `query` automatically fills the provided values into the `?` placeholders of the statement.
```javascript
var query = cursor.query('select * from meterinfo.meters where ts <= ? and areaid = ?;').bind(new Date(), 5);
@ -967,7 +971,7 @@ query.execute().then(function(result) {
result.pretty();
})
```
If a second argument is passed to ```query``` and set to ```true```, the query result is returned immediately, as shown below:
If a second argument is passed to `query` and set to `true`, the query result is returned immediately, as shown below:
```javascript
var promise = cursor.query('select * from meterinfo.meters where v1 = 30;', true)
@ -991,4 +995,4 @@ promise2.then(function(result) {
### Examples
<a href="https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example.js">Here</a> is a code example that uses the NodeJS connector to create a table, insert weather data, and query the inserted data.
<a href="https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js">Here</a> is another code example that uses the NodeJS connector to create a table, insert weather data, and query the inserted data; unlike the one above, this example uses only the `cursor`.
<a href="https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js">Here</a> is another code example that uses the NodeJS connector to create a table, insert weather data, and query the inserted data; unlike the one above, this example uses only the `cursor`.

View File

@ -6,6 +6,8 @@ Java连接器支持的系统有
| **OS**        | Linux | Win64 | Win32 | Linux | Linux |
| **Supported** | **Yes** | **Yes** | **Yes** | **Yes** | **Yes** |
For the Java connector, please also refer to the <a href=https://www.taosdata.com/blog/2020/11/11/1955.html>video tutorial</a>.
To make life easier for Java applications, TDengine provides `taos-jdbcdriver`, an implementation that follows the JDBC standard (3.0) API specification. It can currently be searched for and downloaded from the [Sonatype Repository][1].
Since TDengine is developed in C, the taos-jdbcdriver package depends on the corresponding native library of the system.

View File

@ -39,10 +39,10 @@
# number of management nodes in the system
# numOfMnodes 3
# enable/disable backuping vnode directory when removing dnode
# enable/disable backuping vnode directory when removing vnode
# vnodeBak 1
# if report installation / use information
# enable/disable installation / usage report
# telemetryReporting 1
# enable/disable load balancing
@ -81,7 +81,7 @@
# minimum time window, milli-second
# minIntervalTime 10
# maximum delay before launching a stream compution, milli-second
# maximum delay before launching a stream computation, milli-second
# maxStreamCompDelay 20000
# maximum delay before launching a stream computation for the first time, milli-second
@ -156,7 +156,7 @@
# max number of connections allowed in dnode
# maxShellConns 5000
# max numerber of connections allowed in client
# max number of connections allowed in client
# maxConnections 5000
# stop writing logs when the disk size of the log folder is less than this value
@ -187,7 +187,7 @@
# restfulRowLimit 10240
# The following parameter is used to limit the maximum number of lines in log files.
# max number of rows per log filters
# max number of lines per log filters
# numOfLogLines 10000000
# enable/disable async log
@ -199,7 +199,9 @@
# The following parameters are used for debug purpose only.
# debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
# 131: output warning and error, 135: output debug, warning and error, 143 : output trace, debug, warning and error to log.
# 131: output warning and error
# 135: output debug, warning and error
# 143: output trace, debug, warning and error to log
# 199: output debug, warning and error to both screen and file
# 207: output trace, debug, warning and error to both screen and file
@ -231,10 +233,10 @@
# cDebugFlag 131
# debug flag for JNI
# jniDebugflag 131
# jniDebugFlag 131
# debug flag for storage
# uDebugflag 131
# uDebugFlag 131
# debug flag for http server
# httpDebugFlag 131
@ -243,12 +245,12 @@
# monDebugFlag 131
# debug flag for query
# qDebugflag 131
# qDebugFlag 131
# debug flag for vnode
# vDebugflag 131
# vDebugFlag 131
# debug flag for http server
# debug flag for TSDB
# tsdbDebugFlag 131
# debug flag for continue query

View File

@ -26,6 +26,7 @@ else
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :

View File

@ -48,6 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdemox ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin

View File

@ -58,6 +58,7 @@ cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/scri
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdemox %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
@ -135,7 +136,8 @@ if [ $1 -eq 0 ];then
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
#${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :

View File

@ -175,6 +175,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
@ -186,6 +187,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/taosdemox ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemox ${bin_link_dir}/taosdemox || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :

View File

@ -86,6 +86,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
fi
${csudo} rm -f ${bin_link_dir}/rmtaos || :
@ -97,6 +98,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/taosdemox ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemox ${bin_link_dir}/taosdemox || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :

View File

@ -85,8 +85,9 @@ function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/power || :
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
fi
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@ -97,6 +98,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
[ -x ${install_main_dir}/bin/powerdemox ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemox ${bin_link_dir}/powerdemox || :
[ -x ${install_main_dir}/bin/powerdump ] && ${csudo} ln -s ${install_main_dir}/bin/powerdump ${bin_link_dir}/powerdump || :
fi
[ -x ${install_main_dir}/bin/remove_client_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_power.sh ${bin_link_dir}/rmpower || :

View File

@ -174,6 +174,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerd || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@ -184,6 +185,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
[ -x ${install_main_dir}/bin/powerd ] && ${csudo} ln -s ${install_main_dir}/bin/powerd ${bin_link_dir}/powerd || :
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
[ -x ${install_main_dir}/bin/powerdemox ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemox ${bin_link_dir}/powerdemox || :
[ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :

View File

@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdemox ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else

View File

@ -77,6 +77,7 @@ if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdemox ${install_dir}/bin/powerdemox
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin

View File

@ -36,7 +36,7 @@ if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
else
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdemox ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"

View File

@ -77,6 +77,7 @@ else
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdemox ${install_dir}/bin/powerdemox
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin

View File

@ -96,6 +96,8 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@ -105,6 +107,8 @@ function install_bin() {
[ -x ${bin_dir}/taos ] && ${csudo} ln -s ${bin_dir}/taos ${bin_link_dir}/taos || :
[ -x ${bin_dir}/taosd ] && ${csudo} ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || :
[ -x ${bin_dir}/taosdemo ] && ${csudo} ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${bin_dir}/taosdemox ] && ${csudo} ln -s ${bin_dir}/taosdemox ${bin_link_dir}/taosdemox || :
[ -x ${bin_dir}/taosdump ] && ${csudo} ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
[ -x ${bin_dir}/set_core.sh ] && ${csudo} ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
}
@ -265,8 +269,14 @@ function install_config() {
[ -f ${cfg_dir}/taos.cfg ] && ${csudo} cp ${cfg_dir}/taos.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/*
fi
# Save standard input to fd 6 and reopen standard input from /dev/tty so local_fqdn_check can prompt the user
exec 6<&0 0</dev/tty
local_fqdn_check
# Restore the saved standard input and close fd 6
exec 0<&6 6<&-
${csudo} mv ${cfg_dir}/taos.cfg ${cfg_dir}/taos.cfg.org
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${cfg_dir}
@ -422,7 +432,7 @@ function install_service() {
}
function install_TDengine() {
echo -e "${GREEN}Start to install TDEngine...${NC}"
echo -e "${GREEN}Start to install TDengine...${NC}"
#install log and data dir , then ln to /usr/local/taos
${csudo} mkdir -p ${log_dir} && ${csudo} chmod 777 ${log_dir}

View File

@ -119,4 +119,4 @@ if ((${service_mod}==2)); then
kill_taosd
fi
echo -e "${GREEN}TDEngine is removed successfully!${NC}"
echo -e "${GREEN}TDengine is removed successfully!${NC}"

View File

@ -72,6 +72,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :

View File

@ -38,6 +38,7 @@ function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :

View File

@ -38,6 +38,7 @@ function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/set_core || :

View File

@ -72,6 +72,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerd || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :

View File

@ -2,19 +2,39 @@
#
# This file is used to set config for core when taosd crash
set -e
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
# set -e
# set -x
corePath=$1
csudo=""
if command -v sudo > /dev/null; then
csudo="sudo"
fi
#ulimit -c unlimited
if [[ ! -n ${corePath} ]]; then
echo -e -n "${GREEN}Please enter a file directory to save the coredump file${NC}:"
read corePath
while true; do
if [[ ! -z "$corePath" ]]; then
break
else
read -p "Please enter a file directory to save the coredump file:" corePath
fi
done
fi
ulimit -c unlimited
${csudo} sed -i '/ulimit -c unlimited/d' /etc/profile ||:
${csudo} sed -i '$a\ulimit -c unlimited' /etc/profile ||:
source /etc/profile
${csudo} mkdir -p /coredump ||:
${csudo} sysctl -w kernel.core_pattern='/coredump/core-%e-%p' ||:
${csudo} echo '/coredump/core-%e-%p' | ${csudo} tee /proc/sys/kernel/core_pattern ||:
${csudo} mkdir -p ${corePath} ||:
${csudo} sysctl -w kernel.core_pattern=${corePath}/core-%e-%p ||:
${csudo} echo "${corePath}/core-%e-%p" | ${csudo} tee /proc/sys/kernel/core_pattern ||:

View File

@ -1,6 +1,6 @@
name: tdengine
base: core18
version: '2.0.11.0'
version: '2.0.13.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -72,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.11.0
- usr/lib/libtaos.so.2.0.13.0
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so

View File

@ -66,6 +66,77 @@ static bool bnCheckFree(SDnodeObj *pDnode) {
return true;
}
static void bnSwapVnodeGid(SVnodeGid *pVnodeGid1, SVnodeGid *pVnodeGid2) {
SVnodeGid tmp = *pVnodeGid1;
*pVnodeGid1 = *pVnodeGid2;
*pVnodeGid2 = tmp;
}
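/* Count how often each dnode of this vgroup already appears at vnode index 0
 * in the other vgroups, then reorder vnodeGid[] so that the dnode with the
 * smallest count comes first, spreading the index-0 position evenly. */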
static void bnAdjustVnodeIndex(SVgObj *pInVg) {
int32_t d0Id = pInVg->vnodeGid[0].dnodeId;
int32_t d1Id = pInVg->vnodeGid[1].dnodeId;
int32_t d2Id = pInVg->vnodeGid[2].dnodeId;
int32_t vgId = pInVg->vgId;
int32_t d0Num = 0;
int32_t d1Num = 0;
int32_t d2Num = 0;
void *pIter = NULL;
while (1) {
SVgObj *pVgroup = NULL;
pIter = mnodeGetNextVgroup(pIter, &pVgroup);
if (pVgroup == NULL) break;
if (pVgroup->vgId != vgId) {
if (pVgroup->vnodeGid[0].dnodeId == d0Id) d0Num++;
if (pVgroup->vnodeGid[0].dnodeId == d1Id) d1Num++;
if (pVgroup->vnodeGid[0].dnodeId == d2Id) d2Num++;
}
mnodeDecVgroupRef(pVgroup);
}
if (pInVg->numOfVnodes == 1) {
}
if (pInVg->numOfVnodes == 2) {
mDebug("vgId:%d, dnode:%d num:%d dnode:%d num:%d", pInVg->vgId, d0Id, d0Num, d1Id, d1Num);
if (d0Num > d1Num) {
mDebug("vgId:%d, adjust vnode index 0 to 1", pInVg->vgId);
bnSwapVnodeGid(&pInVg->vnodeGid[0], &pInVg->vnodeGid[1]);
}
}
if (pInVg->numOfVnodes >= 3) {
mDebug("vgId:%d, dnode:%d num:%d dnode:%d num:%d dnode:%d num:%d", pInVg->vgId, d0Id, d0Num, d1Id, d1Num, d2Id, d2Num);
if (d0Num <= d1Num && d0Num <= d2Num) {
if (d1Num > d2Num) {
mDebug("vgId:%d, adjust vnode index 1 to 2", pInVg->vgId);
bnSwapVnodeGid(&pInVg->vnodeGid[1], &pInVg->vnodeGid[2]);
}
} else if (d1Num <= d2Num && d1Num <= d0Num) {
mDebug("vgId:%d, adjust vnode index 0 to 1", pInVg->vgId);
bnSwapVnodeGid(&pInVg->vnodeGid[0], &pInVg->vnodeGid[1]);
if (d0Num > d2Num) {
mDebug("vgId:%d, adjust vnode index 1 to 2", pInVg->vgId);
bnSwapVnodeGid(&pInVg->vnodeGid[1], &pInVg->vnodeGid[2]);
}
} else {
mDebug("vgId:%d, adjust vnode index 0 to 2", pInVg->vgId);
bnSwapVnodeGid(&pInVg->vnodeGid[0], &pInVg->vnodeGid[2]);
if (d1Num > d0Num) {
mDebug("vgId:%d, adjust vnode index 1 to 2", pInVg->vgId);
bnSwapVnodeGid(&pInVg->vnodeGid[1], &pInVg->vnodeGid[2]);
}
}
}
for (int i = 0; i < pInVg->numOfVnodes; ++i) {
mDebug("vgId:%d index:%d dnodeId:%d", pInVg->vgId, i, pInVg->vnodeGid[i].dnodeId);
}
}
static void bnDiscardVnode(SVgObj *pVgroup, SVnodeGid *pVnodeGid) {
mDebug("vgId:%d, dnode:%d is dropping", pVgroup->vgId, pVnodeGid->dnodeId);
@ -88,15 +159,10 @@ static void bnDiscardVnode(SVgObj *pVgroup, SVnodeGid *pVnodeGid) {
memcpy(pVgroup->vnodeGid, vnodeGid, TSDB_MAX_REPLICA * sizeof(SVnodeGid));
pVgroup->numOfVnodes = numOfVnodes;
bnAdjustVnodeIndex(pVgroup);
mnodeUpdateVgroup(pVgroup);
}
static void bnSwapVnodeGid(SVnodeGid *pVnodeGid1, SVnodeGid *pVnodeGid2) {
SVnodeGid tmp = *pVnodeGid1;
*pVnodeGid1 = *pVnodeGid2;
*pVnodeGid2 = tmp;
}
int32_t bnAllocVnodes(SVgObj *pVgroup) {
int32_t dnode = 0;
int32_t vnodes = 0;
@ -147,6 +213,7 @@ int32_t bnAllocVnodes(SVgObj *pVgroup) {
}
}
bnAdjustVnodeIndex(pVgroup);
bnReleaseDnodes();
bnUnLock();
return TSDB_CODE_SUCCESS;
@ -157,17 +224,32 @@ static bool bnCheckVgroupReady(SVgObj *pVgroup, SVnodeGid *pRmVnode) {
return false;
}
int32_t rmVnodeVer = 0;
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pVnode = pVgroup->vnodeGid + i;
if (pVnode == pRmVnode) {
rmVnodeVer = mnodeGetVgidVer(pVnode->vver);
mTrace("vgId:%d, check vgroup status, vindex:%d dnode:%d status:%s role:%s vver:%d is watching", pVgroup->vgId, i,
pVnode->dnodeId, dnodeStatus[pVnode->pDnode->status], syncRole[pVnode->role], rmVnodeVer);
}
}
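  // The removed vnode can be discarded only when another replica is online,
  // acting as master or slave, and has caught up to the removed vnode's version.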
bool isReady = false;
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pVnode = pVgroup->vnodeGid + i;
SDnodeObj *pDnode = pVnode->pDnode;
if (pVnode == pRmVnode) continue;
int32_t vver = mnodeGetVgidVer(pVnode->vver);
mTrace("vgId:%d, check vgroup status, dnode:%d status:%d, vnode role:%s", pVgroup->vgId, pVnode->pDnode->dnodeId,
pVnode->pDnode->status, syncRole[pVnode->role]);
if (pVnode->pDnode->status == TAOS_DN_STATUS_DROPPING) continue;
if (pVnode->pDnode->status == TAOS_DN_STATUS_OFFLINE) continue;
mTrace("vgId:%d, check vgroup status, vindex:%d dnode:%d status:%s role:%s vver:%d, rmvver:%d", pVgroup->vgId, i,
pVnode->dnodeId, dnodeStatus[pDnode->status], syncRole[pVnode->role], vver, rmVnodeVer);
if (pDnode->status == TAOS_DN_STATUS_DROPPING) continue;
if (pDnode->status == TAOS_DN_STATUS_OFFLINE) continue;
if (pVnode->role != TAOS_SYNC_ROLE_SLAVE && pVnode->role != TAOS_SYNC_ROLE_MASTER) continue;
if (pVnode->role == TAOS_SYNC_ROLE_SLAVE || pVnode->role == TAOS_SYNC_ROLE_MASTER) {
if (rmVnodeVer == 0 || vver >= rmVnodeVer) {
mInfo("vgId:%d, is ready for vindex:%d in dnode:%d status:%s role:%s vver:%d larger than rmvver:%d",
pVgroup->vgId, i, pVnode->dnodeId, dnodeStatus[pDnode->status], syncRole[pVnode->role], vver, rmVnodeVer);
isReady = true;
}
}
@ -189,7 +271,7 @@ static int32_t bnRemoveVnode(SVgObj *pVgroup) {
mDebug("vgId:%d, is not ready", pVgroup->vgId);
return -1;
} else {
mDebug("vgId:%d, is ready, discard dnode:%d", pVgroup->vgId, pSelVnode->dnodeId);
mInfo("vgId:%d, is ready, discard dnode:%d", pVgroup->vgId, pSelVnode->dnodeId);
bnDiscardVnode(pVgroup, pSelVnode);
return TSDB_CODE_SUCCESS;
}
@ -233,6 +315,7 @@ static int32_t bnAddVnode(SVgObj *pVgroup, SDnodeObj *pSrcDnode, SDnodeObj *pDes
vnodeGids[numOfVnodes].pDnode = pDestDnode;
numOfVnodes++;
// move the src vnode to the end
for (int32_t v = 0; v < numOfVnodes; ++v) {
if (pSrcDnode != NULL && pSrcDnode->dnodeId == vnodeGids[v].dnodeId) {
bnSwapVnodeGid(&vnodeGids[v], &vnodeGids[numOfVnodes - 1]);
@ -241,6 +324,11 @@ static int32_t bnAddVnode(SVgObj *pVgroup, SDnodeObj *pSrcDnode, SDnodeObj *pDes
}
}
// adjust the vgroup position
if (pSrcDnode == NULL) {
bnAdjustVnodeIndex(pVgroup);
}
memcpy(&pVgroup->vnodeGid, &vnodeGids, sizeof(SVnodeGid) * TSDB_MAX_REPLICA);
pVgroup->numOfVnodes = numOfVnodes;
atomic_add_fetch_32(&pDestDnode->openVnodes, 1);
@ -330,7 +418,7 @@ void bnReset() {
tsAccessSquence = 0;
}
static int32_t bnMonitorVgroups() {
static bool bnMonitorVgroups() {
void * pIter = NULL;
SVgObj *pVgroup = NULL;
bool hasUpdatingVgroup = false;
@ -489,6 +577,7 @@ void bnCheckStatus() {
mInfo("dnode:%d, set to offline state, access seq:%d last seq:%d laststat:%d", pDnode->dnodeId, tsAccessSquence,
pDnode->lastAccess, pDnode->status);
bnSetVgroupOffline(pDnode);
bnStartTimer(3000);
}
}
mnodeDecDnodeRef(pDnode);

View File

@ -31,7 +31,10 @@ static void *bnThreadFunc(void *arg) {
}
pthread_cond_wait(&tsBnThread.cond, &tsBnThread.mutex);
mDebug("balance thread wakes up to work");
bool updateSoon = bnStart();
mDebug("balance thread finished this poll, updateSoon:%d", updateSoon);
bnStartTimer(updateSoon ? 1000 : -1);
pthread_mutex_unlock(&(tsBnThread.mutex));
}
@ -101,8 +104,8 @@ static void bnProcessTimer(void *handle, void *tmrId) {
tsBnThread.timer = NULL;
tsAccessSquence++;
bnCheckStatus();
bnStartTimer(-1);
bnCheckStatus();
if (handle == NULL) {
if (tsAccessSquence % tsBalanceInterval == 0) {
@ -121,6 +124,7 @@ void bnStartTimer(int64_t mseconds) {
bool updateSoon = (mseconds != -1);
if (updateSoon) {
mTrace("balance function will be called after %" PRId64 " ms", mseconds);
taosTmrReset(bnProcessTimer, mseconds, (void *)mseconds, tsMnodeTmr, &tsBnThread.timer);
} else {
taosTmrReset(bnProcessTimer, tsStatusInterval * 1000, NULL, tsMnodeTmr, &tsBnThread.timer);

View File

@ -38,12 +38,6 @@ typedef struct SLocalDataSource {
tFilePage filePage;
} SLocalDataSource;
enum {
TSC_LOCALREDUCE_READY = 0x0,
TSC_LOCALREDUCE_IN_PROGRESS = 0x1,
TSC_LOCALREDUCE_TOBE_FREED = 0x2,
};
typedef struct SLocalReducer {
SLocalDataSource ** pLocalDataSrc;
int32_t numOfBuffer;
@ -56,7 +50,6 @@ typedef struct SLocalReducer {
tFilePage * pTempBuffer;
struct SQLFunctionCtx *pCtx;
int32_t rowSize; // size of each intermediate result.
int32_t status; // denote it is in reduce process, in reduce process, it
bool hasPrevRow; // cannot be released
bool hasUnprocessedRow;
tOrderDescriptor * pDesc;

View File

@ -22,8 +22,8 @@ extern "C" {
#include "tlog.h"
extern uint32_t cDebugFlag;
extern uint32_t tscEmbedded;
extern int32_t cDebugFlag;
extern int8_t tscEmbedded;
#define tscFatal(...) do { if (cDebugFlag & DEBUG_FATAL) { taosPrintLog("TSC FATAL ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} while(0)
#define tscError(...) do { if (cDebugFlag & DEBUG_ERROR) { taosPrintLog("TSC ERROR ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} while(0)

View File

@ -20,9 +20,6 @@
extern "C" {
#endif
/*
* @date 2018/09/30
*/
#include "exception.h"
#include "os.h"
#include "qExtbuffer.h"
@ -216,7 +213,7 @@ STableMetaInfo* tscGetMetaInfo(SQueryInfo *pQueryInfo, int32_t tableIndex);
SQueryInfo *tscGetQueryInfoDetail(SSqlCmd* pCmd, int32_t subClauseIndex);
SQueryInfo *tscGetQueryInfoDetailSafely(SSqlCmd *pCmd, int32_t subClauseIndex);
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache);
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo);
STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, STableMeta* pTableMeta,
SVgroupsInfo* vgroupList, SArray* pTagCols, SArray* pVgroupTables);
@ -276,7 +273,7 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex);
bool hasMoreVnodesToTry(SSqlObj *pSql);
bool hasMoreClauseToTry(SSqlObj* pSql);
void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache);
void tscFreeQueryInfo(SSqlCmd* pCmd);
void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp);
void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows);
@ -290,6 +287,14 @@ int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_
char* serializeTagData(STagData* pTagData, char* pMsg);
int32_t copyTagData(STagData* dst, const STagData* src);
STableMeta* createSuperTableMeta(STableMetaMsg* pChild);
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta);
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta);
uint32_t tscGetTableMetaMaxSize();
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name);
STableMeta* tscTableMetaClone(STableMeta* pTableMeta);
void* malloc_throw(size_t size);
void* calloc_throw(size_t nmemb, size_t size);
char* strdup_throw(const char* str);

View File

@ -105,7 +105,10 @@ SSchema tscGetTbnameColumnSchema();
* @param size size of the table meta
* @return
*/
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size);
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg);
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src);
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg);
#ifdef __cplusplus
}

View File

@ -56,22 +56,28 @@ typedef struct STableComInfo {
int32_t rowSize;
} STableComInfo;
typedef struct SCorVgroupInfo {
int32_t version;
int8_t inUse;
int8_t numOfEps;
SEpAddr1 epAddr[TSDB_MAX_REPLICA];
} SCorVgroupInfo;
typedef struct SNewVgroupInfo {
int32_t vgId;
int8_t inUse;
int8_t numOfEps;
SEpAddrMsg ep[TSDB_MAX_REPLICA];
} SNewVgroupInfo;
typedef struct CChildTableMeta {
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN];
} CChildTableMeta;
typedef struct STableMeta {
STableComInfo tableInfo;
int32_t vgId;
STableId id;
uint8_t tableType;
char sTableName[TSDB_TABLE_FNAME_LEN];
int16_t sversion;
int16_t tversion;
char sTableId[TSDB_TABLE_FNAME_LEN];
SVgroupInfo vgroupInfo;
SCorVgroupInfo corVgroupInfo;
STableId id;
STableComInfo tableInfo;
SSchema schema[]; // if the table is TSDB_CHILD_TABLE, schema is acquired by super table meta info
} STableMeta;
@ -170,7 +176,7 @@ typedef struct SParamInfo {
} SParamInfo;
typedef struct STableDataBlocks {
char tableId[TSDB_TABLE_FNAME_LEN];
char tableName[TSDB_TABLE_FNAME_LEN];
int8_t tsSource; // where does the UNIX timestamp come from, server or client
bool ordered; // if current rows are ordered or not
int64_t vgId; // virtual group id
@ -248,7 +254,7 @@ typedef struct {
int8_t submitSchema; // submit block is built with table schema
STagData tagData; // NOTE: pTagData->data is used as a variant length array
STableMeta **pTableMetaList; // all involved tableMeta list of current insert sql statement.
char **pTableNameList; // all involved table names of the current insert sql statement.
int32_t numOfTables;
SHashObj *pTableBlockHashList; // data block for each table
@ -307,7 +313,7 @@ typedef struct STscObj {
SRpcCorEpSet *tscCorMgmtEpSet;
void* pDnodeConn;
pthread_mutex_t mutex;
T_REF_DECLARE()
int32_t numOfObj; // number of sqlObj from this tscObj
} STscObj;
typedef struct SSubqueryState {
@ -385,7 +391,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet);
int tscProcessSql(SSqlObj *pSql);
int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex);
void tscQueueAsyncRes(SSqlObj *pSql);
void tscAsyncResultOnError(SSqlObj *pSql);
void tscQueueAsyncError(void(*fp), void *param, int32_t code);
@ -399,7 +405,7 @@ void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo);
void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
void tscResetSqlCmdObj(SSqlCmd *pCmd);
/**
* free query result of the sql object
@ -413,14 +419,13 @@ void tscFreeSqlResult(SSqlObj *pSql);
*/
void tscFreeSqlObj(SSqlObj *pSql);
void tscFreeRegisteredSqlObj(void *pSql);
void tscFreeTableMetaHelper(void *pTableMeta);
void tscCloseTscObj(void *pObj);
// todo move to taos? or create a new file: taos_internal.h
TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
void *param, TAOS **taos);
TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, TAOS_RES** res);
TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, int64_t* res);
void waitForQueryRsp(void *param, TAOS_RES *tres, int code);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, __async_cb_func_t fp, void *param, const char *sqlstr, size_t sqlLen);
@ -478,15 +483,16 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
}
}
extern SCacheObj* tscMetaCache;
extern int tscObjRef;
extern void * tscTmr;
extern void * tscQhandle;
extern int tscKeepConn[];
extern int tsInsertHeadSize;
extern int tscNumOfThreads;
extern int tscRefId;
extern int32_t sentinel;
extern SHashObj *tscVgroupMap;
extern SHashObj *tscTableMetaInfo;
extern int tscObjRef;
extern void *tscTmr;
extern void *tscQhandle;
extern int tscKeepConn[];
extern int tscRefId;
extern int tscNumOfObj; // number of existed sqlObj in current process.
extern int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo);

View File

@ -18,7 +18,6 @@
#include "tnote.h"
#include "trpc.h"
#include "tcache.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tscLocalMerge.h"
@ -57,7 +56,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
pSql->res.code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return;
}
@ -71,7 +70,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, __async_cb_func_t fp, void* para
if (code != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return;
}
@ -166,7 +165,7 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
pRes->code = numOfRows;
}
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return;
}
@ -217,7 +216,7 @@ void taos_fetch_rows_a(TAOS_RES *taosa, __async_cb_func_t fp, void *param) {
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
pSql->param = param;
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return;
}
@ -280,7 +279,7 @@ void taos_fetch_row_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, TAOS_ROW),
pSql->param = param;
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return;
}
@ -382,16 +381,16 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) {
}
void tscQueueAsyncRes(SSqlObj *pSql) {
void tscAsyncResultOnError(SSqlObj *pSql) {
if (pSql == NULL || pSql->signature != pSql) {
tscDebug("%p SqlObj is freed, not add into queue async res", pSql);
return;
}
assert(pSql->res.code != TSDB_CODE_SUCCESS);
tscError("%p add into queued async res, code:%s", pSql, tstrerror(pSql->res.code));
SSqlRes *pRes = &pSql->res;
if (pSql->fp == NULL || pSql->fetchFp == NULL){
return;
}
@ -423,7 +422,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
// check if it is a sub-query of super table query first, if true, enter another routine
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
tscDebug("%p update table meta in local cache, continue to process sql and send the corresponding query", pSql);
tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
@ -440,7 +439,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
return;
} else { // continue to process normal async query
if (pCmd->parseFinished) {
tscDebug("%p update table meta in local cache, continue to process sql and send corresponding query", pSql);
tscDebug("%p update local table meta, continue to process sql and send corresponding query", pSql);
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
@ -455,7 +454,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (pCmd->command == TSDB_SQL_SELECT) {
tscDebug("%p redo parse sql string and proceed", pSql);
pCmd->parseFinished = false;
tscResetSqlCmdObj(pCmd, false);
tscResetSqlCmdObj(pCmd);
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
@ -532,6 +531,6 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
_error:
if (code != TSDB_CODE_SUCCESS) {
pSql->res.code = code;
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
}
}

View File

@ -17,7 +17,6 @@
#include "taosmsg.h"
#include "taosdef.h"
#include "tcache.h"
#include "tname.h"
#include "tscLog.h"
#include "tscUtil.h"
@ -273,7 +272,7 @@ void tscSCreateCallBack(void *param, TAOS_RES *tres, int code) {
if (pRes->code != TSDB_CODE_SUCCESS) {
taos_free_result(pSql);
free(builder);
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
return;
}
@ -291,7 +290,7 @@ void tscSCreateCallBack(void *param, TAOS_RES *tres, int code) {
if (pRes->code == TSDB_CODE_SUCCESS) {
(*pParentSql->fp)(pParentSql->param, pParentSql, code);
} else {
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
}
}
}
@ -571,7 +570,7 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch
char fullName[TSDB_TABLE_FNAME_LEN * 2] = {0};
extractDBName(pTableMetaInfo->name, fullName);
extractTableName(pMeta->sTableId, param->sTableName);
extractTableName(pMeta->sTableName, param->sTableName);
snprintf(fullName + strlen(fullName), TSDB_TABLE_FNAME_LEN - strlen(fullName), ".%s", param->sTableName);
extractTableName(pTableMetaInfo->name, param->buf);
@ -901,7 +900,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
pRes->code = tscProcessShowCreateDatabase(pSql);
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
taosCacheEmpty(tscMetaCache);
taosHashEmpty(tscTableMetaInfo);
pRes->code = TSDB_CODE_SUCCESS;
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
pRes->code = tscProcessServerVer(pSql);
@ -925,7 +924,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
(*pSql->fp)(pSql->param, pSql, code);
} else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS){
} else {
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
}
return code;
}

View File

@ -89,11 +89,11 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalReducer *pReducer, tOrderDesc
pCtx->startOffset = 0;
pCtx->size = 1;
pCtx->hasNull = true;
pCtx->currentStage = SECONDARY_STAGE_MERGE;
pCtx->currentStage = MERGE_STAGE;
// for top/bottom function, the output of timestamp is the first column
int32_t functionId = pExpr->functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
pCtx->ptsOutputBuf = pReducer->pCtx[0].aOutputBuf;
pCtx->param[2].i64Key = pQueryInfo->order.order;
pCtx->param[2].nType = TSDB_DATA_TYPE_BIGINT;
@ -493,13 +493,6 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
// there is no more result, so we release all allocated resource
SLocalReducer *pLocalReducer = (SLocalReducer *)atomic_exchange_ptr(&pRes->pLocalReducer, NULL);
if (pLocalReducer != NULL) {
int32_t status = 0;
while ((status = atomic_val_compare_exchange_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY,
TSC_LOCALREDUCE_TOBE_FREED)) == TSC_LOCALREDUCE_IN_PROGRESS) {
taosMsleep(100);
tscDebug("%p waiting for delete procedure, status: %d", pSql, status);
}
pLocalReducer->pFillInfo = taosDestroyFillInfo(pLocalReducer->pFillInfo);
if (pLocalReducer->pCtx != NULL) {
@ -911,6 +904,13 @@ static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer,
}
}
if (pRes->numOfRowsGroup >= pQueryInfo->limit.limit && pQueryInfo->limit.limit > 0) {
pRes->numOfRows = 0;
pBeforeFillData->num = 0;
pLocalReducer->discard = true;
return;
}
pRes->numOfRowsGroup += pRes->numOfRows;
// impose the limitation of output rows on the final result
@ -1067,7 +1067,7 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer,
pCtx->param[0].i64Key = pExpr->param[0].i64Key;
}
pCtx->currentStage = SECONDARY_STAGE_MERGE;
pCtx->currentStage = MERGE_STAGE;
if (needInit) {
aAggs[pCtx->functionId].init(pCtx);
@ -1080,7 +1080,7 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer,
continue;
}
aAggs[functionId].distSecondaryMergeFunc(&pLocalReducer->pCtx[j]);
aAggs[functionId].mergeFunc(&pLocalReducer->pCtx[j]);
}
}
@ -1296,6 +1296,10 @@ void resetOutputBuf(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {// re
for (int32_t i = 0; i < t; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
pLocalReducer->pCtx[i].aOutputBuf = pLocalReducer->pResultBuf->data + pExpr->offset * pLocalReducer->resColModel->capacity;
if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM || pExpr->functionId == TSDB_FUNC_DIFF) {
pLocalReducer->pCtx[i].ptsOutputBuf = pLocalReducer->pCtx[0].aOutputBuf;
}
}
memset(pLocalReducer->pResultBuf, 0, pLocalReducer->nResultBufSize + sizeof(tFilePage));
@ -1430,24 +1434,13 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
SLocalReducer *pLocalReducer = pRes->pLocalReducer;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// set the data merge in progress
int32_t prevStatus =
atomic_val_compare_exchange_32(&pLocalReducer->status, TSC_LOCALREDUCE_READY, TSC_LOCALREDUCE_IN_PROGRESS);
if (prevStatus != TSC_LOCALREDUCE_READY) {
assert(prevStatus == TSC_LOCALREDUCE_TOBE_FREED); // it is in tscDestroyLocalReducer function already
return TSDB_CODE_SUCCESS;
}
tFilePage *tmpBuffer = pLocalReducer->pTempBuffer;
tFilePage *tmpBuffer = pLocalReducer->pTempBuffer;
if (doHandleLastRemainData(pSql)) {
pLocalReducer->status = TSC_LOCALREDUCE_READY; // set the flag, taos_free_result can release this result.
return TSDB_CODE_SUCCESS;
}
if (doBuildFilledResultForGroup(pSql)) {
pLocalReducer->status = TSC_LOCALREDUCE_READY; // set the flag, taos_free_result can release this result.
return TSDB_CODE_SUCCESS;
}
@ -1503,7 +1496,6 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
pLocalReducer->discardData->num = 0;
if (saveGroupResultInfo(pSql)) {
pLocalReducer->status = TSC_LOCALREDUCE_READY;
return TSDB_CODE_SUCCESS;
}
@ -1549,7 +1541,6 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
// here we do not check the return value
adjustLoserTreeFromNewData(pLocalReducer, pOneDataSrc, pTree);
assert(pLocalReducer->status == TSC_LOCALREDUCE_IN_PROGRESS);
if (pRes->numOfRows == 0) {
handleUnprocessedRow(pCmd, pLocalReducer, tmpBuffer);
@ -1560,7 +1551,6 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
* If previous group is not skipped, keep it in pRes->numOfGroups
*/
if (notSkipped && saveGroupResultInfo(pSql)) {
pLocalReducer->status = TSC_LOCALREDUCE_READY;
return TSDB_CODE_SUCCESS;
}
@ -1580,7 +1570,6 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
if (pRes->numOfRows == 0) {
continue;
} else {
pLocalReducer->status = TSC_LOCALREDUCE_READY; // set the flag, taos_free_result can release this result.
return TSDB_CODE_SUCCESS;
}
} else { // result buffer is not full
@ -1605,9 +1594,6 @@ int32_t tscDoLocalMerge(SSqlObj *pSql) {
genFinalResults(pSql, pLocalReducer, true);
}
assert(pLocalReducer->status == TSC_LOCALREDUCE_IN_PROGRESS && pRes->row == 0);
pLocalReducer->status = TSC_LOCALREDUCE_READY; // set the flag, taos_free_result can release this result.
return TSDB_CODE_SUCCESS;
}
@ -1661,7 +1647,7 @@ int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_
// calculate the result from several other columns
if (pSup->pArithExprInfo != NULL) {
arithSup.pArithExpr = pSup->pArithExprInfo;
tExprTreeCalcTraverse(arithSup.pArithExpr->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithmeticInputSrc);
arithmeticTreeTraverse(arithSup.pArithExpr->pExpr, (int32_t) pOutput->num, pbuf + pOutput->num*offset, &arithSup, TSDB_ORDER_ASC, getArithmeticInputSrc);
} else {
SSqlExpr* pExpr = pSup->pSqlExpr;
memcpy(pbuf + pOutput->num * offset, pExpr->offset * pOutput->num + pOutput->data, (size_t)(pExpr->resBytes * pOutput->num));

View File

@ -731,7 +731,7 @@ static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColI
return code;
}
dataBuf->vgId = pTableMeta->vgroupInfo.vgId;
dataBuf->vgId = pTableMeta->vgId;
dataBuf->numOfTables = 1;
*totalNum += numOfRows;
@ -1339,7 +1339,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
if (sqlstr == NULL || pSql->parseRetry >= 1 || ret != TSDB_CODE_TSC_INVALID_SQL) {
free(sqlstr);
} else {
tscResetSqlCmdObj(pCmd, true);
tscResetSqlCmdObj(pCmd);
free(pSql->sqlstr);
pSql->sqlstr = sqlstr;
pSql->parseRetry++;
@ -1351,7 +1351,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
SSqlInfo SQLInfo = qSQLParse(pSql->sqlstr);
ret = tscToSQLCmd(pSql, &SQLInfo);
if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0 && SQLInfo.type == TSDB_SQL_NULL) {
tscResetSqlCmdObj(pCmd, true);
tscResetSqlCmdObj(pCmd);
pSql->parseRetry++;
ret = tscToSQLCmd(pSql, &SQLInfo);
}
@ -1413,13 +1413,25 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
if (taos_errno(pSql) != TSDB_CODE_SUCCESS) { // handle error
assert(taos_errno(pSql) == code);
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
do {
if (code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
assert(pSql->res.numOfRows == 0);
int32_t errc = fseek(fp, 0, SEEK_SET);
if (errc < 0) {
tscError("%p failed to seek SEEK_SET since:%s", pSql, tstrerror(errno));
} else {
break;
}
}
pParentSql->res.code = code;
tscQueueAsyncRes(pParentSql);
return;
taos_free_result(pSql);
tfree(pSupporter);
fclose(fp);
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
return;
} while (0);
}
// accumulate the total submit records
@ -1439,7 +1451,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
int32_t count = 0;
int32_t maxRows = 0;
tfree(pCmd->pTableMetaList);
tfree(pCmd->pTableNameList);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
if (pCmd->pTableBlockHashList == NULL) {
@ -1488,7 +1500,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
code = doPackSendDataBlock(pSql, count, pTableDataBlock);
if (code != TSDB_CODE_SUCCESS) {
pParentSql->res.code = code;
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
return;
}
@ -1523,7 +1535,7 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) {
tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code));
tfree(pSupporter);
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return;
}

View File

@ -75,11 +75,11 @@ static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SC
static int32_t convertFunctionId(int32_t optr, int16_t* functionId);
static uint8_t convertOptr(SStrToken *pToken);
static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery);
static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery, bool intervalQuery);
static bool validateIpAddress(const char* ip, size_t size);
static bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery);
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool intervalQuery);
static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, SArray* pList, SSqlCmd* pCmd);
@ -666,6 +666,7 @@ int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQ
const char* msg1 = "invalid query expression";
const char* msg2 = "interval cannot be less than 10 ms";
const char* msg3 = "sliding cannot be used without interval";
const char* msg4 = "top/bottom query does not support order by value in interval query";
SSqlCmd* pCmd = &pSql->cmd;
@ -712,6 +713,11 @@ int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQ
return TSDB_CODE_TSC_INVALID_SQL;
}
int32_t colId = pQueryInfo->order.orderColId;
if (pQueryInfo->interval.interval > 0 && colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
return TSDB_CODE_SUCCESS;
}
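A hypothetical example of what the msg4 check above now rejects: an interval query ordered by the top/bottom result value, such as SELECT TOP(c1, 5) FROM st1 INTERVAL(1m) ORDER BY c1 DESC (table and column names are illustrative only). Ordering the same interval query by the timestamp column is still accepted.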
@ -904,7 +910,7 @@ int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableNa
* that are corresponding to the old name for the new table name.
*/
if (strlen(oldName) > 0 && strncasecmp(oldName, pTableMetaInfo->name, tListLen(pTableMetaInfo->name)) != 0) {
tscClearTableMetaInfo(pTableMetaInfo, false);
tscClearTableMetaInfo(pTableMetaInfo);
}
return TSDB_CODE_SUCCESS;
@ -996,33 +1002,6 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
return false;
}
int32_t nLen = 0;
for (int32_t i = 0; i < numOfTags; ++i) {
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
if (p->bytes == 0) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
return false;
}
nLen += p->bytes;
}
// max tag row length must be less than TSDB_MAX_TAGS_LEN
if (nLen > TSDB_MAX_TAGS_LEN) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
return false;
}
// field name must be unique
for (int32_t i = 0; i < numOfTags; ++i) {
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
if (has(pFieldList, 0, p->name) == true) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
return false;
}
}
/* timestamp in tag is not allowed */
for (int32_t i = 0; i < numOfTags; ++i) {
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
@ -1054,6 +1033,33 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC
}
}
int32_t nLen = 0;
for (int32_t i = 0; i < numOfTags; ++i) {
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
if (p->bytes == 0) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
return false;
}
nLen += p->bytes;
}
// max tag row length must be less than TSDB_MAX_TAGS_LEN
if (nLen > TSDB_MAX_TAGS_LEN) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
return false;
}
// field name must be unique
for (int32_t i = 0; i < numOfTags; ++i) {
TAOS_FIELD* p = taosArrayGet(pTagsList, i);
if (has(pFieldList, 0, p->name) == true) {
invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
return false;
}
}
return true;
}
@ -1469,7 +1475,7 @@ static void addPrimaryTsColIntoResult(SQueryInfo* pQueryInfo) {
pQueryInfo->type |= TSDB_QUERY_TYPE_PROJECTION_QUERY;
}
int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery) {
int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable, bool joinQuery, bool intervalQuery) {
assert(pSelection != NULL && pCmd != NULL);
const char* msg2 = "functions can not be mixed up";
@ -1525,7 +1531,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
addPrimaryTsColIntoResult(pQueryInfo);
}
if (!functionCompatibleCheck(pQueryInfo, joinQuery)) {
if (!functionCompatibleCheck(pQueryInfo, joinQuery, intervalQuery)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
@ -2804,7 +2810,7 @@ bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo)
return false;
}
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery, bool intervalQuery) {
int32_t startIdx = 0;
size_t numOfExpr = tscSqlExprNumOfExprs(pQueryInfo);
@ -2820,6 +2826,10 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
int32_t factor = functionCompatList[tscSqlExprGet(pQueryInfo, startIdx)->functionId];
if (tscSqlExprGet(pQueryInfo, 0)->functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery)) {
return false;
}
// the diff function cannot be executed together with other functions
// arithmetic functions can be executed together with other arithmetic functions
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
@ -2844,7 +2854,7 @@ static bool functionCompatibleCheck(SQueryInfo* pQueryInfo, bool joinQuery) {
}
}
if (functionId == TSDB_FUNC_LAST_ROW && joinQuery) {
if (functionId == TSDB_FUNC_LAST_ROW && (joinQuery || intervalQuery)) {
return false;
}
}
@ -4013,6 +4023,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
if (pExpr->nSQLOptr == TK_LIKE) {
char* str = taosStringBuilderGetResult(sb, NULL);
pQueryInfo->tagCond.tbnameCond.cond = strdup(str);
pQueryInfo->tagCond.tbnameCond.len = (int32_t) strlen(str);
return TSDB_CODE_SUCCESS;
}
@ -4062,6 +4073,7 @@ static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo,
char* str = taosStringBuilderGetResult(&sb1, NULL);
pQueryInfo->tagCond.tbnameCond.cond = strdup(str);
pQueryInfo->tagCond.tbnameCond.len = (int32_t) strlen(str);
taosStringBuilderDestroy(&sb1);
tfree(segments);
@ -4472,6 +4484,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
const char* msg = "illegal value or data overflow";
const char* msg1 = "value is expected";
const char* msg2 = "invalid fill option";
const char* msg3 = "top/bottom not support fill";
if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
@ -4554,6 +4567,14 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
for(int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
return TSDB_CODE_SUCCESS;
}
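To illustrate the new msg3 check just above with a hypothetical table: a query that combines TOP or BOTTOM with a FILL clause, for example SELECT TOP(c1, 3) FROM tb1 WHERE ts > now - 1h INTERVAL(10s) FILL(NULL), is now rejected with "top/bottom not support fill", while the same query without the FILL clause remains valid.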
@ -4646,7 +4667,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
if (!(orderByTags || orderByTS) && !isTopBottomQuery(pQueryInfo)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
} else {
} else { // order by top/bottom result value column is not supported in case of interval query.
assert(!(orderByTags && orderByTS));
}
@ -4936,7 +4957,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) pCmd->payload;
pUpdateMsg->head.vgId = htonl(pTableMeta->vgroupInfo.vgId);
pUpdateMsg->head.vgId = htonl(pTableMeta->vgId);
pUpdateMsg->tid = htonl(pTableMeta->id.tid);
pUpdateMsg->uid = htobe64(pTableMeta->id.uid);
pUpdateMsg->colId = htons(pTagsSchema->colId);
@ -5442,6 +5463,7 @@ static void setCreateDBOption(SCreateDbMsg* pMsg, SCreateDBInfo* pCreateDb) {
pMsg->quorum = pCreateDb->quorum;
pMsg->ignoreExist = pCreateDb->ignoreExists;
pMsg->update = pCreateDb->update;
pMsg->cacheLastRow = pCreateDb->cachelast;
}
int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql) {
@ -6302,7 +6324,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
}
bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable, false) != TSDB_CODE_SUCCESS) {
if (parseSelectClause(&pSql->cmd, 0, pQuerySql->pSelection, isSTable, false, false) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -6365,6 +6387,41 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
return TSDB_CODE_SUCCESS;
}
static int32_t checkQueryRangeForFill(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg3 = "start(end) time of query range required or time range too large";
if (pQueryInfo->interval.interval == 0) {
return TSDB_CODE_SUCCESS;
}
bool initialWindows = TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER);
if (initialWindows) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
int64_t intervalRange = 0;
if (pQueryInfo->interval.intervalUnit == 'n' || pQueryInfo->interval.intervalUnit == 'y') {
int64_t f = 1;
if (pQueryInfo->interval.intervalUnit == 'n') {
f = 30L * MILLISECOND_PER_DAY;
} else if (pQueryInfo->interval.intervalUnit == 'y') {
f = 365L * MILLISECOND_PER_DAY;
}
intervalRange = pQueryInfo->interval.interval * f;
} else {
intervalRange = pQueryInfo->interval.interval;
}
// the number of results must not be greater than 10,000,000
if ((timeRange == 0) || (timeRange / intervalRange) >= MAX_INTERVAL_TIME_WINDOW) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
return TSDB_CODE_SUCCESS;
}
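A minimal sketch of the guard introduced above, kept separate from the diff: it assumes MAX_INTERVAL_TIME_WINDOW is the 10,000,000 cap mentioned in the comment, that times are in milliseconds, and it ignores the 'n'/'y' unit conversion for brevity. The helper name and constants are illustrative only.

// Editor's sketch, not part of the commit: reject fill queries whose window count would explode.
#include <stdbool.h>
#include <stdint.h>

static bool fillWindowCountAcceptable(int64_t skey, int64_t ekey, int64_t intervalMs) {
  const int64_t kMaxWindows = 10000000;                   // assumed cap (MAX_INTERVAL_TIME_WINDOW)
  int64_t timeRange = (skey > ekey) ? (skey - ekey) : (ekey - skey);
  if (timeRange == 0 || intervalMs <= 0) {
    return false;                                         // empty or unbounded range cannot be filled
  }
  return (timeRange / intervalMs) < kMaxWindows;          // e.g. 10 days at 100 ms -> 8,640,000 windows, accepted
}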
int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
assert(pQuerySql != NULL && (pQuerySql->from == NULL || taosArrayGetSize(pQuerySql->from) > 0));
@ -6512,7 +6569,9 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
int32_t joinQuery = (pQuerySql->from != NULL && taosArrayGetSize(pQuerySql->from) > 2);
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery) != TSDB_CODE_SUCCESS) {
int32_t intervalQuery = !(pQuerySql->interval.type == 0 || pQuerySql->interval.n == 0);
if (parseSelectClause(pCmd, index, pQuerySql->pSelection, isSTable, joinQuery, intervalQuery) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -6560,31 +6619,21 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
tscFieldInfoUpdateOffset(pQueryInfo);
/*
* fill options are set at the end position, when all columns are set properly
* the columns may be increased due to group by operation
*/
if (pQuerySql->fillType != NULL) {
if (pQueryInfo->interval.interval == 0 && (!tscIsPointInterpQuery(pQueryInfo))) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (pQueryInfo->interval.interval > 0) {
bool initialWindows = TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER);
if (initialWindows) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
// number of result is not greater than 10,000,000
if ((timeRange == 0) || (timeRange / pQueryInfo->interval.interval) > MAX_INTERVAL_TIME_WINDOW) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
/*
* fill options are set at the end position, after all columns have been set properly,
* since the number of columns may be increased by the group by operation
*/
if ((code = checkQueryRangeForFill(pCmd, pQueryInfo)) != TSDB_CODE_SUCCESS) {
return code;
}
int32_t ret = parseFillClause(pCmd, pQueryInfo, pQuerySql);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
if ((code = parseFillClause(pCmd, pQueryInfo, pQuerySql)) != TSDB_CODE_SUCCESS) {
return code;
}
}

View File

@ -130,23 +130,15 @@ SSchema* tscGetColumnSchemaById(STableMeta* pTableMeta, int16_t colId) {
return NULL;
}
static void tscInitCorVgroupInfo(SCorVgroupInfo *corVgroupInfo, SVgroupInfo *vgroupInfo) {
corVgroupInfo->version = 0;
corVgroupInfo->inUse = 0;
corVgroupInfo->numOfEps = vgroupInfo->numOfEps;
for (int32_t i = 0; i < corVgroupInfo->numOfEps; i++) {
corVgroupInfo->epAddr[i].fqdn = strdup(vgroupInfo->epAddr[i].fqdn);
corVgroupInfo->epAddr[i].port = vgroupInfo->epAddr[i].port;
}
}
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size) {
assert(pTableMetaMsg != NULL);
STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg) {
assert(pTableMetaMsg != NULL && pTableMetaMsg->numOfColumns >= 2 && pTableMetaMsg->numOfTags >= 0);
int32_t schemaSize = (pTableMetaMsg->numOfColumns + pTableMetaMsg->numOfTags) * sizeof(SSchema);
STableMeta* pTableMeta = calloc(1, sizeof(STableMeta) + schemaSize);
pTableMeta->tableType = pTableMetaMsg->tableType;
pTableMeta->vgId = pTableMetaMsg->vgroup.vgId;
pTableMeta->tableInfo = (STableComInfo) {
.numOfTags = pTableMetaMsg->numOfTags,
.precision = pTableMetaMsg->precision,
@ -156,22 +148,9 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
pTableMeta->id.tid = pTableMetaMsg->tid;
pTableMeta->id.uid = pTableMetaMsg->uid;
SVgroupInfo* pVgroupInfo = &pTableMeta->vgroupInfo;
pVgroupInfo->numOfEps = pTableMetaMsg->vgroup.numOfEps;
pVgroupInfo->vgId = pTableMetaMsg->vgroup.vgId;
for(int32_t i = 0; i < pVgroupInfo->numOfEps; ++i) {
SEpAddrMsg* pEpMsg = &pTableMetaMsg->vgroup.epAddr[i];
pVgroupInfo->epAddr[i].fqdn = strndup(pEpMsg->fqdn, tListLen(pEpMsg->fqdn));
pVgroupInfo->epAddr[i].port = pEpMsg->port;
}
tscInitCorVgroupInfo(&pTableMeta->corVgroupInfo, pVgroupInfo);
pTableMeta->sversion = pTableMetaMsg->sversion;
pTableMeta->tversion = pTableMetaMsg->tversion;
tstrncpy(pTableMeta->sTableId, pTableMetaMsg->sTableId, TSDB_TABLE_FNAME_LEN);
tstrncpy(pTableMeta->sTableName, pTableMetaMsg->sTableName, TSDB_TABLE_FNAME_LEN);
memcpy(pTableMeta->schema, pTableMetaMsg->schema, schemaSize);
@ -180,13 +159,44 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
pTableMeta->tableInfo.rowSize += pTableMeta->schema[i].bytes;
}
if (size != NULL) {
*size = sizeof(STableMeta) + schemaSize;
}
return pTableMeta;
}
bool vgroupInfoIdentical(SNewVgroupInfo *pExisted, SVgroupMsg* src) {
assert(pExisted != NULL && src != NULL);
if (pExisted->numOfEps != src->numOfEps) {
return false;
}
for(int32_t i = 0; i < pExisted->numOfEps; ++i) {
if (pExisted->ep[i].port != src->epAddr[i].port) {
return false;
}
if (strncmp(pExisted->ep[i].fqdn, src->epAddr[i].fqdn, tListLen(pExisted->ep[i].fqdn)) != 0) {
return false;
}
}
return true;
}
SNewVgroupInfo createNewVgroupInfo(SVgroupMsg *pVgroupMsg) {
assert(pVgroupMsg != NULL);
SNewVgroupInfo info = {0};
info.numOfEps = pVgroupMsg->numOfEps;
info.vgId = pVgroupMsg->vgId;
info.inUse = 0;
for(int32_t i = 0; i < pVgroupMsg->numOfEps; ++i) {
tstrncpy(info.ep[i].fqdn, pVgroupMsg->epAddr[i].fqdn, TSDB_FQDN_LEN);
info.ep[i].port = pVgroupMsg->epAddr[i].port;
}
return info;
}
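For orientation, a sketch of how these two helpers are combined with the new global tscVgroupMap hash later in this commit. The taosHashGetClone/taosHashPut calls and the "inUse < 0 means not cached" convention are taken from the tscProcessTableMetaRsp changes below; the wrapper function itself is hypothetical and assumes the client's internal headers.

// Sketch only: refresh the cached vgroup entry when the reported vgroup differs from the cache.
static void refreshVgroupCache(int32_t vgId, SVgroupMsg *pVgroupMsg) {
  SNewVgroupInfo cached = {.inUse = -1};   // inUse < 0 marks "not cached yet"
  taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &cached, sizeof(SNewVgroupInfo));

  if (cached.inUse < 0 || !vgroupInfoIdentical(&cached, pVgroupMsg)) {
    SNewVgroupInfo fresh = createNewVgroupInfo(pVgroupMsg);
    taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &fresh, sizeof(fresh));
  }
}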
// todo refactor
UNUSED_FUNC static FORCE_INLINE char* skipSegments(char* input, char delim, int32_t num) {
for (int32_t i = 0; i < num; ++i) {

View File

@ -14,7 +14,6 @@
*/
#include "os.h"
#include "tcache.h"
#include "tcmdtype.h"
#include "trpc.h"
#include "tscLocalMerge.h"
@ -45,32 +44,30 @@ static int32_t getWaitingTimeInterval(int32_t count) {
return 0;
}
return initial * (2<<(count - 2));
return initial * ((2u)<<(count - 2));
}
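As a side note on the retry backoff above: for count >= 2 the wait doubles on every retry, i.e. initial * 2^(count-1), and the (2u) cast keeps the left shift in unsigned arithmetic. A tiny illustration follows; the value of initial is purely hypothetical, the real one is defined earlier in this file.

// Hypothetical illustration only, not part of the commit.
#include <stdint.h>

static int32_t waitingTimeSketch(int32_t count) {
  const int32_t initial = 100;              // assumed to be 100 ms, for illustration
  if (count <= 1) {
    return 0;
  }
  return initial * ((2u) << (count - 2));   // count=2 -> 200, count=3 -> 400, count=4 -> 800
}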
static void tscSetDnodeEpSet(SSqlObj* pSql, SVgroupInfo* pVgroupInfo) {
assert(pSql != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0);
SRpcEpSet* pEpSet = &pSql->epSet;
static void tscSetDnodeEpSet(SRpcEpSet* pEpSet, SVgroupInfo* pVgroupInfo) {
assert(pEpSet != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0);
// Issue the query to one of the vnodes in the vgroup, chosen randomly.
// changing the inUse property would not affect the inUse attribute of STableMeta
pEpSet->inUse = rand() % pVgroupInfo->numOfEps;
// apply the FQDN string length check here
bool hasFqdn = false;
bool existed = false;
pEpSet->numOfEps = pVgroupInfo->numOfEps;
for(int32_t i = 0; i < pVgroupInfo->numOfEps; ++i) {
tstrncpy(pEpSet->fqdn[i], pVgroupInfo->epAddr[i].fqdn, tListLen(pEpSet->fqdn[i]));
pEpSet->port[i] = pVgroupInfo->epAddr[i].port;
if (!hasFqdn) {
hasFqdn = (strlen(pEpSet->fqdn[i]) > 0);
int32_t len = (int32_t) strnlen(pVgroupInfo->epAddr[i].fqdn, TSDB_FQDN_LEN);
if (len > 0) {
tstrncpy(pEpSet->fqdn[i], pVgroupInfo->epAddr[i].fqdn, tListLen(pEpSet->fqdn[i]));
existed = true;
}
}
assert(hasFqdn);
assert(existed);
}
static void tscDumpMgmtEpSet(SSqlObj *pSql) {
@ -87,7 +84,8 @@ static void tscEpSetHtons(SRpcEpSet *s) {
bool tscEpSetIsEqual(SRpcEpSet *s1, SRpcEpSet *s2) {
if (s1->numOfEps != s2->numOfEps || s1->inUse != s2->inUse) {
return false;
}
}
for (int32_t i = 0; i < s1->numOfEps; i++) {
if (s1->port[i] != s2->port[i]
|| strncmp(s1->fqdn[i], s2->fqdn[i], TSDB_FQDN_LEN) != 0)
@ -95,6 +93,7 @@ bool tscEpSetIsEqual(SRpcEpSet *s1, SRpcEpSet *s2) {
}
return true;
}
void tscUpdateMgmtEpSet(SSqlObj *pSql, SRpcEpSet *pEpSet) {
// no need to update if equal
SRpcCorEpSet *pCorEpSet = pSql->pTscObj->tscCorMgmtEpSet;
@ -102,37 +101,45 @@ void tscUpdateMgmtEpSet(SSqlObj *pSql, SRpcEpSet *pEpSet) {
pCorEpSet->epSet = *pEpSet;
taosCorEndWrite(&pCorEpSet->version);
}
static void tscDumpEpSetFromVgroupInfo(SCorVgroupInfo *pVgroupInfo, SRpcEpSet *pEpSet) {
static void tscDumpEpSetFromVgroupInfo(SRpcEpSet *pEpSet, SNewVgroupInfo *pVgroupInfo) {
if (pVgroupInfo == NULL) { return;}
taosCorBeginRead(&pVgroupInfo->version);
int8_t inUse = pVgroupInfo->inUse;
pEpSet->inUse = (inUse >= 0 && inUse < TSDB_MAX_REPLICA) ? inUse: 0;
pEpSet->numOfEps = pVgroupInfo->numOfEps;
for (int32_t i = 0; i < pVgroupInfo->numOfEps; ++i) {
tstrncpy(pEpSet->fqdn[i], pVgroupInfo->epAddr[i].fqdn, sizeof(pEpSet->fqdn[i]));
pEpSet->port[i] = pVgroupInfo->epAddr[i].port;
tstrncpy(pEpSet->fqdn[i], pVgroupInfo->ep[i].fqdn, sizeof(pEpSet->fqdn[i]));
pEpSet->port[i] = pVgroupInfo->ep[i].port;
}
taosCorEndRead(&pVgroupInfo->version);
}
static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {
SSqlCmd *pCmd = &pObj->cmd;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
if (pTableMetaInfo == NULL || pTableMetaInfo->pTableMeta == NULL) { return;}
SCorVgroupInfo *pVgroupInfo = &pTableMetaInfo->pTableMeta->corVgroupInfo;
taosCorBeginWrite(&pVgroupInfo->version);
tscDebug("before: Endpoint in use: %d", pVgroupInfo->inUse);
pVgroupInfo->inUse = pEpSet->inUse;
pVgroupInfo->numOfEps = pEpSet->numOfEps;
for (int32_t i = 0; i < pVgroupInfo->numOfEps; i++) {
tfree(pVgroupInfo->epAddr[i].fqdn);
pVgroupInfo->epAddr[i].fqdn = strndup(pEpSet->fqdn[i], tListLen(pEpSet->fqdn[i]));
pVgroupInfo->epAddr[i].port = pEpSet->port[i];
if (pTableMetaInfo == NULL || pTableMetaInfo->pTableMeta == NULL) {
return;
}
tscDebug("after: EndPoint in use: %d", pVgroupInfo->inUse);
taosCorEndWrite(&pVgroupInfo->version);
int32_t vgId = pTableMetaInfo->pTableMeta->vgId;
if (pTableMetaInfo->pTableMeta->tableType == TSDB_SUPER_TABLE) {
assert(vgId == 0);
return;
}
SNewVgroupInfo vgroupInfo = {.vgId = -1};
taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
assert(vgroupInfo.numOfEps > 0 && vgroupInfo.vgId > 0);
tscDebug("before: Endpoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps);
vgroupInfo.inUse = pEpSet->inUse;
vgroupInfo.numOfEps = pEpSet->numOfEps;
for (int32_t i = 0; i < vgroupInfo.numOfEps; i++) {
strncpy(vgroupInfo.ep[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN);
vgroupInfo.ep[i].port = pEpSet->port[i];
}
tscDebug("after: EndPoint in use:%d, numOfEps:%d", vgroupInfo.inUse, vgroupInfo.numOfEps);
taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(SNewVgroupInfo));
}
void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
@ -304,7 +311,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
return;
}
if (pEpSet) {
if (pEpSet) { // todo update this
if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if (pCmd->command < TSDB_SQL_MGMT) {
tscUpdateVgroupInfo(pSql, pEpSet);
@ -438,7 +445,7 @@ int doProcessSql(SSqlObj *pSql) {
}
if (pRes->code != TSDB_CODE_SUCCESS) {
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return pRes->code;
}
@ -447,7 +454,7 @@ int doProcessSql(SSqlObj *pSql) {
// NOTE: if code is TSDB_CODE_SUCCESS, pSql may have been released here already by other threads.
if (code != TSDB_CODE_SUCCESS) {
pRes->code = code;
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return code;
}
@ -515,8 +522,8 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
} else {
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
pRetrieveMsg->header.vgId = htonl(pTableMeta->vgroupInfo.vgId);
tscDebug("%p build fetch msg from only one vgroup, vgId:%d", pSql, pTableMeta->vgroupInfo.vgId);
pRetrieveMsg->header.vgId = htonl(pTableMeta->vgId);
tscDebug("%p build fetch msg from only one vgroup, vgId:%d", pSql, pTableMeta->vgId);
}
pSql->cmd.payloadLen = sizeof(SRetrieveTableMsg);
@ -535,7 +542,6 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
// NOTE: shell message size should not include SMsgDesc
int32_t size = pSql->cmd.payloadLen - sizeof(SMsgDesc);
int32_t vgId = pTableMeta->vgroupInfo.vgId;
SMsgDesc* pMsgDesc = (SMsgDesc*) pMsg;
pMsgDesc->numOfVnodes = htonl(1); // always one vnode
@ -543,7 +549,7 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += sizeof(SMsgDesc);
SSubmitMsg *pShellMsg = (SSubmitMsg *)pMsg;
pShellMsg->header.vgId = htonl(vgId);
pShellMsg->header.vgId = htonl(pTableMeta->vgId);
pShellMsg->header.contLen = htonl(size); // the length not includes the size of SMsgDesc
pShellMsg->length = pShellMsg->header.contLen;
@ -551,9 +557,12 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
// pSql->cmd.payloadLen is set during copying data into payload
pSql->cmd.msgType = TSDB_MSG_TYPE_SUBMIT;
tscDumpEpSetFromVgroupInfo(&pTableMeta->corVgroupInfo, &pSql->epSet);
tscDebug("%p build submit msg, vgId:%d numOfTables:%d numberOfEP:%d", pSql, vgId, pSql->cmd.numOfTablesInSubmit,
SNewVgroupInfo vgroupInfo = {0};
taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
tscDebug("%p build submit msg, vgId:%d numOfTables:%d numberOfEP:%d", pSql, pTableMeta->vgId, pSql->cmd.numOfTablesInSubmit,
pSql->epSet.numOfEps);
return TSDB_CODE_SUCCESS;
}
@ -561,9 +570,11 @@ int tscBuildSubmitMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
/*
* for table query, simply return the size <= 1k
*/
static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) {
static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) {
const static int32_t MIN_QUERY_MSG_PKT_SIZE = TSDB_MAX_BYTES_PER_ROW * 5;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
SSqlCmd* pCmd = &pSql->cmd;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo));
@ -571,6 +582,8 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) {
int32_t exprSize = (int32_t)(sizeof(SSqlFuncMsg) * numOfExprs * 2);
int32_t tsBufSize = (pQueryInfo->tsBuf != NULL) ? pQueryInfo->tsBuf->fileSize : 0;
int32_t sqlLen = (int32_t) strlen(pSql->sqlstr) + 1;
int32_t tableSerialize = 0;
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@ -587,7 +600,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd, int32_t clauseIndex) {
}
return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + exprSize + tsBufSize +
tableSerialize + 4096;
tableSerialize + sqlLen + 4096;
}
static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) {
@ -597,24 +610,31 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || pTableMetaInfo->pVgroupTables == NULL) {
SVgroupInfo* pVgroupInfo = NULL;
int32_t vgId = -1;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
int32_t index = pTableMetaInfo->vgroupIndex;
assert(index >= 0);
SVgroupInfo* pVgroupInfo = NULL;
if (pTableMetaInfo->vgroupList->numOfVgroups > 0) {
assert(index < pTableMetaInfo->vgroupList->numOfVgroups);
pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
}
vgId = pVgroupInfo->vgId;
tscSetDnodeEpSet(&pSql->epSet, pVgroupInfo);
tscDebug("%p query on stable, vgIndex:%d, numOfVgroups:%d", pSql, index, pTableMetaInfo->vgroupList->numOfVgroups);
} else {
pVgroupInfo = &pTableMeta->vgroupInfo;
vgId = pTableMeta->vgId;
SNewVgroupInfo vgroupInfo = {0};
taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
}
assert(pVgroupInfo != NULL);
pSql->epSet.inUse = rand()%pSql->epSet.numOfEps;
tscSetDnodeEpSet(pSql, pVgroupInfo);
pQueryMsg->head.vgId = htonl(pVgroupInfo->vgId);
pQueryMsg->head.vgId = htonl(vgId);
STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg;
pTableIdInfo->tid = htonl(pTableMeta->id.tid);
@ -633,7 +653,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, index);
// set the vgroup info
tscSetDnodeEpSet(pSql, &pTableIdList->vgInfo);
tscSetDnodeEpSet(&pSql->epSet, &pTableIdList->vgInfo);
pQueryMsg->head.vgId = htonl(pTableIdList->vgInfo.vgId);
int32_t numOfTables = (int32_t)taosArrayGetSize(pTableIdList->itemList);
@ -660,7 +680,7 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
int32_t size = tscEstimateQueryMsgSize(pCmd, pCmd->clauseIndex);
int32_t size = tscEstimateQueryMsgSize(pSql, pCmd->clauseIndex);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for query msg", pSql);
@ -693,7 +713,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
tstrncpy(pQueryMsg->version, version, tListLen(pQueryMsg->version));
int32_t numOfTags = (int32_t)taosArrayGetSize(pTableMetaInfo->tagColList);
int32_t sqlLen = (int32_t) strlen(pSql->sqlstr);
if (pQueryInfo->order.order == TSDB_ORDER_ASC) {
pQueryMsg->window.skey = htobe64(pQueryInfo->window.skey);
pQueryMsg->window.ekey = htobe64(pQueryInfo->window.ekey);
@ -716,10 +737,12 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit;
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType);
pQueryMsg->tbnameCondLen = htonl(pQueryInfo->tagCond.tbnameCond.len);
pQueryMsg->numOfTags = htonl(numOfTags);
pQueryMsg->queryType = htonl(pQueryInfo->type);
pQueryMsg->vgroupLimit = htobe64(pQueryInfo->vgroupLimit);
pQueryMsg->vgroupLimit = htobe64(pQueryInfo->vgroupLimit);
pQueryMsg->sqlstrLen = htonl(sqlLen);
size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo);
pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number
@ -888,13 +911,13 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
for (int32_t j = 0; j < pGroupbyExpr->numOfGroupCols; ++j) {
SColIndex* pCol = taosArrayGet(pGroupbyExpr->columnInfo, j);
*((int16_t *)pMsg) = pCol->colId;
*((int16_t *)pMsg) = htons(pCol->colId);
pMsg += sizeof(pCol->colId);
*((int16_t *)pMsg) += pCol->colIndex;
*((int16_t *)pMsg) += htons(pCol->colIndex);
pMsg += sizeof(pCol->colIndex);
*((int16_t *)pMsg) += pCol->flag;
*((int16_t *)pMsg) += htons(pCol->flag);
pMsg += sizeof(pCol->flag);
memcpy(pMsg, pCol->name, tListLen(pCol->name));
@ -952,13 +975,11 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg += pCond->len;
}
}
if (pQueryInfo->tagCond.tbnameCond.cond == NULL) {
*pMsg = 0;
pMsg++;
} else {
strcpy(pMsg, pQueryInfo->tagCond.tbnameCond.cond);
pMsg += strlen(pQueryInfo->tagCond.tbnameCond.cond) + 1;
SCond* pCond = &pQueryInfo->tagCond.tbnameCond;
if (pCond->len > 0) {
strncpy(pMsg, pCond->cond, pCond->len);
pMsg += pCond->len;
}
// compressed ts block
@ -979,6 +1000,9 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->tsNumOfBlocks = htonl(pQueryMsg->tsNumOfBlocks);
}
memcpy(pMsg, pSql->sqlstr, sqlLen);
pMsg += sqlLen;
int32_t msgLen = (int32_t)(pMsg - pCmd->payload);
tscDebug("%p msg built success, len:%d bytes", pSql, msgLen);
@ -1247,7 +1271,7 @@ int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pShowMsg->payloadLen = htons(pEpAddr->n);
}
pCmd->payloadLen = sizeof(SShowMsg) + pShowMsg->payloadLen;
pCmd->payloadLen = sizeof(SShowMsg) + htons(pShowMsg->payloadLen);
return TSDB_CODE_SUCCESS;
}
@ -1445,51 +1469,18 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) pCmd->payload;
pCmd->payloadLen = htonl(pUpdateMsg->head.contLen);
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMeta *pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta;
tscDumpEpSetFromVgroupInfo(&pTableMetaInfo->pTableMeta->corVgroupInfo, &pSql->epSet);
SNewVgroupInfo vgroupInfo = {.vgId = -1};
taosHashGetClone(tscVgroupMap, &pTableMeta->vgId, sizeof(pTableMeta->vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
assert(vgroupInfo.vgId > 0);
tscDumpEpSetFromVgroupInfo(&pSql->epSet, &vgroupInfo);
return TSDB_CODE_SUCCESS;
}
//int tscBuildCancelQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
// SCancelQueryMsg *pCancelMsg = (SCancelQueryMsg*) pSql->cmd.payload;
// pCancelMsg->qhandle = htobe64(pSql->res.qhandle);
//
// SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
// STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
//
// if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
// int32_t vgIndex = pTableMetaInfo->vgroupIndex;
// if (pTableMetaInfo->pVgroupTables == NULL) {
// SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList;
// assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups);
//
// pCancelMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId);
// tscDebug("%p build cancel query msg from vgId:%d, vgIndex:%d", pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex);
// } else {
// int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
// assert(vgIndex >= 0 && vgIndex < numOfVgroups);
//
// SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex);
//
// pCancelMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId);
// tscDebug("%p build cancel query msg from vgId:%d, vgIndex:%d", pSql, pTableIdList->vgInfo.vgId, vgIndex);
// }
// } else {
// STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
// pCancelMsg->header.vgId = htonl(pTableMeta->vgroupInfo.vgId);
// tscDebug("%p build cancel query msg from only one vgroup, vgId:%d", pSql, pTableMeta->vgroupInfo.vgId);
// }
//
// pSql->cmd.payloadLen = sizeof(SCancelQueryMsg);
// pSql->cmd.msgType = TSDB_MSG_TYPE_CANCEL_QUERY;
//
// pCancelMsg->header.contLen = htonl(sizeof(SCancelQueryMsg));
// return TSDB_CODE_SUCCESS;
//}
int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SAlterDbMsg);
@ -1551,7 +1542,7 @@ static int tscLocalResultCommonBuilder(SSqlObj *pSql, int32_t numOfRes) {
if (code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pSql->param, pSql, pSql->res.numOfRows);
} else {
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
}
}
@ -1580,7 +1571,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
int32_t code = pRes->code;
if (pRes->code != TSDB_CODE_SUCCESS) {
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return code;
}
@ -1599,7 +1590,7 @@ int tscProcessRetrieveLocalMergeRsp(SSqlObj *pSql) {
if (pRes->code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
} else {
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
}
return code;
@ -1843,19 +1834,46 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
pSchema++;
}
size_t size = 0;
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg, &size);
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
// todo add one more function: taosAddDataIfNotExists();
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
assert(pTableMetaInfo->pTableMeta == NULL);
pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscMetaCache, pTableMetaInfo->name,
strlen(pTableMetaInfo->name), pTableMeta, size, tsTableMetaKeepTimer * 1000);
if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
// check whether the super table meta is already in the hashmap or not
int32_t len = (int32_t) strnlen(pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
if (pTableMetaInfo->pTableMeta == NULL) {
free(pTableMeta);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
// super tableMeta data already exists, create it according to tableMeta and add it to the hash map
STableMeta* pSupTableMeta = createSuperTableMeta(pMetaMsg);
uint32_t size = tscGetTableMetaSize(pSupTableMeta);
int32_t code = taosHashPut(tscTableMetaInfo, pTableMeta->sTableName, len, pSupTableMeta, size);
assert(code == TSDB_CODE_SUCCESS);
tfree(pSupTableMeta);
CChildTableMeta* cMeta = tscCreateChildMeta(pTableMeta);
taosHashPut(tscTableMetaInfo, pTableMetaInfo->name, strlen(pTableMetaInfo->name), cMeta, sizeof(CChildTableMeta));
tfree(cMeta);
} else {
uint32_t s = tscGetTableMetaSize(pTableMeta);
taosHashPut(tscTableMetaInfo, pTableMetaInfo->name, strlen(pTableMetaInfo->name), pTableMeta, s);
}
// update the vgroupInfo if needed
if (pTableMeta->vgId > 0) {
int32_t vgId = pTableMeta->vgId;
assert(pTableMeta->tableType != TSDB_SUPER_TABLE);
SNewVgroupInfo vgroupInfo = {.inUse = -1};
taosHashGetClone(tscVgroupMap, &vgId, sizeof(vgId), NULL, &vgroupInfo, sizeof(SNewVgroupInfo));
if (((vgroupInfo.inUse >= 0) && !vgroupInfoIdentical(&vgroupInfo, &pMetaMsg->vgroup)) ||
(vgroupInfo.inUse < 0)) { // vgroup info is not cached yet, or is cached but differs
vgroupInfo = createNewVgroupInfo(&pMetaMsg->vgroup);
taosHashPut(tscVgroupMap, &vgId, sizeof(vgId), &vgroupInfo, sizeof(vgroupInfo));
tscDebug("add new VgroupInfo, vgId:%d, total:%d", vgId, (int32_t) taosHashGetSize(tscVgroupMap));
}
}
tscDebug("%p recv table meta, uid:%"PRId64 ", tid:%d, name:%s", pSql, pTableMeta->id.uid, pTableMeta->id.tid, pTableMetaInfo->name);
@ -1866,8 +1884,8 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
/**
* multi table meta rsp pkg format:
* | STaosRsp | ieType | SMultiTableInfoMsg | SMeterMeta0 | SSchema0 | SMeterMeta1 | SSchema1 | SMeterMeta2 | SSchema2
* |...... 1B 1B 4B
* | STaosRsp | SMultiTableInfoMsg | SMeterMeta0 | SSchema0 | SMeterMeta1 | SSchema1 | SMeterMeta2 | SSchema2
* |...... 1B 4B
**/
int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) {
#if 0
@ -2021,14 +2039,10 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) {
return pSql->res.code;
}
/*
* current process do not use the cache at all
*/
int tscProcessShowRsp(SSqlObj *pSql) {
STableMetaMsg *pMetaMsg;
SShowRsp * pShow;
SSchema * pSchema;
char key[20];
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
@ -2053,20 +2067,10 @@ int tscProcessShowRsp(SSqlObj *pSql) {
pSchema++;
}
key[0] = pCmd->msgType + 'a';
strcpy(key + 1, "showlist");
tfree(pTableMetaInfo->pTableMeta);
pTableMetaInfo->pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg);
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void *)&(pTableMetaInfo->pTableMeta), false);
}
size_t size = 0;
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg, &size);
pTableMetaInfo->pTableMeta = taosCachePut(tscMetaCache, key, strlen(key), (char *)pTableMeta, size,
tsTableMetaKeepTimer * 1000);
SSchema *pTableSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
if (pQueryInfo->colList == NULL) {
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
}
@ -2089,12 +2093,9 @@ int tscProcessShowRsp(SSqlObj *pSql) {
pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
tscFieldInfoUpdateOffset(pQueryInfo);
tfree(pTableMeta);
return 0;
}
// TODO multithread problem
static void createHBObj(STscObj* pObj) {
if (pObj->hbrid != 0) {
return;
@ -2176,51 +2177,34 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
int tscProcessDropDbRsp(SSqlObj *pSql) {
pSql->pTscObj->db[0] = 0;
taosCacheEmpty(tscMetaCache);
taosHashEmpty(tscTableMetaInfo);
return 0;
}
int tscProcessDropTableRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
STableMeta *pTableMeta = taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
if (pTableMeta == NULL) { /* not in cache, abort */
return 0;
}
// The cached tableMeta has expired in this case, so remove it from the hash table
taosHashRemove(tscTableMetaInfo, pTableMetaInfo->name, strnlen(pTableMetaInfo->name, TSDB_TABLE_FNAME_LEN));
tscDebug("%p remove table meta after drop table:%s, numOfRemain:%d", pSql, pTableMetaInfo->name,
(int32_t) taosHashGetSize(tscTableMetaInfo));
/*
* 1. if a user drops one table, which is the only table in a vnode, remove operation will incur vnode to be removed.
* 2. Then, a user creates a new metric followed by a table with identical name of removed table but different schema,
* here the table will reside in a new vnode.
* The cached information is expired, however, we may have lost the ref of original meter. So, clear whole cache
* instead.
*/
tscDebug("%p force release table meta after drop table:%s", pSql, pTableMetaInfo->name);
taosCacheRelease(tscMetaCache, (void **)&pTableMeta, true);
assert(pTableMetaInfo->pTableMeta == NULL);
return 0;
}
int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
STableMeta *pTableMeta = taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
if (pTableMeta == NULL) { /* not in cache, abort */
return 0;
}
char* name = pTableMetaInfo->name;
tscDebug("%p remove tableMeta in hashMap after alter-table: %s", pSql, name);
tscDebug("%p force release metermeta in cache after alter-table: %s", pSql, pTableMetaInfo->name);
taosCacheRelease(tscMetaCache, (void **)&pTableMeta, true);
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tfree(pTableMetaInfo->pTableMeta);
if (pTableMetaInfo->pTableMeta) {
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true);
if (isSuperTable) { // if it is a super table, reset whole query cache
tscDebug("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name);
taosCacheEmpty(tscMetaCache);
}
if (isSuperTable) { // if it is a super table, iterate the hashTable and remove all the childTableMeta
taosHashEmpty(tscTableMetaInfo);
}
return 0;
@ -2230,6 +2214,7 @@ int tscProcessAlterDbMsgRsp(SSqlObj *pSql) {
UNUSED(pSql);
return 0;
}
int tscProcessShowCreateRsp(SSqlObj *pSql) {
return tscLocalResultCommonBuilder(pSql, 1);
}
@ -2350,7 +2335,7 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
int32_t code = tscProcessSql(pNew);
if (code == TSDB_CODE_SUCCESS) {
code = TSDB_CODE_TSC_ACTION_IN_PROGRESS; // notify upper application that current process need to be terminated
code = TSDB_CODE_TSC_ACTION_IN_PROGRESS; // notify application that current process needs to be terminated
}
return code;
@ -2358,21 +2343,29 @@ static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaIn
int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
assert(strlen(pTableMetaInfo->name) != 0);
tfree(pTableMetaInfo->pTableMeta);
// If this STableMetaInfo owns a table meta, release it first
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), false);
}
pTableMetaInfo->pTableMeta = (STableMeta *)taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
if (pTableMetaInfo->pTableMeta != NULL) {
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
tscDebug("%p retrieve table Meta from cache, the number of columns:%d, numOfTags:%d, %p", pSql, tinfo.numOfColumns,
tinfo.numOfTags, pTableMetaInfo->pTableMeta);
uint32_t size = tscGetTableMetaMaxSize();
pTableMetaInfo->pTableMeta = calloc(1, size);
pTableMetaInfo->pTableMeta->tableInfo.numOfColumns = -1;
int32_t len = (int32_t) strlen(pTableMetaInfo->name);
taosHashGetClone(tscTableMetaInfo, pTableMetaInfo->name, len, NULL, pTableMetaInfo->pTableMeta, -1);
// TODO resize the tableMeta
STableMeta* pMeta = pTableMetaInfo->pTableMeta;
if (pMeta->id.uid > 0) {
if (pMeta->tableType == TSDB_CHILD_TABLE) {
int32_t code = tscCreateTableMetaFromCChildMeta(pTableMetaInfo->pTableMeta, pTableMetaInfo->name);
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
}
return TSDB_CODE_SUCCESS;
}
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@ -2382,7 +2375,7 @@ int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool create
}
/**
* retrieve table meta from mnode, and update the local table meta cache.
* retrieve table meta from mnode, and update the local table meta hashmap.
* @param pSql sql object
* @param tableIndex table index
* @return status code
@ -2390,16 +2383,18 @@ int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool create
int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
SSqlCmd *pCmd = &pSql->cmd;
SQueryInfo * pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, tableIndex);
const char* name = pTableMetaInfo->name;
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (pTableMetaInfo->pTableMeta) {
tscDebug("%p update table meta, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64 ", addr:%p", pSql,
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid, pTableMeta);
if (pTableMeta) {
tscDebug("%p update table meta:%s, old meta numOfTags:%d, numOfCols:%d, uid:%" PRId64, pSql, name,
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid);
}
taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true);
// remove stored tableMeta info in hash table
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
return getTableMetaFromMnode(pSql, pTableMetaInfo);
}
@ -2440,7 +2435,7 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i);
STableMeta *pTableMeta = taosCacheAcquireByData(tscMetaCache, pMInfo->pTableMeta);
STableMeta* pTableMeta = tscTableMetaClone(pMInfo->pTableMeta);
tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList, pMInfo->pVgroupTables);
}

View File

@ -115,9 +115,7 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa
pObj->signature = pObj;
pObj->pDnodeConn = pDnodeConn;
T_REF_INIT_VAL(pObj, 1);
tstrncpy(pObj->user, user, sizeof(pObj->user));
secretEncryptLen = MIN(secretEncryptLen, sizeof(pObj->pass));
memcpy(pObj->pass, secretEncrypt, secretEncryptLen);
@ -172,11 +170,9 @@ static SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pa
if (taos != NULL) {
*taos = pObj;
}
registerSqlObj(pSql);
tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
pObj->rid = taosAddRef(tscRefId, pObj);
registerSqlObj(pSql);
return pSql;
}
@ -288,34 +284,21 @@ void taos_close(TAOS *taos) {
return;
}
// make sure that closing the connection can only be executed once.
pObj->signature = NULL;
taosTmrStopA(&(pObj->pTimer));
if (pObj->hbrid > 0) {
if (RID_VALID(pObj->hbrid)) {
SSqlObj* pHb = (SSqlObj*)taosAcquireRef(tscObjRef, pObj->hbrid);
if (pHb != NULL) {
if (pHb->rpcRid > 0) { // wait for rsp from dnode
if (RID_VALID(pHb->rpcRid)) { // wait for rsp from dnode
rpcCancelRequest(pHb->rpcRid);
pHb->rpcRid = -1;
}
tscDebug("%p HB is freed", pHb);
taos_free_result(pHb);
taosReleaseRef(tscObjRef, pHb->self);
taos_free_result(pHb);
}
}
int32_t ref = T_REF_DEC(pObj);
assert(ref >= 0);
if (ref > 0) {
tscDebug("%p %d remain sqlObjs, not free tscObj and dnodeConn:%p", pObj, ref, pObj->pDnodeConn);
return;
}
tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn);
taosRemoveRef(tscRefId, pObj->rid);
}
@ -331,7 +314,7 @@ static void waitForRetrieveRsp(void *param, TAOS_RES *tres, int numOfRows) {
tsem_post(&pSql->rspSem);
}
TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen, TAOS_RES** res) {
TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen, int64_t* res) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
@ -357,7 +340,7 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen, TAOS_RES
doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);
if (res != NULL) {
*res = pSql;
atomic_store_64(res, pSql->self);
}
tsem_wait(&pSql->rspSem);
@ -368,7 +351,7 @@ TAOS_RES* taos_query(TAOS *taos, const char *sqlstr) {
return taos_query_c(taos, sqlstr, (uint32_t)strlen(sqlstr), NULL);
}
TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, TAOS_RES** res) {
TAOS_RES* taos_query_h(TAOS* taos, const char *sqlstr, int64_t* res) {
return taos_query_c(taos, sqlstr, (uint32_t) strlen(sqlstr), res);
}
@ -726,7 +709,7 @@ static void tscKillSTableQuery(SSqlObj *pSql) {
pSubObj->rpcRid = -1;
}
tscQueueAsyncRes(pSubObj);
tscAsyncResultOnError(pSubObj);
taosReleaseRef(tscObjRef, pSubObj->self);
}
@ -762,7 +745,7 @@ void taos_stop_query(TAOS_RES *res) {
pSql->rpcRid = -1;
}
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
}
}
@ -926,7 +909,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) {
// must be done before cleaning the sqlcmd object
tscResetSqlCmdObj(&pSql->cmd, false);
tscResetSqlCmdObj(&pSql->cmd);
SSqlCmd *pCmd = &pSql->cmd;

View File

@ -167,7 +167,9 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
retryDelay);
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), true);
char* name = pTableMetaInfo->name;
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
@ -269,9 +271,8 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
tscDebug("%p stream:%p, query on:%s, fetch result completed, fetched rows:%" PRId64, pSql, pStream, pTableMetaInfo->name,
pStream->numOfRes);
// release the metric/meter meta information reference, so data in cache can be updated
tfree(pTableMetaInfo->pTableMeta);
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
tscFreeSqlResult(pSql);
tfree(pSql->pSubs);
pSql->subState.numOfSub = 0;

View File

@ -69,14 +69,17 @@ TSKEY tscGetSubscriptionProgress(void* sub, int64_t uid, TSKEY dflt) {
}
void tscUpdateSubscriptionProgress(void* sub, int64_t uid, TSKEY ts) {
if( sub == NULL)
if( sub == NULL) {
return;
}
SSub* pSub = (SSub*)sub;
SSubscriptionProgress target = {.uid = uid, .key = ts};
SSubscriptionProgress* p = taosArraySearch(pSub->progress, &target, tscCompareSubscriptionProgress);
if (p != NULL) {
p->key = ts;
tscDebug("subscribe:%s, uid:%"PRIu64" update sub start ts:%"PRId64, pSub->topic, p->uid, p->key);
}
}
@ -502,6 +505,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
if (taosArrayGetSize(pSub->progress) > 0) { // fix crash in single table subscription
pQueryInfo->window.skey = ((SSubscriptionProgress*)taosArrayGet(pSub->progress, 0))->key;
tscDebug("subscribe:%s set subscribe skey:%"PRId64, pSub->topic, pQueryInfo->window.skey);
}
if (pSub->pTimer == NULL) {

View File

@ -779,7 +779,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pParentSql->res.code = numOfRows;
quitAllSubquery(pParentSql, pSupporter);
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
return;
}
@ -796,7 +796,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
quitAllSubquery(pParentSql, pSupporter);
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
return;
}
@ -845,7 +845,7 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
if (code != TSDB_CODE_SUCCESS) {
freeJoinSubqueryObj(pParentSql);
pParentSql->res.code = code;
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
taosArrayDestroy(s1);
taosArrayDestroy(s2);
@ -916,7 +916,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
pParentSql->res.code = numOfRows;
quitAllSubquery(pParentSql, pSupporter);
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
return;
}
@ -930,7 +930,7 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p invalid ts comp file from vnode, abort subquery, file size:%d", pSql, numOfRows);
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
return;
}
@ -1028,7 +1028,7 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
pParentSql->res.code = numOfRows;
tscError("%p retrieve failed, index:%d, code:%s", pSql, pSupporter->subqueryIndex, tstrerror(numOfRows));
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
return;
}
@ -1155,7 +1155,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
if (pSql->res.code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pSql->param, pSql, 0);
} else {
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
}
return;
@ -1233,7 +1233,7 @@ void tscFetchDatablockForSubquery(SSqlObj* pSql) {
if (pSql->res.code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pSql->param, pSql, 0);
} else {
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
}
return;
@ -1344,7 +1344,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscError("%p abort query due to other subquery failure. code:%d, global code:%d", pSql, code, pParentSql->res.code);
quitAllSubquery(pParentSql, pSupporter);
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
return;
}
@ -1357,7 +1357,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
pParentSql->res.code = code;
quitAllSubquery(pParentSql, pSupporter);
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
return;
}
@ -1403,7 +1403,7 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) {
if (pParentSql->res.code == TSDB_CODE_SUCCESS) {
(*pParentSql->fp)(pParentSql->param, pParentSql, 0);
} else {
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
}
}
}
@ -1612,7 +1612,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) {
_error:
pRes->code = code;
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
}
static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs) {
@ -1666,7 +1666,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
int32_t ret = tscLocalReducerEnvCreate(pSql, &pMemoryBuf, &pDesc, &pModel, &pFinalModel, nBufferSize);
if (ret != 0) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
tfree(pMemoryBuf);
return ret;
}
@ -1680,7 +1680,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscLocalReducerEnvDestroy(pMemoryBuf, pDesc, pModel, pFinalModel,pState->numOfSub);
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return ret;
}
@ -1890,7 +1890,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
(*pParentSql->fp)(pParentSql->param, pParentSql, pParentSql->res.code);
} else { // regular super table query
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
}
}
}
@ -1968,7 +1968,7 @@ static void tscAllDataRetrievedFromDnode(SRetrieveSupport *trsupport, SSqlObj* p
if (pParentSql->res.code == TSDB_CODE_SUCCESS) {
(*pParentSql->fp)(pParentSql->param, pParentSql, 0);
} else {
tscQueueAsyncRes(pParentSql);
tscAsyncResultOnError(pParentSql);
}
}
@ -2220,7 +2220,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
(*pParentObj->fp)(pParentObj->param, pParentObj, v);
} else {
if (!needRetryInsert(pParentObj, numOfSub)) {
tscQueueAsyncRes(pParentObj);
tscAsyncResultOnError(pParentObj);
return;
}
@ -2231,7 +2231,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
numOfFailed += 1;
// clean up tableMeta in cache
tscFreeQueryInfo(&pSql->cmd, true);
tscFreeQueryInfo(&pSql->cmd);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
@ -2243,15 +2243,16 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
tscError("%p Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj,
pParentObj->res.numOfRows, numOfFailed, numOfSub);
tscDebug("%p cleanup %d tableMeta in cache", pParentObj, pParentObj->cmd.numOfTables);
tscDebug("%p cleanup %d tableMeta in hashTable", pParentObj, pParentObj->cmd.numOfTables);
for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) {
taosCacheRelease(tscMetaCache, (void**)&(pParentObj->cmd.pTableMetaList[i]), true);
char* name = pParentObj->cmd.pTableNameList[i];
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
}
pParentObj->cmd.parseFinished = false;
pParentObj->subState.numOfRemain = numOfFailed;
tscResetSqlCmdObj(&pParentObj->cmd, false);
tscResetSqlCmdObj(&pParentObj->cmd);
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:
// 1. the table Id (tid & uid) may have been updated, so the submit block needs to be updated accordingly.
@ -2264,7 +2265,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
if (code != TSDB_CODE_SUCCESS) {
pParentObj->res.code = code;
tscQueueAsyncRes(pParentObj);
tscAsyncResultOnError(pParentObj);
return;
}
@ -2288,7 +2289,7 @@ int32_t tscHandleInsertRetry(SSqlObj* pParent, SSqlObj* pSql) {
int32_t code = tscCopyDataBlockToPayload(pSql, pTableDataBlock);
if ((pRes->code = code)!= TSDB_CODE_SUCCESS) {
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return code; // here the pSql may have been released already.
}
@ -2481,7 +2482,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
SSqlRes* pRes = &pSql->res;
if (pRes->code != TSDB_CODE_SUCCESS) {
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return;
}
@ -2496,7 +2497,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
if (pRes->tsrow == NULL || pRes->buffer == NULL || pRes->length == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
return;
}
@ -2508,7 +2509,7 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
if (pRes->code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pSql->param, pSql, pRes->numOfRows);
} else {
tscQueueAsyncRes(pSql);
tscAsyncResultOnError(pSql);
}
}
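
Several hunks above rework multiVnodeInsertFinalize: once every vnode sub-request has reported back, the failed ones are counted, their cached table names are dropped from tscTableMetaInfo, and only that many sub-requests are re-parsed and re-sent (subState.numOfRemain = numOfFailed). A minimal standalone sketch of just the counting step, with CODE_SUCCESS standing in for TSDB_CODE_SUCCESS:

#include <stdio.h>

#define CODE_SUCCESS 0  // stand-in for TSDB_CODE_SUCCESS

// Count the sub-requests that did not succeed; the caller retries exactly
// that many of them on the next round.
static int countFailedSubqueries(const int *codes, int numOfSub) {
  int numOfFailed = 0;
  for (int i = 0; i < numOfSub; ++i) {
    if (codes[i] != CODE_SUCCESS) {
      ++numOfFailed;
    }
  }
  return numOfFailed;
}

int main(void) {
  int codes[4] = { CODE_SUCCESS, 7, CODE_SUCCESS, 7 };
  printf("retry %d of 4 sub-requests\n", countFailedSubqueries(codes, 4));  // retry 2 of 4
  return 0;
}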

View File

@ -31,18 +31,21 @@
#include "tlocale.h"
// global, not configurable
SCacheObj* tscMetaCache;
int tscObjRef = -1;
void * tscTmr;
void * tscQhandle;
void * tscCheckDiskUsageTmr;
int tsInsertHeadSize;
int tscRefId = -1;
#define TSC_VAR_NOT_RELEASE 1
#define TSC_VAR_RELEASED 0
int tscNumOfThreads;
int32_t sentinel = TSC_VAR_NOT_RELEASE;
SHashObj *tscVgroupMap; // hash map to keep the global vgroup info
SHashObj *tscTableMetaInfo; // table meta info
int32_t tscObjRef = -1;
void *tscTmr;
void *tscQhandle;
int32_t tscRefId = -1;
int32_t tscNumOfObj = 0; // number of sqlObj in current process.
static void *tscCheckDiskUsageTmr;
static pthread_once_t tscinit = PTHREAD_ONCE_INIT;
//void tscUpdateEpSet(void *ahandle, SRpcEpSet *pEpSet);
void tscCheckDiskUsage(void *UNUSED_PARAM(para), void* UNUSED_PARAM(param)) {
taosGetDisk();
@ -115,7 +118,7 @@ void taos_init_imp(void) {
int queueSize = tsMaxConnections*2;
double factor = (tscEmbedded == 0)? 2.0:4.0;
tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / factor);
int32_t tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / factor);
if (tscNumOfThreads < 2) {
tscNumOfThreads = 2;
}
@ -131,10 +134,11 @@ void taos_init_imp(void) {
taosTmrReset(tscCheckDiskUsage, 10, NULL, tscTmr, &tscCheckDiskUsageTmr);
}
int64_t refreshTime = 10; // 10 seconds by default
if (tscMetaCache == NULL) {
tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, tscFreeTableMetaHelper, "tableMeta");
tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj);
if (tscTableMetaInfo == NULL) {
tscObjRef = taosOpenRef(40960, tscFreeRegisteredSqlObj);
tscVgroupMap = taosHashInit(256, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
tscTableMetaInfo = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
tscDebug("TableMeta:%p", tscTableMetaInfo);
}
tscRefId = taosOpenRef(200, tscCloseTscObj);
@ -152,30 +156,38 @@ void taos_init() { pthread_once(&tscinit, taos_init_imp); }
void taos_cleanup(void) {
tscDebug("start to cleanup client environment");
void* m = tscMetaCache;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscMetaCache, m, 0) == m) {
taosCacheCleanup(m);
if (atomic_val_compare_exchange_32(&sentinel, TSC_VAR_NOT_RELEASE, TSC_VAR_RELEASED) != TSC_VAR_NOT_RELEASE) {
return;
}
int refId = atomic_exchange_32(&tscObjRef, -1);
if (refId != -1) {
taosCloseRef(refId);
}
taosHashCleanup(tscTableMetaInfo);
tscTableMetaInfo = NULL;
m = tscQhandle;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscQhandle, m, 0) == m) {
taosCleanUpScheduler(m);
}
taosHashCleanup(tscVgroupMap);
tscVgroupMap = NULL;
int32_t id = tscObjRef;
tscObjRef = -1;
taosCloseRef(id);
void* p = tscQhandle;
tscQhandle = NULL;
taosCleanUpScheduler(p);
id = tscRefId;
tscRefId = -1;
taosCloseRef(id);
taosCloseRef(tscRefId);
taosCleanupKeywordsTable();
taosCloseLog();
if (tscEmbedded == 0) rpcCleanup();
m = tscTmr;
if (m != NULL && atomic_val_compare_exchange_ptr(&tscTmr, m, 0) == m) {
taosTmrCleanUp(m);
if (tscEmbedded == 0) {
rpcCleanup();
}
p = tscTmr;
tscTmr = NULL;
taosTmrCleanUp(p);
}
static int taos_options_imp(TSDB_OPTION option, const char *pStr) {
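
The new taos_cleanup above makes client teardown idempotent: an integer sentinel is swapped from TSC_VAR_NOT_RELEASE to TSC_VAR_RELEASED with an atomic compare-and-exchange before anything is released, so only the first caller performs the teardown. A standalone sketch of the same idiom using C11 atomics (TDengine's atomic_val_compare_exchange_32 wrapper is assumed to provide equivalent semantics):

#include <stdatomic.h>
#include <stdio.h>

#define VAR_NOT_RELEASE 1
#define VAR_RELEASED    0

static atomic_int sentinel = VAR_NOT_RELEASE;

static void cleanup(void) {
  int expected = VAR_NOT_RELEASE;
  // Only the caller that wins the swap tears things down; concurrent or
  // repeated callers observe VAR_RELEASED and return immediately.
  if (!atomic_compare_exchange_strong(&sentinel, &expected, VAR_RELEASED)) {
    return;
  }
  printf("resources released exactly once\n");
}

int main(void) {
  cleanup();
  cleanup();  // no-op on the second call
  return 0;
}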

View File

@ -18,7 +18,6 @@
#include "os.h"
#include "qAst.h"
#include "taosmsg.h"
#include "tcache.h"
#include "tkey.h"
#include "tmd5.h"
#include "tscLocalMerge.h"
@ -31,7 +30,7 @@
#include "ttokendef.h"
static void freeQueryInfoImpl(SQueryInfo* pQueryInfo);
static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache);
static void clearAllTableMetaInfo(SQueryInfo* pQueryInfo);
SCond* tsGetSTableQueryCond(STagCond* pTagCond, uint64_t uid) {
if (pTagCond->pCond == NULL) {
@ -379,17 +378,16 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) {
pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free
}
void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache) {
void tscFreeQueryInfo(SSqlCmd* pCmd) {
if (pCmd == NULL || pCmd->numOfClause == 0) {
return;
}
for (int32_t i = 0; i < pCmd->numOfClause; ++i) {
char* addr = (char*)pCmd - offsetof(SSqlObj, cmd);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, i);
freeQueryInfoImpl(pQueryInfo);
clearAllTableMetaInfo(pQueryInfo, (const char*)addr, removeFromCache);
clearAllTableMetaInfo(pQueryInfo);
tfree(pQueryInfo);
}
@ -397,7 +395,7 @@ void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache) {
tfree(pCmd->pQueryInfo);
}
void tscResetSqlCmdObj(SSqlCmd* pCmd, bool removeFromCache) {
void tscResetSqlCmdObj(SSqlCmd* pCmd) {
pCmd->command = 0;
pCmd->numOfCols = 0;
pCmd->count = 0;
@ -407,17 +405,17 @@ void tscResetSqlCmdObj(SSqlCmd* pCmd, bool removeFromCache) {
pCmd->autoCreated = 0;
for(int32_t i = 0; i < pCmd->numOfTables; ++i) {
if (pCmd->pTableMetaList && pCmd->pTableMetaList[i]) {
taosCacheRelease(tscMetaCache, (void**)&(pCmd->pTableMetaList[i]), false);
if (pCmd->pTableNameList && pCmd->pTableNameList[i]) {
tfree(pCmd->pTableNameList[i]);
}
}
pCmd->numOfTables = 0;
tfree(pCmd->pTableMetaList);
tfree(pCmd->pTableNameList);
pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList);
pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks);
tscFreeQueryInfo(pCmd, removeFromCache);
tscFreeQueryInfo(pCmd);
}
void tscFreeSqlResult(SSqlObj* pSql) {
@ -458,35 +456,15 @@ void tscFreeRegisteredSqlObj(void *pSql) {
SSqlObj* p = *(SSqlObj**)pSql;
STscObj* pTscObj = p->pTscObj;
assert(p->self != 0);
assert(RID_VALID(p->self));
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfObj, 1);
int32_t total = atomic_sub_fetch_32(&tscNumOfObj, 1);
tscDebug("%p free SqlObj, total in tscObj:%d, total:%d", pSql, num, total);
tscFreeSqlObj(p);
taosReleaseRef(tscRefId, pTscObj->rid);
int32_t ref = T_REF_DEC(pTscObj);
assert(ref >= 0);
tscDebug("%p free sqlObj completed, tscObj:%p ref:%d", p, pTscObj, ref);
if (ref == 0) {
tscDebug("%p all sqlObj freed, free tscObj:%p", p, pTscObj);
taosRemoveRef(tscRefId, pTscObj->rid);
}
}
void tscFreeTableMetaHelper(void *pTableMeta) {
STableMeta* p = (STableMeta*) pTableMeta;
int32_t numOfEps = p->vgroupInfo.numOfEps;
assert(numOfEps >= 0 && numOfEps <= TSDB_MAX_REPLICA);
for(int32_t i = 0; i < numOfEps; ++i) {
tfree(p->vgroupInfo.epAddr[i].fqdn);
}
int32_t numOfEps1 = p->corVgroupInfo.numOfEps;
assert(numOfEps1 >= 0 && numOfEps1 <= TSDB_MAX_REPLICA);
for(int32_t i = 0; i < numOfEps1; ++i) {
tfree(p->corVgroupInfo.epAddr[i].fqdn);
}
}
void tscFreeSqlObj(SSqlObj* pSql) {
@ -516,7 +494,7 @@ void tscFreeSqlObj(SSqlObj* pSql) {
pSql->self = 0;
tscFreeSqlResult(pSql);
tscResetSqlCmdObj(pCmd, false);
tscResetSqlCmdObj(pCmd);
tfree(pCmd->tagData.data);
pCmd->tagData.dataLen = 0;
@ -539,7 +517,7 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
// free the refcount for metermeta
if (pDataBlock->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void**)&(pDataBlock->pTableMeta), false);
tfree(pDataBlock->pTableMeta);
}
tfree(pDataBlock);
@ -610,15 +588,15 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
// set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache
if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) {
tstrncpy(pTableMetaInfo->name, pDataBlock->tableId, sizeof(pTableMetaInfo->name));
tstrncpy(pTableMetaInfo->name, pDataBlock->tableName, sizeof(pTableMetaInfo->name));
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
tfree(pTableMetaInfo->pTableMeta);
}
pTableMetaInfo->pTableMeta = taosCacheTransfer(tscMetaCache, (void**)&pDataBlock->pTableMeta);
pTableMetaInfo->pTableMeta = tscTableMetaClone(pDataBlock->pTableMeta);
} else {
assert(strncmp(pTableMetaInfo->name, pDataBlock->tableId, tListLen(pDataBlock->tableId)) == 0);
assert(strncmp(pTableMetaInfo->name, pDataBlock->tableName, tListLen(pDataBlock->tableName)) == 0);
}
/*
@ -681,14 +659,10 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
dataBuf->size = startOffset;
dataBuf->tsSource = -1;
tstrncpy(dataBuf->tableId, name, sizeof(dataBuf->tableId));
tstrncpy(dataBuf->tableName, name, sizeof(dataBuf->tableName));
/*
* The table meta may be released since the table meta cache are completed clean by other thread
* due to operation such as drop database. So here we add the reference count directly instead of invoke
* taosGetDataFromCache, which may return NULL value.
*/
dataBuf->pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMeta);
// Here we keep the tableMeta to avoid it being removed by other threads.
dataBuf->pTableMeta = tscTableMetaClone(pTableMeta);
assert(initialSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
@ -794,15 +768,15 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) {
return result;
}
static void extractTableMeta(SSqlCmd* pCmd) {
static void extractTableNameList(SSqlCmd* pCmd) {
pCmd->numOfTables = (int32_t) taosHashGetSize(pCmd->pTableBlockHashList);
pCmd->pTableMetaList = calloc(pCmd->numOfTables, POINTER_BYTES);
pCmd->pTableNameList = calloc(pCmd->numOfTables, POINTER_BYTES);
STableDataBlocks **p1 = taosHashIterate(pCmd->pTableBlockHashList, NULL);
int32_t i = 0;
while(p1) {
STableDataBlocks* pBlocks = *p1;
pCmd->pTableMetaList[i++] = taosCacheTransfer(tscMetaCache, (void**) &pBlocks->pTableMeta);
pCmd->pTableNameList[i++] = strndup(pBlocks->tableName, TSDB_TABLE_FNAME_LEN);
p1 = taosHashIterate(pCmd->pTableBlockHashList, p1);
}
@ -810,6 +784,7 @@ static void extractTableMeta(SSqlCmd* pCmd) {
}
int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
SSqlCmd* pCmd = &pSql->cmd;
void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false);
@ -824,7 +799,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
STableDataBlocks* dataBuf = NULL;
int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE,
tsInsertHeadSize, 0, pOneTableBlock->tableId, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
INSERT_HEAD_SIZE, 0, pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList);
if (ret != TSDB_CODE_SUCCESS) {
tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret);
taosHashCleanup(pVnodeDataBlockHashList);
@ -858,7 +833,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
tscSortRemoveDataBlockDupRows(pOneTableBlock);
char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1);
tscDebug("%p tableId:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableId,
tscDebug("%p name:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableName,
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
@ -888,7 +863,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql) {
pOneTableBlock = *p;
}
extractTableMeta(pCmd);
extractTableNameList(pCmd);
// free the table data blocks;
pCmd->pDataBlocks = pVnodeDataBlockList;
@ -909,6 +884,7 @@ void tscCloseTscObj(void *param) {
rpcClose(pObj->pDnodeConn);
pObj->pDnodeConn = NULL;
}
tfree(pObj->tscCorMgmtEpSet);
pthread_mutex_destroy(&pObj->mutex);
@ -1537,6 +1513,7 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) {
}
dest->tbnameCond.uid = src->tbnameCond.uid;
dest->tbnameCond.len = src->tbnameCond.len;
memcpy(&dest->joinInfo, &src->joinInfo, sizeof(SJoinInfo));
dest->relType = src->relType;
@ -1832,14 +1809,12 @@ SArray* tscVgroupTableInfoClone(SArray* pVgroupTables) {
return pa;
}
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, const char* address, bool removeFromCache) {
tscDebug("%p unref %d tables in the tableMeta cache", address, pQueryInfo->numOfTables);
void clearAllTableMetaInfo(SQueryInfo* pQueryInfo) {
for(int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, i);
tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables);
tscClearTableMetaInfo(pTableMetaInfo, removeFromCache);
tscClearTableMetaInfo(pTableMetaInfo);
free(pTableMetaInfo);
}
@ -1893,14 +1868,12 @@ STableMetaInfo* tscAddEmptyMetaInfo(SQueryInfo* pQueryInfo) {
return tscAddTableMetaInfo(pQueryInfo, NULL, NULL, NULL, NULL, NULL);
}
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache) {
void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo) {
if (pTableMetaInfo == NULL) {
return;
}
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), removeFromCache);
}
tfree(pTableMetaInfo->pTableMeta);
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
tscColumnListDestroy(pTableMetaInfo->tagColList);
@ -1917,10 +1890,12 @@ void tscResetForNextRetrieve(SSqlRes* pRes) {
}
void registerSqlObj(SSqlObj* pSql) {
int32_t ref = T_REF_INC(pSql->pTscObj);
tscDebug("%p add to tscObj:%p, ref:%d", pSql, pSql->pTscObj, ref);
taosAcquireRef(tscRefId, pSql->pTscObj->rid);
pSql->self = taosAddRef(tscObjRef, pSql);
int32_t num = atomic_add_fetch_32(&pSql->pTscObj->numOfObj, 1);
int32_t total = atomic_add_fetch_32(&tscNumOfObj, 1);
tscDebug("%p new SqlObj from %p, total in tscObj:%d, total:%d", pSql, pSql->pTscObj, num, total);
}
SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cmd) {
@ -1950,30 +1925,24 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
return NULL;
}
pNew->fp = fp;
pNew->fp = fp;
pNew->fetchFp = fp;
pNew->param = param;
pNew->param = param;
pNew->sqlstr = NULL;
pNew->maxRetry = TSDB_MAX_REPLICA;
pNew->sqlstr = strdup(pSql->sqlstr);
if (pNew->sqlstr == NULL) {
tscError("%p new subquery failed", pSql);
tscFreeSqlObj(pNew);
return NULL;
}
SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, 0);
assert(pSql->cmd.clauseIndex == 0);
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL);
registerSqlObj(pNew);
return pNew;
}
static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* pNewQueryInfo, int64_t uid) {
static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pNewQueryInfo, int64_t uid) {
int32_t numOfOutput = (int32_t)tscSqlExprNumOfExprs(pNewQueryInfo);
if (numOfOutput == 0) {
return;
@ -2026,15 +1995,9 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, tableIndex);
pNew->pTscObj = pSql->pTscObj;
pNew->pTscObj = pSql->pTscObj;
pNew->signature = pNew;
pNew->sqlstr = strdup(pSql->sqlstr);
if (pNew->sqlstr == NULL) {
tscError("%p new subquery failed, tableIndex:%d, vgroupIndex:%d", pSql, tableIndex, pTableMetaInfo->vgroupIndex);
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto _error;
}
pNew->sqlstr = strdup(pSql->sqlstr);
SSqlCmd* pnCmd = &pNew->cmd;
memcpy(pnCmd, pCmd, sizeof(SSqlCmd));
@ -2050,7 +2013,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
pnCmd->numOfTables = 0;
pnCmd->parseFinished = 1;
pnCmd->pTableMetaList = NULL;
pnCmd->pTableNameList = NULL;
pnCmd->pTableBlockHashList = NULL;
if (tscAddSubqueryInfo(pnCmd) != TSDB_CODE_SUCCESS) {
@ -2122,35 +2085,34 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
goto _error;
}
doSetSqlExprAndResultFieldInfo(pQueryInfo, pNewQueryInfo, uid);
doSetSqlExprAndResultFieldInfo(pNewQueryInfo, uid);
pNew->fp = fp;
pNew->fp = fp;
pNew->fetchFp = fp;
pNew->param = param;
pNew->param = param;
pNew->maxRetry = TSDB_MAX_REPLICA;
char* name = pTableMetaInfo->name;
STableMetaInfo* pFinalInfo = NULL;
if (pPrevSql == NULL) {
STableMeta* pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMetaInfo->pTableMeta); // get by name may failed due to the cache cleanup
STableMeta* pTableMeta = tscTableMetaClone(pTableMetaInfo->pTableMeta);
assert(pTableMeta != NULL);
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList,
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
STableMeta* pPrevTableMeta = taosCacheTransfer(tscMetaCache, (void**)&pPrevInfo->pTableMeta);
STableMeta* pPrevTableMeta = tscTableMetaClone(pPrevInfo->pTableMeta);
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList,
pTableMetaInfo->pVgroupTables);
}
// this case cannot be happened
if (pFinalInfo->pTableMeta == NULL) {
tscError("%p new subquery failed since no tableMeta in cache, name:%s", pSql, name);
tscError("%p new subquery failed since no tableMeta, name:%s", pSql, name);
if (pPrevSql != NULL) { // pass the previous error to client
assert(pPrevSql->res.code != TSDB_CODE_SUCCESS);
@ -2523,7 +2485,7 @@ bool tscSetSqlOwner(SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res;
// set the sql object owner
uint64_t threadId = taosGetPthreadId();
uint64_t threadId = taosGetSelfPthreadId();
if (atomic_val_compare_exchange_64(&pSql->owner, 0, threadId) != 0) {
pRes->code = TSDB_CODE_QRY_IN_EXEC;
return false;
@ -2577,6 +2539,7 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) {
for(int32_t j = 0; j < pVgroupInfo->numOfEps; ++j) {
tfree(pVgroupInfo->epAddr[j].fqdn);
}
for(int32_t j = pVgroupInfo->numOfEps; j < TSDB_MAX_REPLICA; j++) {
assert( pVgroupInfo->epAddr[j].fqdn == NULL );
}
@ -2629,4 +2592,88 @@ int32_t copyTagData(STagData* dst, const STagData* src) {
}
return 0;
}
}
STableMeta* createSuperTableMeta(STableMetaMsg* pChild) {
assert(pChild != NULL);
int32_t total = pChild->numOfColumns + pChild->numOfTags;
STableMeta* pTableMeta = calloc(1, sizeof(STableMeta) + sizeof(SSchema) * total);
pTableMeta->tableType = TSDB_SUPER_TABLE;
pTableMeta->tableInfo.numOfTags = pChild->numOfTags;
pTableMeta->tableInfo.numOfColumns = pChild->numOfColumns;
pTableMeta->tableInfo.precision = pChild->precision;
pTableMeta->id.tid = 0;
pTableMeta->id.uid = pChild->suid;
pTableMeta->tversion = pChild->tversion;
pTableMeta->sversion = pChild->sversion;
memcpy(pTableMeta->schema, pChild->schema, sizeof(SSchema) * total);
int32_t num = pTableMeta->tableInfo.numOfColumns;
for(int32_t i = 0; i < num; ++i) {
pTableMeta->tableInfo.rowSize += pTableMeta->schema[i].bytes;
}
return pTableMeta;
}
uint32_t tscGetTableMetaSize(STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
int32_t totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags;
return sizeof(STableMeta) + totalCols * sizeof(SSchema);
}
CChildTableMeta* tscCreateChildMeta(STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
CChildTableMeta* cMeta = calloc(1, sizeof(CChildTableMeta));
cMeta->tableType = TSDB_CHILD_TABLE;
cMeta->vgId = pTableMeta->vgId;
cMeta->id = pTableMeta->id;
tstrncpy(cMeta->sTableName, pTableMeta->sTableName, TSDB_TABLE_FNAME_LEN);
return cMeta;
}
int32_t tscCreateTableMetaFromCChildMeta(STableMeta* pChild, const char* name) {
assert(pChild != NULL);
uint32_t size = tscGetTableMetaMaxSize();
STableMeta* p = calloc(1, size);
taosHashGetClone(tscTableMetaInfo, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, p, -1);
if (p->id.uid > 0) { // tableMeta exists, build child table meta and return
pChild->sversion = p->sversion;
pChild->tversion = p->tversion;
memcpy(&pChild->tableInfo, &p->tableInfo, sizeof(STableInfo));
int32_t total = pChild->tableInfo.numOfColumns + pChild->tableInfo.numOfTags;
memcpy(pChild->schema, p->schema, sizeof(SSchema) *total);
tfree(p);
return TSDB_CODE_SUCCESS;
} else { // super table has been removed, current tableMeta is also expired. remove it here
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tfree(p);
return -1;
}
}
uint32_t tscGetTableMetaMaxSize() {
return sizeof(STableMeta) + TSDB_MAX_COLUMNS * sizeof(SSchema);
}
STableMeta* tscTableMetaClone(STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
uint32_t size = tscGetTableMetaSize(pTableMeta);
STableMeta* p = calloc(1, size);
memcpy(p, pTableMeta, size);
return p;
}
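
The new tscGetTableMetaSize/tscTableMetaClone helpers above replace the old cache hand-offs with plain deep copies: the size is derived from the column and tag count, then the fixed header plus the trailing schema array is copied in a single memcpy. A toy sketch of that size-then-copy pattern (simplified field names, not the real STableMeta/SSchema layout):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { char name[64]; uint8_t type; int16_t bytes; } Schema;
typedef struct { int16_t numOfColumns; int16_t numOfTags; Schema schema[]; } TableMeta;

// Header plus one Schema entry per column and per tag.
static uint32_t tableMetaSize(const TableMeta *m) {
  int32_t totalCols = m->numOfColumns + m->numOfTags;
  return sizeof(TableMeta) + totalCols * sizeof(Schema);
}

// Deep copy: the trailing schema array travels with the header.
static TableMeta *tableMetaClone(const TableMeta *m) {
  uint32_t size = tableMetaSize(m);
  TableMeta *p = calloc(1, size);
  if (p != NULL) {
    memcpy(p, m, size);
  }
  return p;
}

int main(void) {
  TableMeta *m = calloc(1, sizeof(TableMeta) + 3 * sizeof(Schema));
  m->numOfColumns = 2;
  m->numOfTags = 1;
  TableMeta *copy = tableMetaClone(m);
  printf("cloned %u bytes\n", (unsigned)tableMetaSize(copy));
  free(m);
  free(copy);
  return 0;
}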

View File

@ -32,8 +32,8 @@ extern uint16_t tsSyncPort;
extern uint16_t tsArbitratorPort;
extern int32_t tsStatusInterval;
extern int32_t tsNumOfMnodes;
extern int32_t tsEnableVnodeBak;
extern int32_t tsEnableTelemetryReporting;
extern int8_t tsEnableVnodeBak;
extern int8_t tsEnableTelemetryReporting;
extern char tsEmail[];
extern char tsArbitrator[];
@ -51,7 +51,7 @@ extern int8_t tsDaylight;
extern char tsTimezone[];
extern char tsLocale[];
extern char tsCharset[]; // default encode string
extern int32_t tsEnableCoreFile;
extern int8_t tsEnableCoreFile;
extern int32_t tsCompressMsgSize;
extern char tsTempDir[];
@ -59,12 +59,12 @@ extern char tsTempDir[];
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer for each data node during query processing
extern int32_t tsRetrieveBlockingModel;// retrieve threads will be blocked
extern int32_t tsKeepOriginalColumnName;
extern int8_t tsKeepOriginalColumnName;
// client
extern int32_t tsTableMetaKeepTimer;
extern int32_t tsMaxSQLStringLen;
extern int32_t tsTscEnableRecordSql;
extern int8_t tsTscEnableRecordSql;
extern int32_t tsMaxNumOfOrderedResults;
extern int32_t tsMinSlidingTime;
extern int32_t tsMinIntervalTime;
@ -93,48 +93,51 @@ extern int16_t tsWAL;
extern int32_t tsFsyncPeriod;
extern int32_t tsReplications;
extern int32_t tsQuorum;
extern int32_t tsUpdate;
extern int8_t tsUpdate;
extern int8_t tsCacheLastRow;
// balance
extern int32_t tsEnableBalance;
extern int32_t tsAlternativeRole;
extern int8_t tsEnableBalance;
extern int8_t tsAlternativeRole;
extern int32_t tsBalanceInterval;
extern int32_t tsOfflineThreshold;
extern int32_t tsMnodeEqualVnodeNum;
extern int32_t tsFlowCtrl;
extern int8_t tsEnableFlowCtrl;
extern int8_t tsEnableSlaveQuery;
extern int8_t tsEnableAdjustMaster;
// restful
extern int32_t tsEnableHttpModule;
extern int8_t tsEnableHttpModule;
extern int32_t tsRestRowLimit;
extern uint16_t tsHttpPort;
extern int32_t tsHttpCacheSessions;
extern int32_t tsHttpSessionExpire;
extern int32_t tsHttpMaxThreads;
extern int32_t tsHttpEnableCompress;
extern int32_t tsHttpEnableRecordSql;
extern int32_t tsTelegrafUseFieldNum;
extern int8_t tsHttpEnableCompress;
extern int8_t tsHttpEnableRecordSql;
extern int8_t tsTelegrafUseFieldNum;
// mqtt
extern int32_t tsEnableMqttModule;
extern char tsMqttHostName[];
extern char tsMqttPort[];
extern char tsMqttUser[];
extern char tsMqttPass[];
extern char tsMqttClientId[];
extern char tsMqttTopic[];
extern int8_t tsEnableMqttModule;
extern char tsMqttHostName[];
extern char tsMqttPort[];
extern char tsMqttUser[];
extern char tsMqttPass[];
extern char tsMqttClientId[];
extern char tsMqttTopic[];
// monitor
extern int32_t tsEnableMonitorModule;
extern int8_t tsEnableMonitorModule;
extern char tsMonitorDbName[];
extern char tsInternalPass[];
extern int32_t tsMonitorInterval;
// stream
extern int32_t tsEnableStream;
extern int8_t tsEnableStream;
// internal
extern int32_t tsPrintAuth;
extern uint32_t tscEmbedded;
extern int8_t tsPrintAuth;
extern int8_t tscEmbedded;
extern char configDir[];
extern char tsVnodeDir[];
extern char tsDnodeDir[];
@ -161,7 +164,7 @@ extern float tsMinimalLogDirGB;
extern float tsReservedTmpDirectorySpace;
extern float tsMinimalDataDirGB;
extern int32_t tsTotalMemoryMB;
extern int32_t tsVersion;
extern uint32_t tsVersion;
// build info
extern char version[];
@ -171,13 +174,13 @@ extern char gitinfoOfInternal[];
extern char buildinfo[];
// log
extern int32_t tsAsyncLog;
extern int8_t tsAsyncLog;
extern int32_t tsNumOfLogLines;
extern int32_t tsLogKeepDays;
extern int32_t dDebugFlag;
extern int32_t vDebugFlag;
extern int32_t mDebugFlag;
extern uint32_t cDebugFlag;
extern int32_t cDebugFlag;
extern int32_t jniDebugFlag;
extern int32_t tmrDebugFlag;
extern int32_t sdbDebugFlag;
@ -187,7 +190,7 @@ extern int32_t monDebugFlag;
extern int32_t uDebugFlag;
extern int32_t rpcDebugFlag;
extern int32_t odbcDebugFlag;
extern uint32_t qDebugFlag;
extern int32_t qDebugFlag;
extern int32_t wDebugFlag;
extern int32_t cqDebugFlag;
extern int32_t debugFlag;

View File

@ -23,7 +23,7 @@ extern "C" {
#include "tlog.h"
extern int32_t uDebugFlag;
extern uint32_t tscEmbedded;
extern int8_t tscEmbedded;
#define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL", tscEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
#define uError(...) { if (uDebugFlag & DEBUG_ERROR) { taosPrintLog("UTL ERROR ", tscEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}

View File

@ -39,8 +39,8 @@ uint16_t tsSyncPort = 6040;
uint16_t tsArbitratorPort = 6042;
int32_t tsStatusInterval = 1; // second
int32_t tsNumOfMnodes = 3;
int32_t tsEnableVnodeBak = 1;
int32_t tsEnableTelemetryReporting = 1;
int8_t tsEnableVnodeBak = 1;
int8_t tsEnableTelemetryReporting = 1;
char tsEmail[TSDB_FQDN_LEN] = {0};
// common
@ -56,7 +56,7 @@ int8_t tsDaylight = 0;
char tsTimezone[TSDB_TIMEZONE_LEN] = {0};
char tsLocale[TSDB_LOCALE_LEN] = {0};
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
int32_t tsEnableCoreFile = 0;
int8_t tsEnableCoreFile = 0;
int32_t tsMaxBinaryDisplayWidth = 30;
char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/";
@ -73,7 +73,7 @@ int32_t tsCompressMsgSize = -1;
// client
int32_t tsTableMetaKeepTimer = 7200; // second
int32_t tsMaxSQLStringLen = TSDB_MAX_SQL_LEN;
int32_t tsTscEnableRecordSql = 0;
int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from
// one virtual node, to order according to timestamp
@ -110,7 +110,7 @@ int32_t tsQueryBufferSize = -1;
int32_t tsRetrieveBlockingModel = 0;
// last_row(*), first(*), last_row(ts, col1, col2) query, the result fields will be the original column name
int32_t tsKeepOriginalColumnName = 0;
int8_t tsKeepOriginalColumnName = 0;
// db parameters
int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
@ -126,33 +126,36 @@ int16_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
int32_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION;
int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION;
int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
int32_t tsMaxVgroupsPerDb = 0;
int32_t tsMinTablePerVnode = TSDB_TABLES_STEP;
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
// balance
int32_t tsEnableBalance = 1;
int32_t tsAlternativeRole = 0;
int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400*100; // seconds 10days
int8_t tsEnableBalance = 1;
int8_t tsAlternativeRole = 0;
int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400 * 100; // seconds 10days
int32_t tsMnodeEqualVnodeNum = 4;
int32_t tsFlowCtrl = 1;
int8_t tsEnableFlowCtrl = 1;
int8_t tsEnableSlaveQuery = 1;
int8_t tsEnableAdjustMaster = 1;
// restful
int32_t tsEnableHttpModule = 1;
int8_t tsEnableHttpModule = 1;
int32_t tsRestRowLimit = 10240;
uint16_t tsHttpPort = 6041; // only tcp, range tcp[6041]
int32_t tsHttpCacheSessions = 1000;
int32_t tsHttpSessionExpire = 36000;
int32_t tsHttpMaxThreads = 2;
int32_t tsHttpEnableCompress = 1;
int32_t tsHttpEnableRecordSql = 0;
int32_t tsTelegrafUseFieldNum = 0;
int8_t tsHttpEnableCompress = 1;
int8_t tsHttpEnableRecordSql = 0;
int8_t tsTelegrafUseFieldNum = 0;
// mqtt
int32_t tsEnableMqttModule = 0; // not finished yet, not started it by default
int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default
char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org";
char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883";
char tsMqttUser[TSDB_MQTT_USER_LEN] = {0};
@ -161,24 +164,24 @@ char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber";
char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // #
// monitor
int32_t tsEnableMonitorModule = 1;
int8_t tsEnableMonitorModule = 1;
char tsMonitorDbName[TSDB_DB_NAME_LEN] = "log";
char tsInternalPass[] = "secretkey";
int32_t tsMonitorInterval = 30; // seconds
// stream
int32_t tsEnableStream = 1;
int8_t tsEnableStream = 1;
// internal
int32_t tsPrintAuth = 0;
uint32_t tscEmbedded = 0;
char configDir[TSDB_FILENAME_LEN] = {0};
char tsVnodeDir[TSDB_FILENAME_LEN] = {0};
char tsDnodeDir[TSDB_FILENAME_LEN] = {0};
char tsMnodeDir[TSDB_FILENAME_LEN] = {0};
char tsDataDir[TSDB_FILENAME_LEN] = {0};
char tsScriptDir[TSDB_FILENAME_LEN] = {0};
char tsVnodeBakDir[TSDB_FILENAME_LEN] = {0};
int8_t tsPrintAuth = 0;
int8_t tscEmbedded = 0;
char configDir[TSDB_FILENAME_LEN] = {0};
char tsVnodeDir[TSDB_FILENAME_LEN] = {0};
char tsDnodeDir[TSDB_FILENAME_LEN] = {0};
char tsMnodeDir[TSDB_FILENAME_LEN] = {0};
char tsDataDir[TSDB_FILENAME_LEN] = {0};
char tsScriptDir[TSDB_FILENAME_LEN] = {0};
char tsVnodeBakDir[TSDB_FILENAME_LEN] = {0};
/*
* minimum scale for whole system, millisecond by default
@ -198,10 +201,10 @@ float tsTotalTmpDirGB = 0;
float tsTotalDataDirGB = 0;
float tsAvailTmpDirectorySpace = 0;
float tsAvailDataDirGB = 0;
float tsReservedTmpDirectorySpace = 0.1f;
float tsMinimalDataDirGB = 0.5f;
float tsReservedTmpDirectorySpace = 1.0f;
float tsMinimalDataDirGB = 1.0f;
int32_t tsTotalMemoryMB = 0;
int32_t tsVersion = 0;
uint32_t tsVersion = 0;
// log
int32_t tsNumOfLogLines = 10000000;
@ -209,13 +212,13 @@ int32_t mDebugFlag = 131;
int32_t sdbDebugFlag = 131;
int32_t dDebugFlag = 135;
int32_t vDebugFlag = 135;
uint32_t cDebugFlag = 131;
int32_t cDebugFlag = 131;
int32_t jniDebugFlag = 131;
int32_t odbcDebugFlag = 131;
int32_t httpDebugFlag = 131;
int32_t mqttDebugFlag = 131;
int32_t monDebugFlag = 131;
uint32_t qDebugFlag = 131;
int32_t qDebugFlag = 131;
int32_t rpcDebugFlag = 131;
int32_t uDebugFlag = 131;
int32_t debugFlag = 0;
@ -275,12 +278,16 @@ bool taosCfgDynamicOptions(char *msg) {
for (int32_t i = 0; i < tsGlobalConfigNum; ++i) {
SGlobalCfg *cfg = tsGlobalConfig + i;
//if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
if (cfg->valType != TAOS_CFG_VTYPE_INT32) continue;
if (cfg->valType != TAOS_CFG_VTYPE_INT32 && cfg->valType != TAOS_CFG_VTYPE_INT8) continue;
int32_t cfgLen = (int32_t)strlen(cfg->option);
if (cfgLen != olen) continue;
if (strncasecmp(option, cfg->option, olen) != 0) continue;
*((int32_t *)cfg->ptr) = vint;
if (cfg->valType == TAOS_CFG_VTYPE_INT32) {
*((int32_t *)cfg->ptr) = vint;
} else {
*((int8_t *)cfg->ptr) = (int8_t)vint;
}
if (strncasecmp(cfg->option, "monitor", olen) == 0) {
if (1 == vint) {
@ -468,7 +475,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "vnodeBak";
cfg.ptr = &tsEnableVnodeBak;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -478,7 +485,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "telemetryReporting";
cfg.ptr = &tsEnableTelemetryReporting;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -488,7 +495,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "balance";
cfg.ptr = &tsEnableBalance;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -509,7 +516,7 @@ static void doInitGlobalConfig(void) {
// 0-any; 1-mnode; 2-vnode
cfg.option = "role";
cfg.ptr = &tsAlternativeRole;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 0;
cfg.maxValue = 2;
@ -542,7 +549,7 @@ static void doInitGlobalConfig(void) {
cfg.ptr = &tsOfflineThreshold;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 5;
cfg.minValue = 3;
cfg.maxValue = 7200000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_SECOND;
@ -811,7 +818,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "update";
cfg.ptr = &tsUpdate;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_DB_UPDATE;
cfg.maxValue = TSDB_MAX_DB_UPDATE;
@ -901,7 +908,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "keepColumnName";
cfg.ptr = &tsKeepOriginalColumnName;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -1004,8 +1011,28 @@ static void doInitGlobalConfig(void) {
// module configs
cfg.option = "flowctrl";
cfg.ptr = &tsFlowCtrl;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.ptr = &tsEnableFlowCtrl;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "slaveQuery";
cfg.ptr = &tsEnableSlaveQuery;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "adjustMaster";
cfg.ptr = &tsEnableAdjustMaster;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -1015,7 +1042,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "http";
cfg.ptr = &tsEnableHttpModule;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -1025,7 +1052,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "mqtt";
cfg.ptr = &tsEnableMqttModule;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -1035,7 +1062,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "monitor";
cfg.ptr = &tsEnableMonitorModule;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -1045,7 +1072,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "stream";
cfg.ptr = &tsEnableStream;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -1055,7 +1082,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "httpEnableRecordSql";
cfg.ptr = &tsHttpEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -1065,7 +1092,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "telegrafUseFieldNum";
cfg.ptr = &tsTelegrafUseFieldNum;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -1116,7 +1143,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "asyncLog";
cfg.ptr = &tsAsyncLog;
cfg.valType = TAOS_CFG_VTYPE_INT16;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -1317,7 +1344,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "enableRecordSql";
cfg.ptr = &tsTscEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -1327,7 +1354,7 @@ static void doInitGlobalConfig(void) {
cfg.option = "enableCoreFile";
cfg.ptr = &tsEnableCoreFile;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 0;
cfg.maxValue = 1;
@ -1443,17 +1470,28 @@ int32_t taosCheckGlobalCfg() {
tsNumOfCores = 1;
}
if (tsMaxTablePerVnode < tsMinTablePerVnode) {
uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)",
tsMaxTablePerVnode, tsMinTablePerVnode, tsMinTablePerVnode);
tsMaxTablePerVnode = tsMinTablePerVnode;
}
// todo refactor
tsVersion = 0;
for (int i = 0; i < 10; i++) {
for (int ver = 0, i = 0; i < TSDB_VERSION_LEN; ++i) {
if (version[i] >= '0' && version[i] <= '9') {
tsVersion = tsVersion * 10 + (version[i] - '0');
ver = ver * 10 + (version[i] - '0');
} else if (version[i] == '.') {
tsVersion |= ver & 0xFF;
tsVersion <<= 8;
ver = 0;
} else if (version[i] == 0) {
tsVersion |= ver & 0xFF;
break;
}
}
tsVersion = 10 * tsVersion;
tsDnodeShellPort = tsServerPort + TSDB_PORT_DNODESHELL; // udp[6035-6039] tcp[6035]
tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp
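
The reworked loop in taosCheckGlobalCfg above now packs one byte per dotted version component into tsVersion instead of concatenating decimal digits. A standalone sketch of that packing with a worked example (assuming the packed form shown in the new loop; e.g. "2.0.13.0" becomes 0x02000D00):

#include <stdint.h>
#include <stdio.h>

// Pack "a.b.c.d" into one byte per component, mirroring the new loop.
static uint32_t packVersion(const char *version) {
  uint32_t packed = 0;
  int ver = 0;
  for (int i = 0; ; ++i) {
    if (version[i] >= '0' && version[i] <= '9') {
      ver = ver * 10 + (version[i] - '0');
    } else if (version[i] == '.') {
      packed |= ver & 0xFF;
      packed <<= 8;
      ver = 0;
    } else if (version[i] == 0) {
      packed |= ver & 0xFF;
      break;
    }
  }
  return packed;
}

int main(void) {
  printf("0x%08X\n", (unsigned)packVersion("2.0.13.0"));  // prints 0x02000D00
  return 0;
}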

View File

@ -56,6 +56,12 @@
<scope>test</scope>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>5.1.47</version>
</dependency>
<!-- for restful -->
<dependency>
<groupId>org.apache.httpcomponents</groupId>
@ -80,6 +86,7 @@
</dependency>
</dependencies>
<build>
<plugins>
<plugin>

View File

@ -17,7 +17,7 @@ public class RestfulConnection implements Connection {
private final String host;
private final int port;
private final Properties props;
private final String database;
private volatile String database;
private final String url;
/******************************************************/
private boolean isClosed;
@ -139,7 +139,9 @@ public class RestfulConnection implements Connection {
public void setCatalog(String catalog) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
//nothing to do
synchronized (RestfulConnection.class) {
this.database = catalog;
}
}
@Override
@ -409,7 +411,9 @@ public class RestfulConnection implements Connection {
public void setSchema(String schema) throws SQLException {
if (isClosed())
throw new SQLException(CONNECTION_IS_CLOSED);
//nothing to do
synchronized (RestfulConnection.class) {
this.database = schema;
}
}
@Override

View File

@ -109,7 +109,7 @@ public class RestfulStatement implements Statement {
throw new SQLException("Database not specified or available");
final String url = "http://" + conn.getHost().trim() + ":" + conn.getPort() + "/rest/sql";
HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
// HttpClientPoolUtil.execute(url, "use " + conn.getDatabase());
String result = HttpClientPoolUtil.execute(url, sql);
JSONObject jsonObject = JSON.parseObject(result);
if (jsonObject.getString("status").equals("error")) {
@ -215,6 +215,7 @@ public class RestfulStatement implements Statement {
// if a "use" statement was executed, set the current Statement's catalog to the new database
if (SqlSyntaxValidator.isUseSql(sql)) {
this.database = sql.trim().replace("use", "").trim();
this.conn.setCatalog(this.database);
}
if (this.database == null)
throw new SQLException("Database not specified or available");

View File

@ -96,6 +96,7 @@ public class HttpClientPoolUtil {
initPools();
}
method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0);
method.setHeader("Authorization", "Basic cm9vdDp0YW9zZGF0YQ==");
method.setHeader("Content-Type", "text/plain");
method.setEntity(new StringEntity(data, Charset.forName("UTF-8")));
HttpContext context = HttpClientContext.create();

View File

@ -144,18 +144,9 @@ function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro=false) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
let currOffset = 0;
// every 4 bytes, a character is encoded;
while (currOffset < data.length) {
let dataEntry = data.slice(currOffset, currOffset + nbytes); //one entry in a row under a column;
if (dataEntry.readInt64LE(0) == FieldTypes.C_NCHAR_NULL) {
res.push(null);
}
else {
res.push(dataEntry.toString("utf16le").replace(/\u0000/g, ""));
}
currOffset += nbytes;
}
let dataEntry = data.slice(0, nbytes); //one entry in a row under a column;
//TODO: should use the correct character encoding
res.push(dataEntry.toString("utf-8"));
return res;
}
@ -349,11 +340,13 @@ CTaosInterface.prototype.useResult = function useResult(result) {
return fields;
}
CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
let pblock = ref.ref(ref.ref(ref.NULL)); // equal to our raw data
let num_of_rows = this.libtaos.taos_fetch_block(result, pblock)
if (num_of_rows == 0) {
//let pblock = ref.ref(ref.ref(ref.NULL)); // equal to our raw data
let pblock = this.libtaos.taos_fetch_row(result);
let num_of_rows = 1;
if (ref.isNull(pblock) == true) {
return {block:null, num_of_rows:0};
}
var fieldL = this.libtaos.taos_fetch_lengths(result);
let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO);
@ -361,26 +354,28 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
var fieldlens = [];
if (ref.isNull(fieldL) == false) {
for (let i = 0; i < fields.length; i ++) {
let plen = ref.reinterpret(fieldL, 4, i*4);
let plen = ref.reinterpret(fieldL, 4, i*4);
let len = plen.readInt32LE(0);
fieldlens.push(len);
fieldlens.push(len);
}
}
let blocks = new Array(fields.length);
blocks.fill(null);
num_of_rows = Math.abs(num_of_rows);
//num_of_rows = Math.abs(num_of_rows);
let offset = 0;
pblock = pblock.deref();
for (let i = 0; i < fields.length; i++) {
pdata = ref.reinterpret(pblock,8,i*8);
pdata = ref.ref(pdata.readPointer());
if (!convertFunctions[fields[i]['type']] ) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
blocks[i] = convertFunctions[fields[i]['type']](pdata, 1, fieldlens[i], offset, isMicro);
if(ref.isNull(pdata.readPointer())){
blocks[i] = new Array();
}else{
pdata = ref.ref(pdata.readPointer());
if (!convertFunctions[fields[i]['type']] ) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
blocks[i] = convertFunctions[fields[i]['type']](pdata, 1, fieldlens[i], offset, isMicro);
}
}
return {blocks: blocks, num_of_rows:Math.abs(num_of_rows)}
}
@ -446,14 +441,18 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
}
if (numOfRows2 > 0){
for (let i = 0; i < fields.length; i++) {
if (!convertFunctions[fields[i]['type']] ) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
let prow = ref.reinterpret(row,8,i*8);
prow = prow.readPointer();
prow = ref.ref(prow);
blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
//offset += fields[i]['bytes'] * numOfRows2;
if(ref.isNull(pdata.readPointer())){
blocks[i] = new Array();
}else{
if (!convertFunctions[fields[i]['type']] ) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
let prow = ref.reinterpret(row,8,i*8);
prow = prow.readPointer();
prow = ref.ref(prow);
blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
//offset += fields[i]['bytes'] * numOfRows2;
}
}
}
callback(param2, result2, numOfRows2, blocks);

View File

@ -25,6 +25,7 @@ function TaosResult(data, fields) {
* @function pretty
* @since 1.0.6
*/
TaosResult.prototype.pretty = function pretty() {
let fieldsStr = "";
let sizing = [];
@ -46,8 +47,7 @@ TaosResult.prototype.pretty = function pretty() {
row.data.forEach((entry, i) => {
if (this.fields[i]._field.type == 9) {
entry = entry.toTaosString();
}
else {
} else {
entry = entry == null ? 'null' : entry.toString();
}
rowStr += entry

View File

@ -1,6 +1,6 @@
{
"name": "td2.0-connector",
"version": "2.0.1",
"version": "2.0.4",
"description": "A Node.js connector for TDengine.",
"main": "tdengine.js",
"scripts": {

View File

@ -48,6 +48,7 @@ for (let i = 0; i < 10000; i++) {
// Select
console.log('select * from td_connector_test.all_types limit 3 offset 100;');
c1.execute('select * from td_connector_test.all_types limit 2 offset 100;');
var d = c1.fetchall();
console.log(c1.fields);
console.log(d);
@ -77,13 +78,33 @@ c1.query('select stddev(_double), stddev(_bigint), stddev(_float) from all_types
})
// Binding arguments, and then using promise
var q = c1.query('select * from td_connector_test.all_types where ts >= ? and _int > ? limit 100 offset 40;').bind(new Date(1231), 100)
var q = c1.query('select _nchar from td_connector_test.all_types where ts >= ? and _int > ? limit 100 offset 40;').bind(new Date(1231), 100)
console.log(q.query);
q.execute().then(function(r) {
r.pretty();
});
// test query null value
c1.execute("create table if not exists td_connector_test.weather(ts timestamp, temperature float, humidity int) tags(location nchar(64))");
c1.execute("insert into t1 using weather tags('北京') values(now, 11.11, 11)");
c1.execute("insert into t1(ts, temperature) values(now, 22.22)");
c1.execute("insert into t1(ts, humidity) values(now, 33)");
c1.query('select * from test.t1', true).then(function (result) {
result.pretty();
});
var q = c1.query('select * from td_connector_test.weather');
console.log(q.query);
q.execute().then(function(r) {
r.pretty();
});
function sleep(sleepTime) {
for(var start = +new Date; +new Date - start <= sleepTime; ) { }
}
sleep(10000);
// Raw Async Testing (Callbacks, not promises)
function cb2(param, result, rowCount, rd) {
@ -129,16 +150,21 @@ setTimeout(function(){
c1.fetchall_a(thisRes, cb4, param);
},100);
// Async through promises
var aq = c1.query('select count(*) from td_connector_test.all_types;',false);
aq.execute_a().then(function(data) {
data.pretty();
});
c1.query('describe td_connector_test.stabletest;').execute_a().then(r=> r.pretty());
c1.query('describe td_connector_test.stabletest').execute_a().then(function(r){
r.pretty()
});
setTimeout(function(){
c1.query('drop database td_connector_test;');
},200);
setTimeout(function(){
conn.close();
},2000);

View File

@ -25,6 +25,7 @@ int32_t dnodeInitCfg();
void dnodeCleanupCfg();
void dnodeUpdateCfg(SDnodeCfg *cfg);
int32_t dnodeGetDnodeId();
void dnodeGetClusterId(char *clusterId);
void dnodeGetCfg(int32_t *dnodeId, char *clusterId);
#ifdef __cplusplus

View File

@ -51,6 +51,12 @@ int32_t dnodeGetDnodeId() {
return dnodeId;
}
void dnodeGetClusterId(char *clusterId) {
pthread_mutex_lock(&tsCfgMutex);
tstrncpy(clusterId, tsCfg.clusterId, TSDB_CLUSTER_ID_LEN);
pthread_mutex_unlock(&tsCfgMutex);
}
void dnodeGetCfg(int32_t *dnodeId, char *clusterId) {
pthread_mutex_lock(&tsCfgMutex);
*dnodeId = tsCfg.dnodeId;

View File

@ -311,6 +311,14 @@ void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell) {
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
dDebug("mnode index:%d %s:%d", i, epSet.fqdn[i], epSet.port[i]);
if (strcmp(epSet.fqdn[i], tsLocalFqdn) == 0) {
if ((epSet.port[i] == tsServerPort + TSDB_PORT_DNODEDNODE && !forShell) ||
(epSet.port[i] == tsServerPort && forShell)) {
epSet.inUse = (i + 1) % epSet.numOfEps;
dDebug("mnode index:%d %s:%d set inUse to %d", i, epSet.fqdn[i], epSet.port[i], epSet.inUse);
}
}
epSet.port[i] = htons(epSet.port[i]);
}

View File

@ -16,9 +16,7 @@
#define _DEFAULT_SOURCE
#include "os.h"
#include "tqueue.h"
#include "twal.h"
#include "mnode.h"
#include "dnodeVMgmt.h"
#include "dnodeMInfos.h"
#include "dnodeMRead.h"

View File

@ -18,7 +18,6 @@
#include "ttimer.h"
#include "tqueue.h"
#include "mnode.h"
#include "dnodeVMgmt.h"
#include "dnodeMInfos.h"
#include "dnodeMWrite.h"
@ -185,7 +184,19 @@ void dnodeReprocessMWriteMsg(void *pMsg) {
dDebug("msg:%p, app:%p type:%s is redirected for mnode not running, retry times:%d", pWrite, pWrite->rpcMsg.ahandle,
taosMsg[pWrite->rpcMsg.msgType], pWrite->retry);
dnodeSendRedirectMsg(pMsg, true);
if (pWrite->pBatchMasterMsg) {
++pWrite->pBatchMasterMsg->received;
if (pWrite->pBatchMasterMsg->successed + pWrite->pBatchMasterMsg->received
>= pWrite->pBatchMasterMsg->expected) {
dnodeSendRedirectMsg(&pWrite->pBatchMasterMsg->rpcMsg, true);
dnodeFreeMWriteMsg(pWrite->pBatchMasterMsg);
}
mnodeDestroySubMsg(pWrite);
return;
}
dnodeSendRedirectMsg(&pWrite->rpcMsg, true);
dnodeFreeMWriteMsg(pWrite);
} else {
dDebug("msg:%p, app:%p type:%s is reput into mwrite queue:%p, retry times:%d", pWrite, pWrite->rpcMsg.ahandle,

View File

@ -113,6 +113,7 @@ static void dnodeCleanupTmr() {
int32_t dnodeInitSystem() {
dnodeSetRunStatus(TSDB_RUN_STATUS_INITIALIZE);
tscEmbedded = 1;
taosIgnSIGPIPE();
taosBlockSIGPIPE();
taosResolveCRC();
taosInitGlobalCfg();
@ -120,7 +121,6 @@ int32_t dnodeInitSystem() {
taosSetCoreDump();
taosInitNotes();
dnodeInitTmr();
signal(SIGPIPE, SIG_IGN);
if (dnodeCreateDir(tsLogDir) < 0) {
printf("failed to create dir: %s, reason: %s\n", tsLogDir, strerror(errno));

View File

@ -60,7 +60,7 @@ int32_t dnodeInitServer() {
rpcInit.label = "DND-S";
rpcInit.numOfThreads = 1;
rpcInit.cfp = dnodeProcessReqMsgFromDnode;
rpcInit.sessions = TSDB_MAX_VNODES;
rpcInit.sessions = TSDB_MAX_VNODES << 4;
rpcInit.connType = TAOS_CONN_SERVER;
rpcInit.idleTime = tsShellActivityTimer * 1000;
@ -123,7 +123,7 @@ int32_t dnodeInitClient() {
rpcInit.label = "DND-C";
rpcInit.numOfThreads = 1;
rpcInit.cfp = dnodeProcessRspFromDnode;
rpcInit.sessions = TSDB_MAX_VNODES;
rpcInit.sessions = TSDB_MAX_VNODES << 4;
rpcInit.connType = TAOS_CONN_CLIENT;
rpcInit.idleTime = tsShellActivityTimer * 1000;
rpcInit.user = "t";

View File

@ -195,7 +195,7 @@ static void addRuntimeInfo(SBufferWriter* bw) {
static void sendTelemetryReport() {
char buf[128];
uint32_t ip = taosGetIpFromFqdn(TELEMETRY_SERVER);
uint32_t ip = taosGetIpv4FromFqdn(TELEMETRY_SERVER);
if (ip == 0xffffffff) {
dTrace("failed to get IP address of " TELEMETRY_SERVER ", reason:%s", strerror(errno));
return;
@ -308,4 +308,4 @@ void dnodeCleanupTelemetry() {
pthread_join(tsTelemetryThread, NULL);
tsem_destroy(&tsExitSem);
}
}
}

View File

@ -143,7 +143,7 @@ static SCreateVnodeMsg* dnodeParseVnodeMsg(SRpcMsg *rpcMsg) {
pCreate->cfg.fsyncPeriod = htonl(pCreate->cfg.fsyncPeriod);
pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime);
for (int32_t j = 0; j < pCreate->cfg.replications; ++j) {
for (int32_t j = 0; j < pCreate->cfg.vgReplica; ++j) {
pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId);
}
@ -217,4 +217,4 @@ static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) {
dnodeStartMnode(&pCfg->mnodes);
return TSDB_CODE_SUCCESS;
}
}

View File

@ -54,6 +54,7 @@ void dnodeCleanupVRead() {
void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
int32_t queuedMsgNum = 0;
int32_t leftLen = pMsg->contLen;
int32_t code = TSDB_CODE_VND_INVALID_VGROUP_ID;
char * pCont = pMsg->pCont;
while (leftLen > 0) {
@ -64,7 +65,7 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
assert(pHead->contLen > 0);
void *pVnode = vnodeAcquire(pHead->vgId);
if (pVnode != NULL) {
int32_t code = vnodeWriteToRQueue(pVnode, pCont, pHead->contLen, TAOS_QTYPE_RPC, pMsg);
code = vnodeWriteToRQueue(pVnode, pCont, pHead->contLen, TAOS_QTYPE_RPC, pMsg);
if (code == TSDB_CODE_SUCCESS) queuedMsgNum++;
vnodeRelease(pVnode);
}
@ -74,7 +75,7 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
}
if (queuedMsgNum == 0) {
SRpcMsg rpcRsp = {.handle = pMsg->handle, .code = TSDB_CODE_VND_INVALID_VGROUP_ID};
SRpcMsg rpcRsp = {.handle = pMsg->handle, .code = code};
rpcSendResponse(&rpcRsp);
}

View File

@ -188,6 +188,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) {
int32_t numOfMsgs;
int32_t qtype;
taosBlockSIGPIPE();
dDebug("dnode vwrite worker:%d is running", pWorker->workerId);
while (1) {

View File

@ -73,7 +73,8 @@ static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
if (*numOfVnodes >= TSDB_MAX_VNODES) {
dError("vgId:%d, too many vnode directory in disk, exist:%d max:%d", vnode, *numOfVnodes, TSDB_MAX_VNODES);
continue;
closedir(dir);
return TSDB_CODE_DND_TOO_MANY_VNODES;
} else {
vnodeList[*numOfVnodes - 1] = vnode;
}
@ -245,12 +246,11 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
pStatus->lastReboot = htonl(tsRebootTime);
pStatus->numOfCores = htons((uint16_t) tsNumOfCores);
pStatus->diskAvailable = tsAvailDataDirGB;
pStatus->alternativeRole = (uint8_t) tsAlternativeRole;
pStatus->alternativeRole = tsAlternativeRole;
tstrncpy(pStatus->dnodeEp, tsLocalEp, TSDB_EP_LEN);
// fill cluster cfg parameters
pStatus->clusterCfg.numOfMnodes = htonl(tsNumOfMnodes);
pStatus->clusterCfg.enableBalance = htonl(tsEnableBalance);
pStatus->clusterCfg.mnodeEqualVnodeNum = htonl(tsMnodeEqualVnodeNum);
pStatus->clusterCfg.offlineThreshold = htonl(tsOfflineThreshold);
pStatus->clusterCfg.statusInterval = htonl(tsStatusInterval);
@ -262,7 +262,12 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
char timestr[32] = "1970-01-01 00:00:00.00";
(void)taosParseTime(timestr, &pStatus->clusterCfg.checkTime, strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0);
tstrncpy(pStatus->clusterCfg.locale, tsLocale, TSDB_LOCALE_LEN);
tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN);
tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN);
pStatus->clusterCfg.enableBalance = tsEnableBalance;
pStatus->clusterCfg.flowCtrl = tsEnableFlowCtrl;
pStatus->clusterCfg.slaveQuery = tsEnableSlaveQuery;
pStatus->clusterCfg.adjustMaster = tsEnableAdjustMaster;
vnodeBuildStatusMsg(pStatus);
contLen = sizeof(SStatusMsg) + pStatus->openVnodes * sizeof(SVnodeLoad);
@ -284,4 +289,4 @@ void dnodeSendStatusMsgToMnode() {
dInfo("force send status msg to mnode");
taosTmrReset(dnodeSendStatusMsg, 3, NULL, tsDnodeTmr, &tsStatusTimer);
}
}
}

View File

@ -36,6 +36,8 @@ bool dnodeIsMasterEp(char *ep);
void dnodeGetEpSetForPeer(SRpcEpSet *epSet);
void dnodeGetEpSetForShell(SRpcEpSet *epSet);
int32_t dnodeGetDnodeId();
void dnodeGetClusterId(char *clusterId);
void dnodeUpdateEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port);
bool dnodeCheckEpChanged(int32_t dnodeId, char *epstr);
bool dnodeStartMnode(SMInfos *pMinfos);
@ -80,4 +82,4 @@ void dnodeReportStep(char *name, char *desc, int8_t finished);
}
#endif
#endif
#endif

View File

@ -42,11 +42,12 @@ typedef struct SMnodeMsg {
struct SVgObj * pVgroup;
struct STableObj *pTable;
struct SSTableObj*pSTable;
struct SMnodeMsg *pBatchMasterMsg;
SMnodeRsp rpcRsp;
int8_t received;
int8_t successed;
int8_t expected;
int8_t retry;
int16_t received;
int16_t successed;
int16_t expected;
int16_t retry;
int32_t incomingTs;
int32_t code;
void * pObj;
@ -57,6 +58,7 @@ typedef struct SMnodeMsg {
void * mnodeCreateMsg(SRpcMsg *pRpcMsg);
int32_t mnodeInitMsg(SMnodeMsg *pMsg);
void mnodeCleanupMsg(SMnodeMsg *pMsg);
void mnodeDestroySubMsg(SMnodeMsg *pSubMsg);
int32_t mnodeInitSystem();
int32_t mnodeStartSystem();

View File

@ -369,6 +369,10 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf
#define TSDB_MAX_DB_UPDATE 1
#define TSDB_DEFAULT_DB_UPDATE_OPTION 0
#define TSDB_MIN_DB_CACHE_LAST_ROW 0
#define TSDB_MAX_DB_CACHE_LAST_ROW 1
#define TSDB_DEFAULT_CACHE_LAST_ROW 0
#define TSDB_MIN_FSYNC_PERIOD 0
#define TSDB_MAX_FSYNC_PERIOD 180000 // millisecond
#define TSDB_DEFAULT_FSYNC_PERIOD 3000 // three second
@ -432,7 +436,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf
#define TSDB_PORT_HTTP 11
#define TSDB_PORT_ARBITRATOR 12
#define TSDB_MAX_WAL_SIZE (1024*1024)
#define TSDB_MAX_WAL_SIZE (1024*1024*2)
typedef enum {
TAOS_QTYPE_RPC = 0,

View File

@ -67,6 +67,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_RESPONSE_TYPE, 0, 0x0012, "Invalid re
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_TIME_STAMP, 0, 0x0013, "Client and server's time is not synchronized")
TAOS_DEFINE_ERROR(TSDB_CODE_APP_NOT_READY, 0, 0x0014, "Database not ready")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_FQDN_ERROR, 0, 0x0015, "Unable to resolve FQDN")
TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_VERSION, 0, 0x0016, "Invalid app version")
//common & util
TAOS_DEFINE_ERROR(TSDB_CODE_COM_OPS_NOT_SUPPORT, 0, 0x0100, "Operation not supported")
@ -193,6 +194,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_DND_OUT_OF_MEMORY, 0, 0x0401, "Dnode out
TAOS_DEFINE_ERROR(TSDB_CODE_DND_NO_WRITE_ACCESS, 0, 0x0402, "No permission for disk files in dnode")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, 0, 0x0403, "Invalid message length")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS, 0, 0x0404, "Action in progress")
TAOS_DEFINE_ERROR(TSDB_CODE_DND_TOO_MANY_VNODES, 0, 0x0405, "Too many vnode directories")
// vnode
TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, 0, 0x0500, "Action in progress")
@ -208,9 +210,11 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "Unexpected
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, 0, 0x050A, "Invalid version file")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, 0, 0x050B, "Database memory is full for commit failed")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL, 0, 0x050C, "Database memory is full for waiting commit")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_DROPPING, 0, 0x050D, "Database is dropping")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING, 0, 0x050E, "Database is balancing")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "Database suspended")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Database write operation denied")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_SYNCING, 0, 0x0513, "Database is syncing")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING, 0, 0x0513, "Database is syncing")
// tsdb
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, 0, 0x0600, "Invalid table ID")

View File

@ -324,6 +324,7 @@ typedef struct {
typedef struct {
char acctId[TSDB_ACCT_LEN];
char serverVersion[TSDB_VERSION_LEN];
char clusterId[TSDB_CLUSTER_ID_LEN];
int8_t writeAuth;
int8_t superAuth;
int8_t reserved1;
@ -475,6 +476,7 @@ typedef struct {
int16_t numOfCols; // the number of columns will be load from vnode
SInterval interval;
uint16_t tagCondLen; // tag length in current query
uint32_t tbnameCondLen; // table name filter condition string length
int16_t numOfGroupCols; // num of group by columns
int16_t orderByIdx;
int16_t orderType; // used in group by xx order by xxx
@ -493,6 +495,7 @@ typedef struct {
int32_t tsNumOfBlocks; // ts comp block numbers
int32_t tsOrder; // ts comp block order
int32_t numOfTags; // number of tags columns involved
int32_t sqlstrLen; // sql query string
SColumnInfo colList[];
} SQueryTableMsg;
@ -517,16 +520,17 @@ typedef struct SRetrieveTableRsp {
} SRetrieveTableRsp;
typedef struct {
int32_t vgId;
int32_t dbCfgVersion;
int64_t totalStorage;
int64_t compStorage;
int64_t pointsWritten;
uint8_t status;
uint8_t role;
uint8_t replica;
uint8_t reserved;
int32_t vgCfgVersion;
int32_t vgId;
int32_t dbCfgVersion;
int64_t totalStorage;
int64_t compStorage;
int64_t pointsWritten;
uint64_t vnodeVersion;
int32_t vgCfgVersion;
uint8_t status;
uint8_t role;
uint8_t replica;
uint8_t reserved;
} SVnodeLoad;
typedef struct {
@ -549,7 +553,8 @@ typedef struct {
int8_t quorum;
int8_t ignoreExist;
int8_t update;
int8_t reserve[9];
int8_t cacheLastRow;
int8_t reserve[8];
} SCreateDbMsg, SAlterDbMsg;
typedef struct {
@ -604,7 +609,6 @@ typedef struct {
typedef struct {
int32_t numOfMnodes; // tsNumOfMnodes
int32_t enableBalance; // tsEnableBalance
int32_t mnodeEqualVnodeNum; // tsMnodeEqualVnodeNum
int32_t offlineThreshold; // tsOfflineThreshold
int32_t statusInterval; // tsStatusInterval
@ -615,6 +619,11 @@ typedef struct {
int64_t checkTime; // 1970-01-01 00:00:00.000
char locale[TSDB_LOCALE_LEN]; // tsLocale
char charset[TSDB_LOCALE_LEN]; // tsCharset
int8_t enableBalance; // tsEnableBalance
int8_t flowCtrl;
int8_t slaveQuery;
int8_t adjustMaster;
int8_t reserved[4];
} SClusterCfg;
typedef struct {
@ -657,12 +666,14 @@ typedef struct {
int8_t precision;
int8_t compression;
int8_t walLevel;
int8_t replications;
int8_t vgReplica;
int8_t wals;
int8_t quorum;
int8_t update;
int8_t reserved[11];
int8_t cacheLastRow;
int32_t vgCfgVersion;
int8_t dbReplica;
int8_t reserved[9];
} SVnodeCfg;
typedef struct {
@ -716,7 +727,6 @@ typedef struct {
typedef struct STableMetaMsg {
int32_t contLen;
char tableId[TSDB_TABLE_FNAME_LEN]; // table id
char sTableId[TSDB_TABLE_FNAME_LEN];
uint8_t numOfTags;
uint8_t precision;
uint8_t tableType;
@ -726,6 +736,9 @@ typedef struct STableMetaMsg {
int32_t tid;
uint64_t uid;
SVgroupMsg vgroup;
char sTableName[TSDB_TABLE_FNAME_LEN];
uint64_t suid;
SSchema schema[];
} STableMetaMsg;

View File

@ -66,6 +66,7 @@ typedef struct {
int8_t precision;
int8_t compression;
int8_t update;
int8_t cacheLastRow;
} STsdbCfg;
// --------- TSDB REPOSITORY USAGE STATISTICS
@ -119,7 +120,7 @@ STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg);
int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg);
int tsdbDropTable(TSDB_REPO_T *pRepo, STableId tableId);
int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg);
TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid);
// TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid);
uint32_t tsdbGetFileInfo(TSDB_REPO_T *repo, char *name, uint32_t *index, uint32_t eindex, int64_t *size);

View File

@ -114,114 +114,115 @@
#define TK_COMP 96
#define TK_PRECISION 97
#define TK_UPDATE 98
#define TK_LP 99
#define TK_RP 100
#define TK_TAGS 101
#define TK_USING 102
#define TK_AS 103
#define TK_COMMA 104
#define TK_NULL 105
#define TK_SELECT 106
#define TK_UNION 107
#define TK_ALL 108
#define TK_FROM 109
#define TK_VARIABLE 110
#define TK_INTERVAL 111
#define TK_FILL 112
#define TK_SLIDING 113
#define TK_ORDER 114
#define TK_BY 115
#define TK_ASC 116
#define TK_DESC 117
#define TK_GROUP 118
#define TK_HAVING 119
#define TK_LIMIT 120
#define TK_OFFSET 121
#define TK_SLIMIT 122
#define TK_SOFFSET 123
#define TK_WHERE 124
#define TK_NOW 125
#define TK_RESET 126
#define TK_QUERY 127
#define TK_ADD 128
#define TK_COLUMN 129
#define TK_TAG 130
#define TK_CHANGE 131
#define TK_SET 132
#define TK_KILL 133
#define TK_CONNECTION 134
#define TK_STREAM 135
#define TK_COLON 136
#define TK_ABORT 137
#define TK_AFTER 138
#define TK_ATTACH 139
#define TK_BEFORE 140
#define TK_BEGIN 141
#define TK_CASCADE 142
#define TK_CLUSTER 143
#define TK_CONFLICT 144
#define TK_COPY 145
#define TK_DEFERRED 146
#define TK_DELIMITERS 147
#define TK_DETACH 148
#define TK_EACH 149
#define TK_END 150
#define TK_EXPLAIN 151
#define TK_FAIL 152
#define TK_FOR 153
#define TK_IGNORE 154
#define TK_IMMEDIATE 155
#define TK_INITIALLY 156
#define TK_INSTEAD 157
#define TK_MATCH 158
#define TK_KEY 159
#define TK_OF 160
#define TK_RAISE 161
#define TK_REPLACE 162
#define TK_RESTRICT 163
#define TK_ROW 164
#define TK_STATEMENT 165
#define TK_TRIGGER 166
#define TK_VIEW 167
#define TK_COUNT 168
#define TK_SUM 169
#define TK_AVG 170
#define TK_MIN 171
#define TK_MAX 172
#define TK_FIRST 173
#define TK_LAST 174
#define TK_TOP 175
#define TK_BOTTOM 176
#define TK_STDDEV 177
#define TK_PERCENTILE 178
#define TK_APERCENTILE 179
#define TK_LEASTSQUARES 180
#define TK_HISTOGRAM 181
#define TK_DIFF 182
#define TK_SPREAD 183
#define TK_TWA 184
#define TK_INTERP 185
#define TK_LAST_ROW 186
#define TK_RATE 187
#define TK_IRATE 188
#define TK_SUM_RATE 189
#define TK_SUM_IRATE 190
#define TK_AVG_RATE 191
#define TK_AVG_IRATE 192
#define TK_TBID 193
#define TK_SEMI 194
#define TK_NONE 195
#define TK_PREV 196
#define TK_LINEAR 197
#define TK_IMPORT 198
#define TK_METRIC 199
#define TK_TBNAME 200
#define TK_JOIN 201
#define TK_METRICS 202
#define TK_STABLE 203
#define TK_INSERT 204
#define TK_INTO 205
#define TK_VALUES 206
#define TK_CACHELAST 99
#define TK_LP 100
#define TK_RP 101
#define TK_TAGS 102
#define TK_USING 103
#define TK_AS 104
#define TK_COMMA 105
#define TK_NULL 106
#define TK_SELECT 107
#define TK_UNION 108
#define TK_ALL 109
#define TK_FROM 110
#define TK_VARIABLE 111
#define TK_INTERVAL 112
#define TK_FILL 113
#define TK_SLIDING 114
#define TK_ORDER 115
#define TK_BY 116
#define TK_ASC 117
#define TK_DESC 118
#define TK_GROUP 119
#define TK_HAVING 120
#define TK_LIMIT 121
#define TK_OFFSET 122
#define TK_SLIMIT 123
#define TK_SOFFSET 124
#define TK_WHERE 125
#define TK_NOW 126
#define TK_RESET 127
#define TK_QUERY 128
#define TK_ADD 129
#define TK_COLUMN 130
#define TK_TAG 131
#define TK_CHANGE 132
#define TK_SET 133
#define TK_KILL 134
#define TK_CONNECTION 135
#define TK_STREAM 136
#define TK_COLON 137
#define TK_ABORT 138
#define TK_AFTER 139
#define TK_ATTACH 140
#define TK_BEFORE 141
#define TK_BEGIN 142
#define TK_CASCADE 143
#define TK_CLUSTER 144
#define TK_CONFLICT 145
#define TK_COPY 146
#define TK_DEFERRED 147
#define TK_DELIMITERS 148
#define TK_DETACH 149
#define TK_EACH 150
#define TK_END 151
#define TK_EXPLAIN 152
#define TK_FAIL 153
#define TK_FOR 154
#define TK_IGNORE 155
#define TK_IMMEDIATE 156
#define TK_INITIALLY 157
#define TK_INSTEAD 158
#define TK_MATCH 159
#define TK_KEY 160
#define TK_OF 161
#define TK_RAISE 162
#define TK_REPLACE 163
#define TK_RESTRICT 164
#define TK_ROW 165
#define TK_STATEMENT 166
#define TK_TRIGGER 167
#define TK_VIEW 168
#define TK_COUNT 169
#define TK_SUM 170
#define TK_AVG 171
#define TK_MIN 172
#define TK_MAX 173
#define TK_FIRST 174
#define TK_LAST 175
#define TK_TOP 176
#define TK_BOTTOM 177
#define TK_STDDEV 178
#define TK_PERCENTILE 179
#define TK_APERCENTILE 180
#define TK_LEASTSQUARES 181
#define TK_HISTOGRAM 182
#define TK_DIFF 183
#define TK_SPREAD 184
#define TK_TWA 185
#define TK_INTERP 186
#define TK_LAST_ROW 187
#define TK_RATE 188
#define TK_IRATE 189
#define TK_SUM_RATE 190
#define TK_SUM_IRATE 191
#define TK_AVG_RATE 192
#define TK_AVG_IRATE 193
#define TK_TBID 194
#define TK_SEMI 195
#define TK_NONE 196
#define TK_PREV 197
#define TK_LINEAR 198
#define TK_IMPORT 199
#define TK_METRIC 200
#define TK_TBNAME 201
#define TK_JOIN 202
#define TK_METRICS 203
#define TK_STABLE 204
#define TK_INSERT 205
#define TK_INTO 206
#define TK_VALUES 207
#define TK_SPACE 300

View File

@ -28,7 +28,7 @@ extern "C" {
default: \
(_v) = (_finalType)GET_INT32_VAL(_data); \
break; \
};
}
#ifdef __cplusplus
}

View File

@ -3,4 +3,5 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)
ADD_SUBDIRECTORY(taosdemo)
ADD_SUBDIRECTORY(taosdemox)
ADD_SUBDIRECTORY(taosdump)

View File

@ -302,14 +302,12 @@ void shellRunCommandOnServer(TAOS *con, char command[]) {
st = taosGetTimestampUs();
TAOS_RES* tmpSql = NULL;
TAOS_RES* pSql = taos_query_h(con, command, &tmpSql);
TAOS_RES* pSql = taos_query_h(con, command, &result);
if (taos_errno(pSql)) {
taos_error(pSql, st);
return;
}
atomic_store_64(&result, ((SSqlObj*)tmpSql)->self);
int64_t oresult = atomic_load_64(&result);
if (regex_match(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) {

View File

@ -95,7 +95,7 @@ typedef struct DemoArguments {
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
#endif
{0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
{0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 3},
{0, 'a', "replica", 0, "Set the replica parameters of the database, Default 1, min: 1, max: 3.", 3},
{0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
{0, 's', "sql file", 0, "The select sql file.", 3},
{0, 'M', 0, 0, "Use metric flag.", 13},
@ -205,10 +205,10 @@ typedef struct DemoArguments {
arguments->tb_prefix = arg;
break;
case 'M':
arguments->use_metric = false;
arguments->use_metric = true;
break;
case 'x':
arguments->insert_only = false;
arguments->insert_only = true;
break;
case 'c':
if (wordexp(arg, &full_path, 0) != 0) {
@ -406,9 +406,9 @@ typedef struct DemoArguments {
} else if (strcmp(argv[i], "-m") == 0) {
arguments->tb_prefix = argv[++i];
} else if (strcmp(argv[i], "-M") == 0) {
arguments->use_metric = false;
arguments->use_metric = true;
} else if (strcmp(argv[i], "-x") == 0) {
arguments->insert_only = false;
arguments->insert_only = true;
} else if (strcmp(argv[i], "-c") == 0) {
strcpy(configDir, argv[++i]);
} else if (strcmp(argv[i], "-O") == 0) {
@ -476,6 +476,14 @@ typedef struct {
int notFinished;
tsem_t lock_sem;
int counter;
// insert delay statistics
int64_t cntDelay;
int64_t totalDelay;
int64_t avgDelay;
int64_t maxDelay;
int64_t minDelay;
} info;
typedef struct {
@ -575,7 +583,7 @@ int main(int argc, char *argv[]) {
arguments.num_of_DPT = 100000;
arguments.num_of_RPR = 1000;
arguments.use_metric = true;
arguments.insert_only = true;
arguments.insert_only = false;
// end change
parse_args(argc, argv, &arguments);
@ -739,6 +747,9 @@ int main(int argc, char *argv[]) {
printf("Inserting data......\n");
pthread_t *pids = malloc(threads * sizeof(pthread_t));
info *infos = malloc(threads * sizeof(info));
memset(pids, 0, threads * sizeof(pthread_t));
memset(infos, 0, threads * sizeof(info));
int a = ntables / threads;
if (a < 1) {
@ -768,6 +779,7 @@ int main(int argc, char *argv[]) {
t_info->end_table_id = i < b ? last + a : last + a - 1;
last = t_info->end_table_id + 1;
t_info->counter = 0;
t_info->minDelay = INT16_MAX;
tsem_init(&(t_info->mutex_sem), 0, 1);
t_info->notFinished = t_info->end_table_id - t_info->start_table_id + 1;
@ -799,12 +811,29 @@ int main(int argc, char *argv[]) {
t, (int64_t)ntables * nrecords_per_table, nrecords_per_request,
(int64_t)ntables * nrecords_per_table / t);
int64_t totalDelay = 0;
int64_t maxDelay = 0;
int64_t minDelay = INT16_MAX;
int64_t cntDelay = 0;
double avgDelay = 0;
for (int i = 0; i < threads; i++) {
info *t_info = infos + i;
taos_close(t_info->taos);
tsem_destroy(&(t_info->mutex_sem));
tsem_destroy(&(t_info->lock_sem));
totalDelay += t_info->totalDelay;
cntDelay += t_info->cntDelay;
if (t_info->maxDelay > maxDelay) maxDelay = t_info->maxDelay;
if (t_info->minDelay < minDelay) minDelay = t_info->minDelay;
}
avgDelay = (double)totalDelay / cntDelay;
fprintf(fp, "insert delay, avg:%10.6fms, max: %10.6fms, min: %10.6fms\n\n",
avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0);
printf("insert delay, avg: %10.6fms, max: %10.6fms, min: %10.6fms\n\n",
avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0);
free(pids);
free(infos);
@ -859,7 +888,7 @@ int main(int argc, char *argv[]) {
}
if (!insert_only) {
if (false == insert_only) {
// query data
pthread_t read_id;
info *rInfo = malloc(sizeof(info));
@ -998,7 +1027,7 @@ void * createTable(void *sarg)
/* Create all the tables; */
printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s);", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
queryDB(winfo->taos, command);
}
} else {
@ -1204,6 +1233,41 @@ void *readMetric(void *sarg) {
return NULL;
}
static int queryDbExec(TAOS *taos, char *command, int type) {
int i;
TAOS_RES *res = NULL;
int32_t code = -1;
for (i = 0; i < 5; i++) {
if (NULL != res) {
taos_free_result(res);
res = NULL;
}
res = taos_query(taos, command);
code = taos_errno(res);
if (0 == code) {
break;
}
}
if (code != 0) {
fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(res));
taos_free_result(res);
//taos_close(taos);
return -1;
}
if (1 == type) {
int affectedRows = taos_affected_rows(res);
taos_free_result(res);
return affectedRows;
}
taos_free_result(res);
return 0;
}
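
A possible call pattern for the retry helper above, as a hedged usage sketch (the database, table, and values are placeholders): passing 1 as the type returns the affected-row count for write statements, while any other value only checks for success.

  #include <stdio.h>
  #include "taos.h"

  static void demoInsertOnce(TAOS *taos) {
    int rows = queryDbExec(taos, "insert into test.t0 values (now, 1)", 1);
    if (rows < 0) {
      fprintf(stderr, "insert failed after retries\n");
    } else {
      printf("inserted %d row(s)\n", rows);
    }
  }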
void queryDB(TAOS *taos, char *command) {
int i;
TAOS_RES *pSql = NULL;
@ -1273,7 +1337,21 @@ void *syncWrite(void *sarg) {
}
/* puts(buffer); */
queryDB(winfo->taos, buffer);
int64_t startTs;
int64_t endTs;
startTs = taosGetTimestampUs();
//queryDB(winfo->taos, buffer);
int affectedRows = queryDbExec(winfo->taos, buffer, 1);
if (0 <= affectedRows){
endTs = taosGetTimestampUs();
int64_t delay = endTs - startTs;
if (delay > winfo->maxDelay) winfo->maxDelay = delay;
if (delay < winfo->minDelay) winfo->minDelay = delay;
winfo->cntDelay++;
winfo->totalDelay += delay;
//winfo->avgDelay = (double)winfo->totalDelay / winfo->cntDelay;
}
if (tID == winfo->end_table_id) {
i = inserted;
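
The timing code added above accumulates per-request latencies in microseconds per thread, and the summary printed after the threads join divides by 1000.0 to report milliseconds. A compact sketch of that bookkeeping under the same convention (DelayStats and the helper names are illustrative only, not the taosdemo code):

  #include <stdint.h>
  #include <stdio.h>

  typedef struct {
    int64_t cnt, total, minUs, maxUs;   /* all values in microseconds */
  } DelayStats;

  static void delayRecord(DelayStats *s, int64_t us) {
    if (s->cnt == 0 || us < s->minUs) s->minUs = us;
    if (us > s->maxUs) s->maxUs = us;
    s->total += us;
    s->cnt++;
  }

  static void delayPrint(const DelayStats *s) {
    double avg = (s->cnt > 0) ? (double)s->total / (double)s->cnt : 0.0;
    printf("insert delay, avg: %10.6fms, max: %10.6fms, min: %10.6fms\n",
           avg / 1000.0, (double)s->maxUs / 1000.0, (double)s->minUs / 1000.0);
  }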

View File

@ -0,0 +1,25 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/include)
IF (TD_LINUX)
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemox ${SRC})
#find_program(HAVE_CURL NAMES curl)
IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32))
ADD_DEFINITIONS(-DTD_LOWA_CURL)
LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib)
ADD_LIBRARY(curl STATIC IMPORTED)
SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a)
TARGET_LINK_LIBRARIES(taosdemox curl)
ENDIF ()
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdemox taos_static cJson)
ELSE ()
TARGET_LINK_LIBRARIES(taosdemox taos cJson)
ENDIF ()
ENDIF ()

View File

@ -0,0 +1,53 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 2,
"result_file": "./insert_res.txt",
"databases": [{
"dbinfo": {
"name": "db",
"drop": "no",
"replica": 1,
"days": 2,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rate": 0,
"insert_rows": 100000,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 1,
"rows_per_tbl": 100,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}
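
The taosdemox build file earlier in this diff links cJson, so configs like the one above are read as JSON. A small hedged illustration of pulling a couple of top-level fields out of such a file with cJSON (demoReadCfg is a placeholder, not the taosdemox source):

  #include <stdio.h>
  #include "cJSON.h"

  static void demoReadCfg(const char *text) {
    cJSON *root = cJSON_Parse(text);
    if (root == NULL) return;
    cJSON *host = cJSON_GetObjectItem(root, "host");
    cJSON *port = cJSON_GetObjectItem(root, "port");
    if (host != NULL && host->valuestring != NULL) printf("host: %s\n", host->valuestring);
    if (port != NULL) printf("port: %d\n", port->valueint);
    cJSON_Delete(root);
  }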

View File

@ -0,0 +1,17 @@
{
"filetype":"query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "db01",
"super_table_query":
{"rate":1, "concurrent":1,
"sqls": [{"sql": "select count(*) from stb01", "result": "./query_res0.txt"}]
},
"sub_table_query":
{"stblname": "stb01", "rate":1, "threads":1,
"sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}]
}
}

View File

@ -0,0 +1,17 @@
{
"filetype":"subscribe",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "db01",
"super_table_query":
{"concurrent":1, "mode":"sync", "interval":5000, "restart":"yes", "keepProgress":"yes",
"sqls": [{"sql": "select avg(c1) from stb01 where col1 > 1;", "result": "./subscribe_res0.txt"}]
},
"sub_table_query":
{"stblname": "stb01", "threads":1, "mode":"sync", "interval":10000, "restart":"yes", "keepProgress":"yes",
"sqls": [{"sql": "select col1 from xxxx where col1 > 10;", "result": "./subscribe_res1.txt"}]
}
}

File diff suppressed because it is too large

View File

@ -332,6 +332,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
break;
case 'N':
arguments->data_batch = atoi(arg);
if (arguments->data_batch >= INT16_MAX) {
arguments->data_batch = INT16_MAX - 1;
}
break;
case 'L':
{

Some files were not shown because too many files have changed in this diff