Merge branch 'develop' into feature/TD-2426

Minglei Jin 2020-12-31 16:34:21 +08:00
commit 4afc6a9bad
100 changed files with 10004 additions and 1101 deletions

.travis.yml

@@ -146,7 +146,7 @@ matrix:
branch_pattern: coverity_scan
- os: linux
dist: trusty
dist: xenial
language: c
git:
- depth: 1
@@ -157,7 +157,7 @@ matrix:
- build-essential
- cmake
env:
- DESC="trusty/gcc-4.8 build"
- DESC="xenial build"
before_script:
- export TZ=Asia/Harbin
@@ -227,7 +227,7 @@ matrix:
- os: linux
arch: arm64
dist: trusty
dist: xenial
language: c
git:
- depth: 1
@@ -238,7 +238,7 @@ matrix:
- build-essential
- cmake
env:
- DESC="trusty/gcc-4.8 build"
- DESC="xenial build"
before_script:
- export TZ=Asia/Harbin

4 Jenkinsfile vendored

@@ -41,13 +41,15 @@ def pre_test(){
cd ${WKC}
git checkout develop
git reset --hard HEAD~10
git pull
git fetch
git checkout ${CHANGE_BRANCH}
git reset --hard HEAD~10
git pull
git merge develop
cd ${WK}
git reset --hard
git reset --hard HEAD~10
git checkout develop
git pull
cd ${WK}

2415 deps/libcurl/include/curl/curl.h vendored Normal file

File diff suppressed because it is too large.

198 deps/libcurl/include/curl/curlbuild.h vendored Normal file

@@ -0,0 +1,198 @@
/* include/curl/curlbuild.h. Generated from curlbuild.h.in by configure. */
#ifndef __CURL_CURLBUILD_H
#define __CURL_CURLBUILD_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* ================================================================ */
/* NOTES FOR CONFIGURE CAPABLE SYSTEMS */
/* ================================================================ */
/*
* NOTE 1:
* -------
*
* Nothing in this file is intended to be modified or adjusted by the
* curl library user nor by the curl library builder.
*
* If you think that something actually needs to be changed, adjusted
* or fixed in this file, then, report it on the libcurl development
* mailing list: http://cool.haxx.se/mailman/listinfo/curl-library/
*
* This header file shall only export symbols which are 'curl' or 'CURL'
* prefixed, otherwise public name space would be polluted.
*
* NOTE 2:
* -------
*
* Right now you might be staring at file include/curl/curlbuild.h.in or
* at file include/curl/curlbuild.h, this is due to the following reason:
*
* On systems capable of running the configure script, the configure process
* will overwrite the distributed include/curl/curlbuild.h file with one that
* is suitable and specific to the library being configured and built, which
* is generated from the include/curl/curlbuild.h.in template file.
*
*/
/* ================================================================ */
/* DEFINITION OF THESE SYMBOLS SHALL NOT TAKE PLACE ANYWHERE ELSE */
/* ================================================================ */
#ifdef CURL_SIZEOF_LONG
#error "CURL_SIZEOF_LONG shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SIZEOF_LONG_already_defined
#endif
#ifdef CURL_TYPEOF_CURL_SOCKLEN_T
#error "CURL_TYPEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_already_defined
#endif
#ifdef CURL_SIZEOF_CURL_SOCKLEN_T
#error "CURL_SIZEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_already_defined
#endif
#ifdef CURL_TYPEOF_CURL_OFF_T
#error "CURL_TYPEOF_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_already_defined
#endif
#ifdef CURL_FORMAT_CURL_OFF_T
#error "CURL_FORMAT_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_already_defined
#endif
#ifdef CURL_FORMAT_CURL_OFF_TU
#error "CURL_FORMAT_CURL_OFF_TU shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_already_defined
#endif
#ifdef CURL_FORMAT_OFF_T
#error "CURL_FORMAT_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_FORMAT_OFF_T_already_defined
#endif
#ifdef CURL_SIZEOF_CURL_OFF_T
#error "CURL_SIZEOF_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_already_defined
#endif
#ifdef CURL_SUFFIX_CURL_OFF_T
#error "CURL_SUFFIX_CURL_OFF_T shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_already_defined
#endif
#ifdef CURL_SUFFIX_CURL_OFF_TU
#error "CURL_SUFFIX_CURL_OFF_TU shall not be defined except in curlbuild.h"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_already_defined
#endif
/* ================================================================ */
/* EXTERNAL INTERFACE SETTINGS FOR CONFIGURE CAPABLE SYSTEMS ONLY */
/* ================================================================ */
/* Configure process defines this to 1 when it finds out that system */
/* header file ws2tcpip.h must be included by the external interface. */
/* #undef CURL_PULL_WS2TCPIP_H */
#ifdef CURL_PULL_WS2TCPIP_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# include <winsock2.h>
# include <ws2tcpip.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file sys/types.h must be included by the external interface. */
#define CURL_PULL_SYS_TYPES_H 1
#ifdef CURL_PULL_SYS_TYPES_H
# include <sys/types.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file stdint.h must be included by the external interface. */
/* #undef CURL_PULL_STDINT_H */
#ifdef CURL_PULL_STDINT_H
# include <stdint.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file inttypes.h must be included by the external interface. */
/* #undef CURL_PULL_INTTYPES_H */
#ifdef CURL_PULL_INTTYPES_H
# include <inttypes.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file sys/socket.h must be included by the external interface. */
#define CURL_PULL_SYS_SOCKET_H 1
#ifdef CURL_PULL_SYS_SOCKET_H
# include <sys/socket.h>
#endif
/* Configure process defines this to 1 when it finds out that system */
/* header file sys/poll.h must be included by the external interface. */
/* #undef CURL_PULL_SYS_POLL_H */
#ifdef CURL_PULL_SYS_POLL_H
# include <sys/poll.h>
#endif
/* The size of `long', as computed by sizeof. */
#define CURL_SIZEOF_LONG 8
/* Integral data type used for curl_socklen_t. */
#define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t
/* The size of `curl_socklen_t', as computed by sizeof. */
#define CURL_SIZEOF_CURL_SOCKLEN_T 4
/* Data type definition of curl_socklen_t. */
typedef CURL_TYPEOF_CURL_SOCKLEN_T curl_socklen_t;
/* Signed integral data type used for curl_off_t. */
#define CURL_TYPEOF_CURL_OFF_T long
/* Data type definition of curl_off_t. */
typedef CURL_TYPEOF_CURL_OFF_T curl_off_t;
/* curl_off_t formatting string directive without "%" conversion specifier. */
#define CURL_FORMAT_CURL_OFF_T "ld"
/* unsigned curl_off_t formatting string without "%" conversion specifier. */
#define CURL_FORMAT_CURL_OFF_TU "lu"
/* curl_off_t formatting string directive with "%" conversion specifier. */
#define CURL_FORMAT_OFF_T "%ld"
/* The size of `curl_off_t', as computed by sizeof. */
#define CURL_SIZEOF_CURL_OFF_T 8
/* curl_off_t constant suffix. */
#define CURL_SUFFIX_CURL_OFF_T L
/* unsigned curl_off_t constant suffix. */
#define CURL_SUFFIX_CURL_OFF_TU UL
#endif /* __CURL_CURLBUILD_H */
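
CURL_FORMAT_CURL_OFF_T and CURL_FORMAT_CURL_OFF_TU carry no "%" character, so applications splice them into the format string themselves. A minimal usage sketch (assuming a standard libcurl install, built with cc demo.c -lcurl):

#include <curl/curl.h>
#include <stdio.h>

int main(void)
{
  curl_off_t size = 1234567;
  /* CURL_FORMAT_CURL_OFF_T expands to "ld" on this platform, so the "%"
     is supplied by hand */
  printf("download size: %" CURL_FORMAT_CURL_OFF_T " bytes\n", size);
  return 0;
}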

262 deps/libcurl/include/curl/curlrules.h vendored Normal file

@@ -0,0 +1,262 @@
#ifndef __CURL_CURLRULES_H
#define __CURL_CURLRULES_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* ================================================================ */
/* COMPILE TIME SANITY CHECKS */
/* ================================================================ */
/*
* NOTE 1:
* -------
*
* All checks done in this file are intentionally placed in a public
* header file which is pulled by curl/curl.h when an application is
* being built using an already built libcurl library. Additionally
* this file is also included and used when building the library.
*
* If compilation fails on this file it is certainly sure that the
* problem is elsewhere. It could be a problem in the curlbuild.h
* header file, or simply that you are using different compilation
* settings than those used to build the library.
*
* Nothing in this file is intended to be modified or adjusted by the
* curl library user nor by the curl library builder.
*
* Do not deactivate any check, these are done to make sure that the
* library is properly built and used.
*
* You can find further help on the libcurl development mailing list:
* http://cool.haxx.se/mailman/listinfo/curl-library/
*
* NOTE 2
* ------
*
* Some of the following compile time checks are based on the fact
* that the dimension of a constant array can not be a negative one.
* In this way if the compile time verification fails, the compilation
* will fail issuing an error. The error description wording is compiler
* dependent but it will be quite similar to one of the following:
*
* "negative subscript or subscript is too large"
* "array must have at least one element"
* "-1 is an illegal array size"
* "size of array is negative"
*
* If you are building an application which tries to use an already
* built libcurl library and you are getting this kind of errors on
* this file, it is a clear indication that there is a mismatch between
* how the library was built and how you are trying to use it for your
* application. Your already compiled or binary library provider is the
* only one who can give you the details you need to properly use it.
*/
/*
* Verify that some macros are actually defined.
*/
#ifndef CURL_SIZEOF_LONG
# error "CURL_SIZEOF_LONG definition is missing!"
Error Compilation_aborted_CURL_SIZEOF_LONG_is_missing
#endif
#ifndef CURL_TYPEOF_CURL_SOCKLEN_T
# error "CURL_TYPEOF_CURL_SOCKLEN_T definition is missing!"
Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_is_missing
#endif
#ifndef CURL_SIZEOF_CURL_SOCKLEN_T
# error "CURL_SIZEOF_CURL_SOCKLEN_T definition is missing!"
Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_is_missing
#endif
#ifndef CURL_TYPEOF_CURL_OFF_T
# error "CURL_TYPEOF_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_is_missing
#endif
#ifndef CURL_FORMAT_CURL_OFF_T
# error "CURL_FORMAT_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_is_missing
#endif
#ifndef CURL_FORMAT_CURL_OFF_TU
# error "CURL_FORMAT_CURL_OFF_TU definition is missing!"
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_is_missing
#endif
#ifndef CURL_FORMAT_OFF_T
# error "CURL_FORMAT_OFF_T definition is missing!"
Error Compilation_aborted_CURL_FORMAT_OFF_T_is_missing
#endif
#ifndef CURL_SIZEOF_CURL_OFF_T
# error "CURL_SIZEOF_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_is_missing
#endif
#ifndef CURL_SUFFIX_CURL_OFF_T
# error "CURL_SUFFIX_CURL_OFF_T definition is missing!"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_is_missing
#endif
#ifndef CURL_SUFFIX_CURL_OFF_TU
# error "CURL_SUFFIX_CURL_OFF_TU definition is missing!"
Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_is_missing
#endif
/*
* Macros private to this header file.
*/
#define CurlchkszEQ(t, s) sizeof(t) == s ? 1 : -1
#define CurlchkszGE(t1, t2) sizeof(t1) >= sizeof(t2) ? 1 : -1
/*
* Verify that the size previously defined and expected for long
* is the same as the one reported by sizeof() at compile time.
*/
typedef char
__curl_rule_01__
[CurlchkszEQ(long, CURL_SIZEOF_LONG)];
/*
* Verify that the size previously defined and expected for
* curl_off_t is actually the same as the one reported
* by sizeof() at compile time.
*/
typedef char
__curl_rule_02__
[CurlchkszEQ(curl_off_t, CURL_SIZEOF_CURL_OFF_T)];
/*
* Verify at compile time that the size of curl_off_t as reported
* by sizeof() is greater or equal than the one reported for long
* for the current compilation.
*/
typedef char
__curl_rule_03__
[CurlchkszGE(curl_off_t, long)];
/*
* Verify that the size previously defined and expected for
* curl_socklen_t is actually the same as the one reported
* by sizeof() at compile time.
*/
typedef char
__curl_rule_04__
[CurlchkszEQ(curl_socklen_t, CURL_SIZEOF_CURL_SOCKLEN_T)];
/*
* Verify at compile time that the size of curl_socklen_t as reported
* by sizeof() is greater or equal than the one reported for int for
* the current compilation.
*/
typedef char
__curl_rule_05__
[CurlchkszGE(curl_socklen_t, int)];
/* ================================================================ */
/* EXTERNALLY AND INTERNALLY VISIBLE DEFINITIONS */
/* ================================================================ */
/*
* CURL_ISOCPP and CURL_OFF_T_C definitions are done here in order to allow
* these to be visible and exported by the external libcurl interface API,
* while also making them visible to the library internals, simply including
* curl_setup.h, without actually needing to include curl.h internally.
* If some day this section would grow big enough, all this should be moved
* to its own header file.
*/
/*
* Figure out if we can use the ## preprocessor operator, which is supported
* by ISO/ANSI C and C++. Some compilers support it without setting __STDC__
* or __cplusplus so we need to carefully check for them too.
*/
#if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) || \
defined(__HP_aCC) || defined(__BORLANDC__) || defined(__LCC__) || \
defined(__POCC__) || defined(__SALFORDC__) || defined(__HIGHC__) || \
defined(__ILEC400__)
/* This compiler is believed to have an ISO compatible preprocessor */
#define CURL_ISOCPP
#else
/* This compiler is believed NOT to have an ISO compatible preprocessor */
#undef CURL_ISOCPP
#endif
/*
* Macros for minimum-width signed and unsigned curl_off_t integer constants.
*/
#if defined(__BORLANDC__) && (__BORLANDC__ == 0x0551)
# define __CURL_OFF_T_C_HLPR2(x) x
# define __CURL_OFF_T_C_HLPR1(x) __CURL_OFF_T_C_HLPR2(x)
# define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \
__CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_T)
# define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \
__CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_TU)
#else
# ifdef CURL_ISOCPP
# define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val ## Suffix
# else
# define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val/**/Suffix
# endif
# define __CURL_OFF_T_C_HLPR1(Val,Suffix) __CURL_OFF_T_C_HLPR2(Val,Suffix)
# define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_T)
# define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_TU)
#endif
/*
* Get rid of macros private to this header file.
*/
#undef CurlchkszEQ
#undef CurlchkszGE
/*
* Get rid of macros not intended to exist beyond this point.
*/
#undef CURL_PULL_WS2TCPIP_H
#undef CURL_PULL_SYS_TYPES_H
#undef CURL_PULL_SYS_SOCKET_H
#undef CURL_PULL_SYS_POLL_H
#undef CURL_PULL_STDINT_H
#undef CURL_PULL_INTTYPES_H
#undef CURL_TYPEOF_CURL_SOCKLEN_T
#undef CURL_TYPEOF_CURL_OFF_T
#ifdef CURL_NO_OLDIES
#undef CURL_FORMAT_OFF_T /* not required since 7.19.0 - obsoleted in 7.20.0 */
#endif
#endif /* __CURL_CURLRULES_H */
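
NOTE 2 above describes the negative-array-size trick that the __curl_rule_* typedefs rely on. A minimal sketch of the same idea in isolation (DEMO_STATIC_ASSERT is a made-up name, not a libcurl macro):

/* the dimension of an array may not be negative, so this typedef only
   compiles when cond is true (size 1); otherwise the build fails */
#define DEMO_STATIC_ASSERT(cond, name) typedef char name[(cond) ? 1 : -1]

DEMO_STATIC_ASSERT(sizeof(long) >= sizeof(int), demo_long_holds_int);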

77 deps/libcurl/include/curl/curlver.h vendored Normal file

@@ -0,0 +1,77 @@
#ifndef __CURL_CURLVER_H
#define __CURL_CURLVER_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* This header file contains nothing but libcurl version info, generated by
a script at release-time. This was made its own header file in 7.11.2 */
/* This is the global package copyright */
#define LIBCURL_COPYRIGHT "1996 - 2015 Daniel Stenberg, <daniel@haxx.se>."
/* This is the version number of the libcurl package from which this header
file origins: */
#define LIBCURL_VERSION "7.47.0"
/* The numeric version number is also available "in parts" by using these
defines: */
#define LIBCURL_VERSION_MAJOR 7
#define LIBCURL_VERSION_MINOR 47
#define LIBCURL_VERSION_PATCH 0
/* This is the numeric version of the libcurl version number, meant for easier
parsing and comparisons by programs. The LIBCURL_VERSION_NUM define will
always follow this syntax:
0xXXYYZZ
Where XX, YY and ZZ are the main version, release and patch numbers in
hexadecimal (using 8 bits each). All three numbers are always represented
using two digits. 1.2 would appear as "0x010200" while version 9.11.7
appears as "0x090b07".
This 6-digit (24 bits) hexadecimal number does not show pre-release number,
and it is always a greater number in a more recent release. It makes
comparisons with greater than and less than work.
Note: This define is the full hex number and _does not_ use the
CURL_VERSION_BITS() macro since curl's own configure script greps for it
and needs it to contain the full number.
*/
#define LIBCURL_VERSION_NUM 0x072f00
/*
* This is the date and time when the full source package was created. The
* timestamp is not stored in git, as the timestamp is properly set in the
* tarballs by the maketgz script.
*
* The format of the date should follow this template:
*
* "Mon Feb 12 11:35:33 UTC 2007"
*/
#define LIBCURL_TIMESTAMP "Wed Jan 27 07:32:44 UTC 2016"
#define CURL_VERSION_BITS(x,y,z) ((x)<<16|(y)<<8|z)
#define CURL_AT_LEAST_VERSION(x,y,z) \
(LIBCURL_VERSION_NUM >= CURL_VERSION_BITS(x, y, z))
#endif /* __CURL_CURLVER_H */
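
A short sketch of how the version macros are typically used to guard code at compile time (HAVE_UNIX_SOCKET_OPTION is an illustrative name of our own, not a libcurl symbol):

#include <curl/curl.h>

#if CURL_AT_LEAST_VERSION(7, 40, 0)
/* CURLOPT_UNIX_SOCKET_PATH was added in 7.40.0 */
#  define HAVE_UNIX_SOCKET_OPTION 1
#endif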

102 deps/libcurl/include/curl/easy.h vendored Normal file

@@ -0,0 +1,102 @@
#ifndef __CURL_EASY_H
#define __CURL_EASY_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#ifdef __cplusplus
extern "C" {
#endif
CURL_EXTERN CURL *curl_easy_init(void);
CURL_EXTERN CURLcode curl_easy_setopt(CURL *curl, CURLoption option, ...);
CURL_EXTERN CURLcode curl_easy_perform(CURL *curl);
CURL_EXTERN void curl_easy_cleanup(CURL *curl);
/*
* NAME curl_easy_getinfo()
*
* DESCRIPTION
*
* Request internal information from the curl session with this function. The
* third argument MUST be a pointer to a long, a pointer to a char * or a
* pointer to a double (as the documentation describes elsewhere). The data
* pointed to will be filled in accordingly and can be relied upon only if the
* function returns CURLE_OK. This function is intended to get used *AFTER* a
* performed transfer, all results from this function are undefined until the
* transfer is completed.
*/
CURL_EXTERN CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...);
/*
* NAME curl_easy_duphandle()
*
* DESCRIPTION
*
* Creates a new curl session handle with the same options set for the handle
* passed in. Duplicating a handle could only be a matter of cloning data and
* options, internal state info and things like persistent connections cannot
* be transferred. It is useful in multithreaded applications when you can run
* curl_easy_duphandle() for each new thread to avoid a series of identical
* curl_easy_setopt() invokes in every thread.
*/
CURL_EXTERN CURL* curl_easy_duphandle(CURL *curl);
/*
* NAME curl_easy_reset()
*
* DESCRIPTION
*
* Re-initializes a CURL handle to the default values. This puts back the
* handle to the same state as it was in when it was just created.
*
* It does keep: live connections, the Session ID cache, the DNS cache and the
* cookies.
*/
CURL_EXTERN void curl_easy_reset(CURL *curl);
/*
* NAME curl_easy_recv()
*
* DESCRIPTION
*
* Receives data from the connected socket. Use after successful
* curl_easy_perform() with CURLOPT_CONNECT_ONLY option.
*/
CURL_EXTERN CURLcode curl_easy_recv(CURL *curl, void *buffer, size_t buflen,
size_t *n);
/*
* NAME curl_easy_send()
*
* DESCRIPTION
*
* Sends data over the connected socket. Use after successful
* curl_easy_perform() with CURLOPT_CONNECT_ONLY option.
*/
CURL_EXTERN CURLcode curl_easy_send(CURL *curl, const void *buffer,
size_t buflen, size_t *n);
#ifdef __cplusplus
}
#endif
#endif
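
A minimal sketch of the easy interface declared above, following the rule that curl_easy_getinfo() is only meaningful after a completed transfer (assumes the usual curl.h symbols such as CURLOPT_URL and CURLINFO_RESPONSE_CODE, and linking with -lcurl):

#include <curl/curl.h>
#include <stdio.h>

int main(void)
{
  CURL *curl;
  curl_global_init(CURL_GLOBAL_DEFAULT);
  curl = curl_easy_init();
  if(curl) {
    long code = 0;
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
    /* getinfo results are defined only after the transfer has completed */
    if(curl_easy_perform(curl) == CURLE_OK &&
       curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code) == CURLE_OK)
      printf("HTTP response code: %ld\n", code);
    curl_easy_cleanup(curl);
  }
  curl_global_cleanup();
  return 0;
}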

74 deps/libcurl/include/curl/mprintf.h vendored Normal file

@@ -0,0 +1,74 @@
#ifndef __CURL_MPRINTF_H
#define __CURL_MPRINTF_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include <stdarg.h>
#include <stdio.h> /* needed for FILE */
#include "curl.h"
#ifdef __cplusplus
extern "C" {
#endif
CURL_EXTERN int curl_mprintf(const char *format, ...);
CURL_EXTERN int curl_mfprintf(FILE *fd, const char *format, ...);
CURL_EXTERN int curl_msprintf(char *buffer, const char *format, ...);
CURL_EXTERN int curl_msnprintf(char *buffer, size_t maxlength,
const char *format, ...);
CURL_EXTERN int curl_mvprintf(const char *format, va_list args);
CURL_EXTERN int curl_mvfprintf(FILE *fd, const char *format, va_list args);
CURL_EXTERN int curl_mvsprintf(char *buffer, const char *format, va_list args);
CURL_EXTERN int curl_mvsnprintf(char *buffer, size_t maxlength,
const char *format, va_list args);
CURL_EXTERN char *curl_maprintf(const char *format, ...);
CURL_EXTERN char *curl_mvaprintf(const char *format, va_list args);
#ifdef _MPRINTF_REPLACE
# undef printf
# undef fprintf
# undef sprintf
# undef vsprintf
# undef snprintf
# undef vprintf
# undef vfprintf
# undef vsnprintf
# undef aprintf
# undef vaprintf
# define printf curl_mprintf
# define fprintf curl_mfprintf
# define sprintf curl_msprintf
# define vsprintf curl_mvsprintf
# define snprintf curl_msnprintf
# define vprintf curl_mvprintf
# define vfprintf curl_mvfprintf
# define vsnprintf curl_mvsnprintf
# define aprintf curl_maprintf
# define vaprintf curl_mvaprintf
#endif
#ifdef __cplusplus
}
#endif
#endif /* __CURL_MPRINTF_H */
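
A small sketch of the bounded formatting routine declared above; curl_msnprintf() behaves like a portable snprintf() (assuming libcurl is installed and linked with -lcurl):

#include <curl/mprintf.h>
#include <stdio.h>

int main(void)
{
  char buf[64];
  /* writes at most sizeof(buf) bytes into buf, NUL terminated */
  curl_msnprintf(buf, sizeof(buf), "received %d bytes from %s",
                 512, "example.com");
  puts(buf);
  return 0;
}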

435 deps/libcurl/include/curl/multi.h vendored Normal file

@@ -0,0 +1,435 @@
#ifndef __CURL_MULTI_H
#define __CURL_MULTI_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/*
This is an "external" header file. Don't give away any internals here!
GOALS
o Enable a "pull" interface. The application that uses libcurl decides where
and when to ask libcurl to get/send data.
o Enable multiple simultaneous transfers in the same thread without making it
complicated for the application.
o Enable the application to select() on its own file descriptors and curl's
file descriptors simultaneously and easily.
*/
/*
* This header file should not really need to include "curl.h" since curl.h
* itself includes this file and we expect user applications to do #include
* <curl/curl.h> without the need for especially including multi.h.
*
* For some reason we added this include here at one point, and rather than to
* break existing (wrongly written) libcurl applications, we leave it as-is
* but with this warning attached.
*/
#include "curl.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void CURLM;
typedef enum {
CURLM_CALL_MULTI_PERFORM = -1, /* please call curl_multi_perform() or
curl_multi_socket*() soon */
CURLM_OK,
CURLM_BAD_HANDLE, /* the passed-in handle is not a valid CURLM handle */
CURLM_BAD_EASY_HANDLE, /* an easy handle was not good/valid */
CURLM_OUT_OF_MEMORY, /* if you ever get this, you're in deep sh*t */
CURLM_INTERNAL_ERROR, /* this is a libcurl bug */
CURLM_BAD_SOCKET, /* the passed in socket argument did not match */
CURLM_UNKNOWN_OPTION, /* curl_multi_setopt() with unsupported option */
CURLM_ADDED_ALREADY, /* an easy handle already added to a multi handle was
attempted to get added - again */
CURLM_LAST
} CURLMcode;
/* just to make code nicer when using curl_multi_socket() you can now check
for CURLM_CALL_MULTI_SOCKET too in the same style it works for
curl_multi_perform() and CURLM_CALL_MULTI_PERFORM */
#define CURLM_CALL_MULTI_SOCKET CURLM_CALL_MULTI_PERFORM
/* bitmask bits for CURLMOPT_PIPELINING */
#define CURLPIPE_NOTHING 0L
#define CURLPIPE_HTTP1 1L
#define CURLPIPE_MULTIPLEX 2L
typedef enum {
CURLMSG_NONE, /* first, not used */
CURLMSG_DONE, /* This easy handle has completed. 'result' contains
the CURLcode of the transfer */
CURLMSG_LAST /* last, not used */
} CURLMSG;
struct CURLMsg {
CURLMSG msg; /* what this message means */
CURL *easy_handle; /* the handle it concerns */
union {
void *whatever; /* message-specific data */
CURLcode result; /* return code for transfer */
} data;
};
typedef struct CURLMsg CURLMsg;
/* Based on poll(2) structure and values.
* We don't use pollfd and POLL* constants explicitly
* to cover platforms without poll(). */
#define CURL_WAIT_POLLIN 0x0001
#define CURL_WAIT_POLLPRI 0x0002
#define CURL_WAIT_POLLOUT 0x0004
struct curl_waitfd {
curl_socket_t fd;
short events;
short revents; /* not supported yet */
};
/*
* Name: curl_multi_init()
*
* Desc: initialize multi-style curl usage
*
* Returns: a new CURLM handle to use in all 'curl_multi' functions.
*/
CURL_EXTERN CURLM *curl_multi_init(void);
/*
* Name: curl_multi_add_handle()
*
* Desc: add a standard curl handle to the multi stack
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_add_handle(CURLM *multi_handle,
CURL *curl_handle);
/*
* Name: curl_multi_remove_handle()
*
* Desc: removes a curl handle from the multi stack again
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_remove_handle(CURLM *multi_handle,
CURL *curl_handle);
/*
* Name: curl_multi_fdset()
*
* Desc: Ask curl for its fd_set sets. The app can use these to select() or
* poll() on. We want curl_multi_perform() called as soon as one of
* them is ready.
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_fdset(CURLM *multi_handle,
fd_set *read_fd_set,
fd_set *write_fd_set,
fd_set *exc_fd_set,
int *max_fd);
/*
* Name: curl_multi_wait()
*
* Desc: Poll on all fds within a CURLM set as well as any
* additional fds passed to the function.
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_wait(CURLM *multi_handle,
struct curl_waitfd extra_fds[],
unsigned int extra_nfds,
int timeout_ms,
int *ret);
/*
* Name: curl_multi_perform()
*
* Desc: When the app thinks there's data available for curl it calls this
* function to read/write whatever there is right now. This returns
* as soon as the reads and writes are done. This function does not
* require that there actually is data available for reading or that
* data can be written, it can be called just in case. It returns
* the number of handles that still transfer data in the second
* argument's integer-pointer.
*
* Returns: CURLMcode type, general multi error code. *NOTE* that this only
* returns errors etc regarding the whole multi stack. There might
* still have occurred problems on individual transfers even when this
* returns OK.
*/
CURL_EXTERN CURLMcode curl_multi_perform(CURLM *multi_handle,
int *running_handles);
/*
* Name: curl_multi_cleanup()
*
* Desc: Cleans up and removes a whole multi stack. It does not free or
* touch any individual easy handles in any way. We need to define
* in what state those handles will be if this function is called
* in the middle of a transfer.
*
* Returns: CURLMcode type, general multi error code.
*/
CURL_EXTERN CURLMcode curl_multi_cleanup(CURLM *multi_handle);
/*
* Name: curl_multi_info_read()
*
* Desc: Ask the multi handle if there's any messages/informationals from
* the individual transfers. Messages include informationals such as
* error code from the transfer or just the fact that a transfer is
* completed. More details on these should be written down as well.
*
* Repeated calls to this function will return a new struct each
* time, until a special "end of msgs" struct is returned as a signal
* that there is no more to get at this point.
*
* The data the returned pointer points to will not survive calling
* curl_multi_cleanup().
*
* The 'CURLMsg' struct is meant to be very simple and only contain
* very basic information. If more involved information is wanted,
* we will provide the particular "transfer handle" in that struct
* and that should/could/would be used in subsequent
* curl_easy_getinfo() calls (or similar). The point being that we
* must never expose complex structs to applications, as then we'll
* undoubtedly get backwards compatibility problems in the future.
*
* Returns: A pointer to a filled-in struct, or NULL if it failed or ran out
* of structs. It also writes the number of messages left in the
* queue (after this read) in the integer the second argument points
* to.
*/
CURL_EXTERN CURLMsg *curl_multi_info_read(CURLM *multi_handle,
int *msgs_in_queue);
/*
* Name: curl_multi_strerror()
*
* Desc: The curl_multi_strerror function may be used to turn a CURLMcode
* value into the equivalent human readable error string. This is
* useful for printing meaningful error messages.
*
* Returns: A pointer to a zero-terminated error message.
*/
CURL_EXTERN const char *curl_multi_strerror(CURLMcode);
/*
* Name: curl_multi_socket() and
* curl_multi_socket_all()
*
* Desc: An alternative version of curl_multi_perform() that allows the
* application to pass in one of the file descriptors that have been
* detected to have "action" on them and let libcurl perform.
* See man page for details.
*/
#define CURL_POLL_NONE 0
#define CURL_POLL_IN 1
#define CURL_POLL_OUT 2
#define CURL_POLL_INOUT 3
#define CURL_POLL_REMOVE 4
#define CURL_SOCKET_TIMEOUT CURL_SOCKET_BAD
#define CURL_CSELECT_IN 0x01
#define CURL_CSELECT_OUT 0x02
#define CURL_CSELECT_ERR 0x04
typedef int (*curl_socket_callback)(CURL *easy, /* easy handle */
curl_socket_t s, /* socket */
int what, /* see above */
void *userp, /* private callback
pointer */
void *socketp); /* private socket
pointer */
/*
* Name: curl_multi_timer_callback
*
* Desc: Called by libcurl whenever the library detects a change in the
* maximum number of milliseconds the app is allowed to wait before
* curl_multi_socket() or curl_multi_perform() must be called
* (to allow libcurl's timed events to take place).
*
* Returns: The callback should return zero.
*/
typedef int (*curl_multi_timer_callback)(CURLM *multi, /* multi handle */
long timeout_ms, /* see above */
void *userp); /* private callback
pointer */
CURL_EXTERN CURLMcode curl_multi_socket(CURLM *multi_handle, curl_socket_t s,
int *running_handles);
CURL_EXTERN CURLMcode curl_multi_socket_action(CURLM *multi_handle,
curl_socket_t s,
int ev_bitmask,
int *running_handles);
CURL_EXTERN CURLMcode curl_multi_socket_all(CURLM *multi_handle,
int *running_handles);
#ifndef CURL_ALLOW_OLD_MULTI_SOCKET
/* This macro below was added in 7.16.3 to push users who recompile to use
the new curl_multi_socket_action() instead of the old curl_multi_socket()
*/
#define curl_multi_socket(x,y,z) curl_multi_socket_action(x,y,0,z)
#endif
/*
* Name: curl_multi_timeout()
*
* Desc: Returns the maximum number of milliseconds the app is allowed to
* wait before curl_multi_socket() or curl_multi_perform() must be
* called (to allow libcurl's timed events to take place).
*
* Returns: CURLM error code.
*/
CURL_EXTERN CURLMcode curl_multi_timeout(CURLM *multi_handle,
long *milliseconds);
#undef CINIT /* re-using the same name as in curl.h */
#ifdef CURL_ISOCPP
#define CINIT(name,type,num) CURLMOPT_ ## name = CURLOPTTYPE_ ## type + num
#else
/* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. */
#define LONG CURLOPTTYPE_LONG
#define OBJECTPOINT CURLOPTTYPE_OBJECTPOINT
#define FUNCTIONPOINT CURLOPTTYPE_FUNCTIONPOINT
#define OFF_T CURLOPTTYPE_OFF_T
#define CINIT(name,type,number) CURLMOPT_/**/name = type + number
#endif
typedef enum {
/* This is the socket callback function pointer */
CINIT(SOCKETFUNCTION, FUNCTIONPOINT, 1),
/* This is the argument passed to the socket callback */
CINIT(SOCKETDATA, OBJECTPOINT, 2),
/* set to 1 to enable pipelining for this multi handle */
CINIT(PIPELINING, LONG, 3),
/* This is the timer callback function pointer */
CINIT(TIMERFUNCTION, FUNCTIONPOINT, 4),
/* This is the argument passed to the timer callback */
CINIT(TIMERDATA, OBJECTPOINT, 5),
/* maximum number of entries in the connection cache */
CINIT(MAXCONNECTS, LONG, 6),
/* maximum number of (pipelining) connections to one host */
CINIT(MAX_HOST_CONNECTIONS, LONG, 7),
/* maximum number of requests in a pipeline */
CINIT(MAX_PIPELINE_LENGTH, LONG, 8),
/* a connection with a content-length longer than this
will not be considered for pipelining */
CINIT(CONTENT_LENGTH_PENALTY_SIZE, OFF_T, 9),
/* a connection with a chunk length longer than this
will not be considered for pipelining */
CINIT(CHUNK_LENGTH_PENALTY_SIZE, OFF_T, 10),
/* a list of site names(+port) that are blacklisted from
pipelining */
CINIT(PIPELINING_SITE_BL, OBJECTPOINT, 11),
/* a list of server types that are blacklisted from
pipelining */
CINIT(PIPELINING_SERVER_BL, OBJECTPOINT, 12),
/* maximum number of open connections in total */
CINIT(MAX_TOTAL_CONNECTIONS, LONG, 13),
/* This is the server push callback function pointer */
CINIT(PUSHFUNCTION, FUNCTIONPOINT, 14),
/* This is the argument passed to the server push callback */
CINIT(PUSHDATA, OBJECTPOINT, 15),
CURLMOPT_LASTENTRY /* the last unused */
} CURLMoption;
/*
* Name: curl_multi_setopt()
*
* Desc: Sets options for the multi handle.
*
* Returns: CURLM error code.
*/
CURL_EXTERN CURLMcode curl_multi_setopt(CURLM *multi_handle,
CURLMoption option, ...);
/*
* Name: curl_multi_assign()
*
* Desc: This function sets an association in the multi handle between the
* given socket and a private pointer of the application. This is
* (only) useful for curl_multi_socket uses.
*
* Returns: CURLM error code.
*/
CURL_EXTERN CURLMcode curl_multi_assign(CURLM *multi_handle,
curl_socket_t sockfd, void *sockp);
/*
* Name: curl_push_callback
*
* Desc: This callback gets called when a new stream is being pushed by the
* server. It approves or denies the new stream.
*
* Returns: CURL_PUSH_OK or CURL_PUSH_DENY.
*/
#define CURL_PUSH_OK 0
#define CURL_PUSH_DENY 1
struct curl_pushheaders; /* forward declaration only */
CURL_EXTERN char *curl_pushheader_bynum(struct curl_pushheaders *h,
size_t num);
CURL_EXTERN char *curl_pushheader_byname(struct curl_pushheaders *h,
const char *name);
typedef int (*curl_push_callback)(CURL *parent,
CURL *easy,
size_t num_headers,
struct curl_pushheaders *headers,
void *userp);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif
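
A minimal sketch of the pull-style event loop the comments above describe: perform whenever there is work, wait otherwise, then read the per-transfer result from the message queue (assumes a standard libcurl install, linked with -lcurl):

#include <curl/curl.h>
#include <stdio.h>

int main(void)
{
  CURL *easy;
  CURLM *multi;
  int running = 0;

  curl_global_init(CURL_GLOBAL_DEFAULT);
  easy = curl_easy_init();
  multi = curl_multi_init();

  curl_easy_setopt(easy, CURLOPT_URL, "http://example.com/");
  curl_multi_add_handle(multi, easy);

  /* drive the transfer: perform, then wait for socket activity */
  do {
    int numfds = 0;
    curl_multi_perform(multi, &running);
    if(running)
      curl_multi_wait(multi, NULL, 0, 1000, &numfds);
  } while(running);

  /* curl_multi_perform() returning OK says nothing about individual
     transfers, so fetch their results from the message queue */
  {
    CURLMsg *m;
    int left = 0;
    while((m = curl_multi_info_read(multi, &left)))
      if(m->msg == CURLMSG_DONE)
        printf("transfer done, result code %d\n", (int)m->data.result);
  }

  curl_multi_remove_handle(multi, easy);
  curl_easy_cleanup(easy);
  curl_multi_cleanup(multi);
  curl_global_cleanup();
  return 0;
}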

33 deps/libcurl/include/curl/stdcheaders.h vendored Normal file

@@ -0,0 +1,33 @@
#ifndef __STDC_HEADERS_H
#define __STDC_HEADERS_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2010, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include <sys/types.h>
size_t fread (void *, size_t, size_t, FILE *);
size_t fwrite (const void *, size_t, size_t, FILE *);
int strcasecmp(const char *, const char *);
int strncasecmp(const char *, const char *, size_t);
#endif /* __STDC_HEADERS_H */

622 deps/libcurl/include/curl/typecheck-gcc.h vendored Normal file

@@ -0,0 +1,622 @@
#ifndef __CURL_TYPECHECK_GCC_H
#define __CURL_TYPECHECK_GCC_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* wraps curl_easy_setopt() with typechecking */
/* To add a new kind of warning, add an
* if(_curl_is_sometype_option(_curl_opt))
* if(!_curl_is_sometype(value))
* _curl_easy_setopt_err_sometype();
* block and define _curl_is_sometype_option, _curl_is_sometype and
* _curl_easy_setopt_err_sometype below
*
* NOTE: We use two nested 'if' statements here instead of the && operator, in
* order to work around gcc bug #32061. It affects only gcc 4.3.x/4.4.x
* when compiling with -Wlogical-op.
*
* To add an option that uses the same type as an existing option, you'll just
* need to extend the appropriate _curl_*_option macro
*/
#define curl_easy_setopt(handle, option, value) \
__extension__ ({ \
__typeof__ (option) _curl_opt = option; \
if(__builtin_constant_p(_curl_opt)) { \
if(_curl_is_long_option(_curl_opt)) \
if(!_curl_is_long(value)) \
_curl_easy_setopt_err_long(); \
if(_curl_is_off_t_option(_curl_opt)) \
if(!_curl_is_off_t(value)) \
_curl_easy_setopt_err_curl_off_t(); \
if(_curl_is_string_option(_curl_opt)) \
if(!_curl_is_string(value)) \
_curl_easy_setopt_err_string(); \
if(_curl_is_write_cb_option(_curl_opt)) \
if(!_curl_is_write_cb(value)) \
_curl_easy_setopt_err_write_callback(); \
if((_curl_opt) == CURLOPT_READFUNCTION) \
if(!_curl_is_read_cb(value)) \
_curl_easy_setopt_err_read_cb(); \
if((_curl_opt) == CURLOPT_IOCTLFUNCTION) \
if(!_curl_is_ioctl_cb(value)) \
_curl_easy_setopt_err_ioctl_cb(); \
if((_curl_opt) == CURLOPT_SOCKOPTFUNCTION) \
if(!_curl_is_sockopt_cb(value)) \
_curl_easy_setopt_err_sockopt_cb(); \
if((_curl_opt) == CURLOPT_OPENSOCKETFUNCTION) \
if(!_curl_is_opensocket_cb(value)) \
_curl_easy_setopt_err_opensocket_cb(); \
if((_curl_opt) == CURLOPT_PROGRESSFUNCTION) \
if(!_curl_is_progress_cb(value)) \
_curl_easy_setopt_err_progress_cb(); \
if((_curl_opt) == CURLOPT_DEBUGFUNCTION) \
if(!_curl_is_debug_cb(value)) \
_curl_easy_setopt_err_debug_cb(); \
if((_curl_opt) == CURLOPT_SSL_CTX_FUNCTION) \
if(!_curl_is_ssl_ctx_cb(value)) \
_curl_easy_setopt_err_ssl_ctx_cb(); \
if(_curl_is_conv_cb_option(_curl_opt)) \
if(!_curl_is_conv_cb(value)) \
_curl_easy_setopt_err_conv_cb(); \
if((_curl_opt) == CURLOPT_SEEKFUNCTION) \
if(!_curl_is_seek_cb(value)) \
_curl_easy_setopt_err_seek_cb(); \
if(_curl_is_cb_data_option(_curl_opt)) \
if(!_curl_is_cb_data(value)) \
_curl_easy_setopt_err_cb_data(); \
if((_curl_opt) == CURLOPT_ERRORBUFFER) \
if(!_curl_is_error_buffer(value)) \
_curl_easy_setopt_err_error_buffer(); \
if((_curl_opt) == CURLOPT_STDERR) \
if(!_curl_is_FILE(value)) \
_curl_easy_setopt_err_FILE(); \
if(_curl_is_postfields_option(_curl_opt)) \
if(!_curl_is_postfields(value)) \
_curl_easy_setopt_err_postfields(); \
if((_curl_opt) == CURLOPT_HTTPPOST) \
if(!_curl_is_arr((value), struct curl_httppost)) \
_curl_easy_setopt_err_curl_httpost(); \
if(_curl_is_slist_option(_curl_opt)) \
if(!_curl_is_arr((value), struct curl_slist)) \
_curl_easy_setopt_err_curl_slist(); \
if((_curl_opt) == CURLOPT_SHARE) \
if(!_curl_is_ptr((value), CURLSH)) \
_curl_easy_setopt_err_CURLSH(); \
} \
curl_easy_setopt(handle, _curl_opt, value); \
})
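
For illustration only, the same mechanism reduced to one hypothetical option; demo_setopt, DEMO_OPT_TIMEOUT and _demo_setopt_err_long are made-up names, not libcurl symbols. The diagnostic comes from calling a function carrying GCC's __warning__ attribute; when the compiler can prove the type test passes, that call is eliminated and nothing is reported.

/* hypothetical one-option version of the wrapper above (illustration only) */
static void __attribute__((__warning__(
  "demo_setopt expects a long argument for DEMO_OPT_TIMEOUT")))
__attribute__((__unused__)) __attribute__((__noinline__))
_demo_setopt_err_long(void) { __asm__(""); }

#define DEMO_OPT_TIMEOUT 10

#define demo_setopt(opt, value)                                           \
__extension__ ({                                                          \
  if(__builtin_constant_p(opt))                                           \
    if((opt) == DEMO_OPT_TIMEOUT)                                         \
      if(!__builtin_types_compatible_p(__typeof__(value), long))          \
        _demo_setopt_err_long();                                          \
  (void)(value);                                                          \
})

/* demo_setopt(DEMO_OPT_TIMEOUT, 30L) is silent;
   demo_setopt(DEMO_OPT_TIMEOUT, "30") triggers the warning when optimizing */
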
/* wraps curl_easy_getinfo() with typechecking */
/* FIXME: don't allow const pointers */
#define curl_easy_getinfo(handle, info, arg) \
__extension__ ({ \
__typeof__ (info) _curl_info = info; \
if(__builtin_constant_p(_curl_info)) { \
if(_curl_is_string_info(_curl_info)) \
if(!_curl_is_arr((arg), char *)) \
_curl_easy_getinfo_err_string(); \
if(_curl_is_long_info(_curl_info)) \
if(!_curl_is_arr((arg), long)) \
_curl_easy_getinfo_err_long(); \
if(_curl_is_double_info(_curl_info)) \
if(!_curl_is_arr((arg), double)) \
_curl_easy_getinfo_err_double(); \
if(_curl_is_slist_info(_curl_info)) \
if(!_curl_is_arr((arg), struct curl_slist *)) \
_curl_easy_getinfo_err_curl_slist(); \
} \
curl_easy_getinfo(handle, _curl_info, arg); \
})
/* TODO: typechecking for curl_share_setopt() and curl_multi_setopt(),
* for now just make sure that the functions are called with three
* arguments
*/
#define curl_share_setopt(share,opt,param) curl_share_setopt(share,opt,param)
#define curl_multi_setopt(handle,opt,param) curl_multi_setopt(handle,opt,param)
/* the actual warnings, triggered by calling the _curl_easy_setopt_err*
* functions */
/* To define a new warning, use _CURL_WARNING(identifier, "message") */
#define _CURL_WARNING(id, message) \
static void __attribute__((__warning__(message))) \
__attribute__((__unused__)) __attribute__((__noinline__)) \
id(void) { __asm__(""); }
_CURL_WARNING(_curl_easy_setopt_err_long,
"curl_easy_setopt expects a long argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_curl_off_t,
"curl_easy_setopt expects a curl_off_t argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_string,
"curl_easy_setopt expects a "
"string (char* or char[]) argument for this option"
)
_CURL_WARNING(_curl_easy_setopt_err_write_callback,
"curl_easy_setopt expects a curl_write_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_read_cb,
"curl_easy_setopt expects a curl_read_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_ioctl_cb,
"curl_easy_setopt expects a curl_ioctl_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_sockopt_cb,
"curl_easy_setopt expects a curl_sockopt_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_opensocket_cb,
"curl_easy_setopt expects a "
"curl_opensocket_callback argument for this option"
)
_CURL_WARNING(_curl_easy_setopt_err_progress_cb,
"curl_easy_setopt expects a curl_progress_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_debug_cb,
"curl_easy_setopt expects a curl_debug_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_ssl_ctx_cb,
"curl_easy_setopt expects a curl_ssl_ctx_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_conv_cb,
"curl_easy_setopt expects a curl_conv_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_seek_cb,
"curl_easy_setopt expects a curl_seek_callback argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_cb_data,
"curl_easy_setopt expects a "
"private data pointer as argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_error_buffer,
"curl_easy_setopt expects a "
"char buffer of CURL_ERROR_SIZE as argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_FILE,
"curl_easy_setopt expects a FILE* argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_postfields,
"curl_easy_setopt expects a void* or char* argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_curl_httpost,
"curl_easy_setopt expects a struct curl_httppost* argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_curl_slist,
"curl_easy_setopt expects a struct curl_slist* argument for this option")
_CURL_WARNING(_curl_easy_setopt_err_CURLSH,
"curl_easy_setopt expects a CURLSH* argument for this option")
_CURL_WARNING(_curl_easy_getinfo_err_string,
"curl_easy_getinfo expects a pointer to char * for this info")
_CURL_WARNING(_curl_easy_getinfo_err_long,
"curl_easy_getinfo expects a pointer to long for this info")
_CURL_WARNING(_curl_easy_getinfo_err_double,
"curl_easy_getinfo expects a pointer to double for this info")
_CURL_WARNING(_curl_easy_getinfo_err_curl_slist,
"curl_easy_getinfo expects a pointer to struct curl_slist * for this info")
/* groups of curl_easy_setops options that take the same type of argument */
/* To add a new option to one of the groups, just add
* (option) == CURLOPT_SOMETHING
* to the or-expression. If the option takes a long or curl_off_t, you don't
* have to do anything
*/
/* evaluates to true if option takes a long argument */
#define _curl_is_long_option(option) \
(0 < (option) && (option) < CURLOPTTYPE_OBJECTPOINT)
#define _curl_is_off_t_option(option) \
((option) > CURLOPTTYPE_OFF_T)
/* evaluates to true if option takes a char* argument */
#define _curl_is_string_option(option) \
((option) == CURLOPT_ACCEPT_ENCODING || \
(option) == CURLOPT_CAINFO || \
(option) == CURLOPT_CAPATH || \
(option) == CURLOPT_COOKIE || \
(option) == CURLOPT_COOKIEFILE || \
(option) == CURLOPT_COOKIEJAR || \
(option) == CURLOPT_COOKIELIST || \
(option) == CURLOPT_CRLFILE || \
(option) == CURLOPT_CUSTOMREQUEST || \
(option) == CURLOPT_DEFAULT_PROTOCOL || \
(option) == CURLOPT_DNS_INTERFACE || \
(option) == CURLOPT_DNS_LOCAL_IP4 || \
(option) == CURLOPT_DNS_LOCAL_IP6 || \
(option) == CURLOPT_DNS_SERVERS || \
(option) == CURLOPT_EGDSOCKET || \
(option) == CURLOPT_FTPPORT || \
(option) == CURLOPT_FTP_ACCOUNT || \
(option) == CURLOPT_FTP_ALTERNATIVE_TO_USER || \
(option) == CURLOPT_INTERFACE || \
(option) == CURLOPT_ISSUERCERT || \
(option) == CURLOPT_KEYPASSWD || \
(option) == CURLOPT_KRBLEVEL || \
(option) == CURLOPT_LOGIN_OPTIONS || \
(option) == CURLOPT_MAIL_AUTH || \
(option) == CURLOPT_MAIL_FROM || \
(option) == CURLOPT_NETRC_FILE || \
(option) == CURLOPT_NOPROXY || \
(option) == CURLOPT_PASSWORD || \
(option) == CURLOPT_PINNEDPUBLICKEY || \
(option) == CURLOPT_PROXY || \
(option) == CURLOPT_PROXYPASSWORD || \
(option) == CURLOPT_PROXYUSERNAME || \
(option) == CURLOPT_PROXYUSERPWD || \
(option) == CURLOPT_PROXY_SERVICE_NAME || \
(option) == CURLOPT_RANDOM_FILE || \
(option) == CURLOPT_RANGE || \
(option) == CURLOPT_REFERER || \
(option) == CURLOPT_RTSP_SESSION_ID || \
(option) == CURLOPT_RTSP_STREAM_URI || \
(option) == CURLOPT_RTSP_TRANSPORT || \
(option) == CURLOPT_SERVICE_NAME || \
(option) == CURLOPT_SOCKS5_GSSAPI_SERVICE || \
(option) == CURLOPT_SSH_HOST_PUBLIC_KEY_MD5 || \
(option) == CURLOPT_SSH_KNOWNHOSTS || \
(option) == CURLOPT_SSH_PRIVATE_KEYFILE || \
(option) == CURLOPT_SSH_PUBLIC_KEYFILE || \
(option) == CURLOPT_SSLCERT || \
(option) == CURLOPT_SSLCERTTYPE || \
(option) == CURLOPT_SSLENGINE || \
(option) == CURLOPT_SSLKEY || \
(option) == CURLOPT_SSLKEYTYPE || \
(option) == CURLOPT_SSL_CIPHER_LIST || \
(option) == CURLOPT_TLSAUTH_PASSWORD || \
(option) == CURLOPT_TLSAUTH_TYPE || \
(option) == CURLOPT_TLSAUTH_USERNAME || \
(option) == CURLOPT_UNIX_SOCKET_PATH || \
(option) == CURLOPT_URL || \
(option) == CURLOPT_USERAGENT || \
(option) == CURLOPT_USERNAME || \
(option) == CURLOPT_USERPWD || \
(option) == CURLOPT_XOAUTH2_BEARER || \
0)
/* evaluates to true if option takes a curl_write_callback argument */
#define _curl_is_write_cb_option(option) \
((option) == CURLOPT_HEADERFUNCTION || \
(option) == CURLOPT_WRITEFUNCTION)
/* evaluates to true if option takes a curl_conv_callback argument */
#define _curl_is_conv_cb_option(option) \
((option) == CURLOPT_CONV_TO_NETWORK_FUNCTION || \
(option) == CURLOPT_CONV_FROM_NETWORK_FUNCTION || \
(option) == CURLOPT_CONV_FROM_UTF8_FUNCTION)
/* evaluates to true if option takes a data argument to pass to a callback */
#define _curl_is_cb_data_option(option) \
((option) == CURLOPT_CHUNK_DATA || \
(option) == CURLOPT_CLOSESOCKETDATA || \
(option) == CURLOPT_DEBUGDATA || \
(option) == CURLOPT_FNMATCH_DATA || \
(option) == CURLOPT_HEADERDATA || \
(option) == CURLOPT_INTERLEAVEDATA || \
(option) == CURLOPT_IOCTLDATA || \
(option) == CURLOPT_OPENSOCKETDATA || \
(option) == CURLOPT_PRIVATE || \
(option) == CURLOPT_PROGRESSDATA || \
(option) == CURLOPT_READDATA || \
(option) == CURLOPT_SEEKDATA || \
(option) == CURLOPT_SOCKOPTDATA || \
(option) == CURLOPT_SSH_KEYDATA || \
(option) == CURLOPT_SSL_CTX_DATA || \
(option) == CURLOPT_WRITEDATA || \
0)
/* evaluates to true if option takes a POST data argument (void* or char*) */
#define _curl_is_postfields_option(option) \
((option) == CURLOPT_POSTFIELDS || \
(option) == CURLOPT_COPYPOSTFIELDS || \
0)
/* evaluates to true if option takes a struct curl_slist * argument */
#define _curl_is_slist_option(option) \
((option) == CURLOPT_HTTP200ALIASES || \
(option) == CURLOPT_HTTPHEADER || \
(option) == CURLOPT_MAIL_RCPT || \
(option) == CURLOPT_POSTQUOTE || \
(option) == CURLOPT_PREQUOTE || \
(option) == CURLOPT_PROXYHEADER || \
(option) == CURLOPT_QUOTE || \
(option) == CURLOPT_RESOLVE || \
(option) == CURLOPT_TELNETOPTIONS || \
0)
/* groups of curl_easy_getinfo infos that take the same type of argument */
/* evaluates to true if info expects a pointer to char * argument */
#define _curl_is_string_info(info) \
(CURLINFO_STRING < (info) && (info) < CURLINFO_LONG)
/* evaluates to true if info expects a pointer to long argument */
#define _curl_is_long_info(info) \
(CURLINFO_LONG < (info) && (info) < CURLINFO_DOUBLE)
/* evaluates to true if info expects a pointer to double argument */
#define _curl_is_double_info(info) \
(CURLINFO_DOUBLE < (info) && (info) < CURLINFO_SLIST)
/* true if info expects a pointer to struct curl_slist * argument */
#define _curl_is_slist_info(info) \
(CURLINFO_SLIST < (info))
/* typecheck helpers -- check whether given expression has requested type*/
/* For pointers, you can use the _curl_is_ptr/_curl_is_arr macros,
* otherwise define a new macro. Search for __builtin_types_compatible_p
* in the GCC manual.
* NOTE: these macros MUST NOT EVALUATE their arguments! The argument is
* the actual expression passed to the curl_easy_setopt macro. This
* means that you can only apply the sizeof and __typeof__ operators, no
* == or whatsoever.
*/
/* XXX: should evaluate to true iff expr is a pointer */
#define _curl_is_any_ptr(expr) \
(sizeof(expr) == sizeof(void*))
/* evaluates to true if expr is NULL */
/* XXX: must not evaluate expr, so this check is not accurate */
#define _curl_is_NULL(expr) \
(__builtin_types_compatible_p(__typeof__(expr), __typeof__(NULL)))
/* evaluates to true if expr is type*, const type* or NULL */
#define _curl_is_ptr(expr, type) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), type *) || \
__builtin_types_compatible_p(__typeof__(expr), const type *))
/* evaluates to true if expr is one of type[], type*, NULL or const type* */
#define _curl_is_arr(expr, type) \
(_curl_is_ptr((expr), type) || \
__builtin_types_compatible_p(__typeof__(expr), type []))
/* evaluates to true if expr is a string */
#define _curl_is_string(expr) \
(_curl_is_arr((expr), char) || \
_curl_is_arr((expr), signed char) || \
_curl_is_arr((expr), unsigned char))
/* evaluates to true if expr is a long (no matter the signedness)
* XXX: for now, int is also accepted (and therefore short and char, which
* are promoted to int when passed to a variadic function) */
#define _curl_is_long(expr) \
(__builtin_types_compatible_p(__typeof__(expr), long) || \
__builtin_types_compatible_p(__typeof__(expr), signed long) || \
__builtin_types_compatible_p(__typeof__(expr), unsigned long) || \
__builtin_types_compatible_p(__typeof__(expr), int) || \
__builtin_types_compatible_p(__typeof__(expr), signed int) || \
__builtin_types_compatible_p(__typeof__(expr), unsigned int) || \
__builtin_types_compatible_p(__typeof__(expr), short) || \
__builtin_types_compatible_p(__typeof__(expr), signed short) || \
__builtin_types_compatible_p(__typeof__(expr), unsigned short) || \
__builtin_types_compatible_p(__typeof__(expr), char) || \
__builtin_types_compatible_p(__typeof__(expr), signed char) || \
__builtin_types_compatible_p(__typeof__(expr), unsigned char))
/* evaluates to true if expr is of type curl_off_t */
#define _curl_is_off_t(expr) \
(__builtin_types_compatible_p(__typeof__(expr), curl_off_t))
/* evaluates to true if expr is a buffer suitable for CURLOPT_ERRORBUFFER */
/* XXX: also check size of an char[] array? */
#define _curl_is_error_buffer(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), char *) || \
__builtin_types_compatible_p(__typeof__(expr), char[]))
/* evaluates to true if expr is of type (const) void* or (const) FILE* */
#if 0
#define _curl_is_cb_data(expr) \
(_curl_is_ptr((expr), void) || \
_curl_is_ptr((expr), FILE))
#else /* be less strict */
#define _curl_is_cb_data(expr) \
_curl_is_any_ptr(expr)
#endif
/* evaluates to true if expr is of type FILE* */
#define _curl_is_FILE(expr) \
(__builtin_types_compatible_p(__typeof__(expr), FILE *))
/* evaluates to true if expr can be passed as POST data (void* or char*) */
#define _curl_is_postfields(expr) \
(_curl_is_ptr((expr), void) || \
_curl_is_arr((expr), char))
/* FIXME: the whole callback checking is messy...
* The idea is to tolerate char vs. void and const vs. not const
* pointers in arguments at least
*/
/* helper: __builtin_types_compatible_p distinguishes between functions and
* function pointers, hide it */
#define _curl_callback_compatible(func, type) \
(__builtin_types_compatible_p(__typeof__(func), type) || \
__builtin_types_compatible_p(__typeof__(func), type*))
/* evaluates to true if expr is of type curl_read_callback or "similar" */
#define _curl_is_read_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), __typeof__(fread)) || \
__builtin_types_compatible_p(__typeof__(expr), curl_read_callback) || \
_curl_callback_compatible((expr), _curl_read_callback1) || \
_curl_callback_compatible((expr), _curl_read_callback2) || \
_curl_callback_compatible((expr), _curl_read_callback3) || \
_curl_callback_compatible((expr), _curl_read_callback4) || \
_curl_callback_compatible((expr), _curl_read_callback5) || \
_curl_callback_compatible((expr), _curl_read_callback6))
typedef size_t (_curl_read_callback1)(char *, size_t, size_t, void*);
typedef size_t (_curl_read_callback2)(char *, size_t, size_t, const void*);
typedef size_t (_curl_read_callback3)(char *, size_t, size_t, FILE*);
typedef size_t (_curl_read_callback4)(void *, size_t, size_t, void*);
typedef size_t (_curl_read_callback5)(void *, size_t, size_t, const void*);
typedef size_t (_curl_read_callback6)(void *, size_t, size_t, FILE*);
/* evaluates to true if expr is of type curl_write_callback or "similar" */
#define _curl_is_write_cb(expr) \
(_curl_is_read_cb(expr) || \
__builtin_types_compatible_p(__typeof__(expr), __typeof__(fwrite)) || \
__builtin_types_compatible_p(__typeof__(expr), curl_write_callback) || \
_curl_callback_compatible((expr), _curl_write_callback1) || \
_curl_callback_compatible((expr), _curl_write_callback2) || \
_curl_callback_compatible((expr), _curl_write_callback3) || \
_curl_callback_compatible((expr), _curl_write_callback4) || \
_curl_callback_compatible((expr), _curl_write_callback5) || \
_curl_callback_compatible((expr), _curl_write_callback6))
typedef size_t (_curl_write_callback1)(const char *, size_t, size_t, void*);
typedef size_t (_curl_write_callback2)(const char *, size_t, size_t,
const void*);
typedef size_t (_curl_write_callback3)(const char *, size_t, size_t, FILE*);
typedef size_t (_curl_write_callback4)(const void *, size_t, size_t, void*);
typedef size_t (_curl_write_callback5)(const void *, size_t, size_t,
const void*);
typedef size_t (_curl_write_callback6)(const void *, size_t, size_t, FILE*);
/* evaluates to true if expr is of type curl_ioctl_callback or "similar" */
#define _curl_is_ioctl_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_ioctl_callback) || \
_curl_callback_compatible((expr), _curl_ioctl_callback1) || \
_curl_callback_compatible((expr), _curl_ioctl_callback2) || \
_curl_callback_compatible((expr), _curl_ioctl_callback3) || \
_curl_callback_compatible((expr), _curl_ioctl_callback4))
typedef curlioerr (_curl_ioctl_callback1)(CURL *, int, void*);
typedef curlioerr (_curl_ioctl_callback2)(CURL *, int, const void*);
typedef curlioerr (_curl_ioctl_callback3)(CURL *, curliocmd, void*);
typedef curlioerr (_curl_ioctl_callback4)(CURL *, curliocmd, const void*);
/* evaluates to true if expr is of type curl_sockopt_callback or "similar" */
#define _curl_is_sockopt_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_sockopt_callback) || \
_curl_callback_compatible((expr), _curl_sockopt_callback1) || \
_curl_callback_compatible((expr), _curl_sockopt_callback2))
typedef int (_curl_sockopt_callback1)(void *, curl_socket_t, curlsocktype);
typedef int (_curl_sockopt_callback2)(const void *, curl_socket_t,
curlsocktype);
/* evaluates to true if expr is of type curl_opensocket_callback or
"similar" */
#define _curl_is_opensocket_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_opensocket_callback) ||\
_curl_callback_compatible((expr), _curl_opensocket_callback1) || \
_curl_callback_compatible((expr), _curl_opensocket_callback2) || \
_curl_callback_compatible((expr), _curl_opensocket_callback3) || \
_curl_callback_compatible((expr), _curl_opensocket_callback4))
typedef curl_socket_t (_curl_opensocket_callback1)
(void *, curlsocktype, struct curl_sockaddr *);
typedef curl_socket_t (_curl_opensocket_callback2)
(void *, curlsocktype, const struct curl_sockaddr *);
typedef curl_socket_t (_curl_opensocket_callback3)
(const void *, curlsocktype, struct curl_sockaddr *);
typedef curl_socket_t (_curl_opensocket_callback4)
(const void *, curlsocktype, const struct curl_sockaddr *);
/* evaluates to true if expr is of type curl_progress_callback or "similar" */
#define _curl_is_progress_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_progress_callback) || \
_curl_callback_compatible((expr), _curl_progress_callback1) || \
_curl_callback_compatible((expr), _curl_progress_callback2))
typedef int (_curl_progress_callback1)(void *,
double, double, double, double);
typedef int (_curl_progress_callback2)(const void *,
double, double, double, double);
/* evaluates to true if expr is of type curl_debug_callback or "similar" */
#define _curl_is_debug_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_debug_callback) || \
_curl_callback_compatible((expr), _curl_debug_callback1) || \
_curl_callback_compatible((expr), _curl_debug_callback2) || \
_curl_callback_compatible((expr), _curl_debug_callback3) || \
_curl_callback_compatible((expr), _curl_debug_callback4) || \
_curl_callback_compatible((expr), _curl_debug_callback5) || \
_curl_callback_compatible((expr), _curl_debug_callback6) || \
_curl_callback_compatible((expr), _curl_debug_callback7) || \
_curl_callback_compatible((expr), _curl_debug_callback8))
typedef int (_curl_debug_callback1) (CURL *,
curl_infotype, char *, size_t, void *);
typedef int (_curl_debug_callback2) (CURL *,
curl_infotype, char *, size_t, const void *);
typedef int (_curl_debug_callback3) (CURL *,
curl_infotype, const char *, size_t, void *);
typedef int (_curl_debug_callback4) (CURL *,
curl_infotype, const char *, size_t, const void *);
typedef int (_curl_debug_callback5) (CURL *,
curl_infotype, unsigned char *, size_t, void *);
typedef int (_curl_debug_callback6) (CURL *,
curl_infotype, unsigned char *, size_t, const void *);
typedef int (_curl_debug_callback7) (CURL *,
curl_infotype, const unsigned char *, size_t, void *);
typedef int (_curl_debug_callback8) (CURL *,
curl_infotype, const unsigned char *, size_t, const void *);
/* evaluates to true if expr is of type curl_ssl_ctx_callback or "similar" */
/* this is getting even messier... */
#define _curl_is_ssl_ctx_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_ssl_ctx_callback) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback1) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback2) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback3) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback4) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback5) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback6) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback7) || \
_curl_callback_compatible((expr), _curl_ssl_ctx_callback8))
typedef CURLcode (_curl_ssl_ctx_callback1)(CURL *, void *, void *);
typedef CURLcode (_curl_ssl_ctx_callback2)(CURL *, void *, const void *);
typedef CURLcode (_curl_ssl_ctx_callback3)(CURL *, const void *, void *);
typedef CURLcode (_curl_ssl_ctx_callback4)(CURL *, const void *, const void *);
#ifdef HEADER_SSL_H
/* hack: if we included OpenSSL's ssl.h, we know about SSL_CTX
* this will of course break if we're included before OpenSSL headers...
*/
typedef CURLcode (_curl_ssl_ctx_callback5)(CURL *, SSL_CTX, void *);
typedef CURLcode (_curl_ssl_ctx_callback6)(CURL *, SSL_CTX, const void *);
typedef CURLcode (_curl_ssl_ctx_callback7)(CURL *, const SSL_CTX, void *);
typedef CURLcode (_curl_ssl_ctx_callback8)(CURL *, const SSL_CTX,
const void *);
#else
typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback5;
typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback6;
typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback7;
typedef _curl_ssl_ctx_callback1 _curl_ssl_ctx_callback8;
#endif
/* evaluates to true if expr is of type curl_conv_callback or "similar" */
#define _curl_is_conv_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_conv_callback) || \
_curl_callback_compatible((expr), _curl_conv_callback1) || \
_curl_callback_compatible((expr), _curl_conv_callback2) || \
_curl_callback_compatible((expr), _curl_conv_callback3) || \
_curl_callback_compatible((expr), _curl_conv_callback4))
typedef CURLcode (*_curl_conv_callback1)(char *, size_t length);
typedef CURLcode (*_curl_conv_callback2)(const char *, size_t length);
typedef CURLcode (*_curl_conv_callback3)(void *, size_t length);
typedef CURLcode (*_curl_conv_callback4)(const void *, size_t length);
/* evaluates to true if expr is of type curl_seek_callback or "similar" */
#define _curl_is_seek_cb(expr) \
(_curl_is_NULL(expr) || \
__builtin_types_compatible_p(__typeof__(expr), curl_seek_callback) || \
_curl_callback_compatible((expr), _curl_seek_callback1) || \
_curl_callback_compatible((expr), _curl_seek_callback2))
typedef CURLcode (*_curl_seek_callback1)(void *, curl_off_t, int);
typedef CURLcode (*_curl_seek_callback2)(const void *, curl_off_t, int);
#endif /* __CURL_TYPECHECK_GCC_H */
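For context on what the machinery above buys: under a gcc build with the typecheck header enabled, a curl_easy_setopt() call whose third argument has the wrong type draws a warning at compile time rather than misbehaving at run time. A minimal sketch (the URL is a placeholder):

#include <curl/curl.h>

int main(void) {
  CURL *curl = curl_easy_init();
  if (curl == NULL) return 1;
  /* CURLOPT_URL expects a string, so a char * passes the _curl_is_string check */
  curl_easy_setopt(curl, CURLOPT_URL, "http://example.com");
  /* curl_easy_setopt(curl, CURLOPT_URL, 42L);  <-- a long here would trip the
     string typecheck and draw a compile-time warning from the macros above */
  curl_easy_cleanup(curl);
  return 0;
}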

BIN
deps/libcurl/lib/libcurl.a vendored Normal file

Binary file not shown.

View File

@ -26,6 +26,7 @@ else
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :

View File

@ -48,6 +48,7 @@ cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/post.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/tools/preun.sh ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdemox ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin

View File

@ -58,6 +58,7 @@ cp %{_compiledir}/../packaging/tools/preun.sh %{buildroot}%{homepath}/scri
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdemox %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
@ -135,7 +136,8 @@ if [ $1 -eq 0 ];then
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
#${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :

View File

@ -175,6 +175,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
@ -186,6 +187,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/taosdemox ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemox ${bin_link_dir}/taosdemox || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :

View File

@ -86,6 +86,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
fi
${csudo} rm -f ${bin_link_dir}/rmtaos || :
@ -97,6 +98,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/taosdemox ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemox ${bin_link_dir}/taosdemox || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
fi
[ -x ${install_main_dir}/bin/remove_client.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client.sh ${bin_link_dir}/rmtaos || :

View File

@ -85,8 +85,9 @@ function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/power || :
if [ "$osType" != "Darwin" ]; then
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
fi
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@ -97,6 +98,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
if [ "$osType" != "Darwin" ]; then
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
[ -x ${install_main_dir}/bin/powerdemox ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemox ${bin_link_dir}/powerdemox || :
[ -x ${install_main_dir}/bin/powerdump ] && ${csudo} ln -s ${install_main_dir}/bin/powerdump ${bin_link_dir}/powerdump || :
fi
[ -x ${install_main_dir}/bin/remove_client_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_client_power.sh ${bin_link_dir}/rmpower || :

View File

@ -174,6 +174,7 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerd || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@ -184,6 +185,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || :
[ -x ${install_main_dir}/bin/powerd ] && ${csudo} ln -s ${install_main_dir}/bin/powerd ${bin_link_dir}/powerd || :
[ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || :
[ -x ${install_main_dir}/bin/powerdemox ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemox ${bin_link_dir}/powerdemox || :
[ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :

View File

@ -45,7 +45,7 @@ if [ "$osType" != "Darwin" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taos ${script_dir}/remove_client.sh"
else
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
bin_files="${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdemox ${script_dir}/remove_client.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"
else

View File

@ -77,6 +77,7 @@ if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdemox ${install_dir}/bin/powerdemox
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${script_dir}/set_core.sh ${install_dir}/bin
cp ${script_dir}/get_client.sh ${install_dir}/bin

View File

@ -36,7 +36,7 @@ if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taos
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh"
else
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${build_dir}/bin/taosdump ${build_dir}/bin/taosdemo ${build_dir}/bin/taosdemox ${build_dir}/bin/tarbitrator ${script_dir}/remove.sh ${script_dir}/set_core.sh ${script_dir}/get_client.sh"
fi
lib_files="${build_dir}/lib/libtaos.so.${version}"

View File

@ -77,6 +77,7 @@ else
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdemox ${install_dir}/bin/powerdemox
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
cp ${script_dir}/set_core.sh ${install_dir}/bin

View File

@ -96,6 +96,8 @@ function install_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :
@ -105,6 +107,8 @@ function install_bin() {
[ -x ${bin_dir}/taos ] && ${csudo} ln -s ${bin_dir}/taos ${bin_link_dir}/taos || :
[ -x ${bin_dir}/taosd ] && ${csudo} ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || :
[ -x ${bin_dir}/taosdemo ] && ${csudo} ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${bin_dir}/taosdemox ] && ${csudo} ln -s ${bin_dir}/taosdemox ${bin_link_dir}/taosdemox || :
[ -x ${bin_dir}/taosdump ] && ${csudo} ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
[ -x ${bin_dir}/set_core.sh ] && ${csudo} ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
}

View File

@ -72,6 +72,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :

View File

@ -38,6 +38,7 @@ function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdemox || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
${csudo} rm -f ${bin_link_dir}/set_core || :

View File

@ -38,6 +38,7 @@ function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/set_core || :

View File

@ -72,6 +72,7 @@ function clean_bin() {
${csudo} rm -f ${bin_link_dir}/power || :
${csudo} rm -f ${bin_link_dir}/powerd || :
${csudo} rm -f ${bin_link_dir}/powerdemo || :
${csudo} rm -f ${bin_link_dir}/powerdemox || :
${csudo} rm -f ${bin_link_dir}/powerdump || :
${csudo} rm -f ${bin_link_dir}/rmpower || :
${csudo} rm -f ${bin_link_dir}/tarbitrator || :

View File

@ -224,19 +224,34 @@ static bool bnCheckVgroupReady(SVgObj *pVgroup, SVnodeGid *pRmVnode) {
return false;
}
int32_t rmVnodeVer = 0;
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pVnode = pVgroup->vnodeGid + i;
if (pVnode == pRmVnode) {
rmVnodeVer = mnodeGetVgidVer(pVnode->vver);
mTrace("vgId:%d, check vgroup status, vindex:%d dnode:%d status:%s role:%s vver:%d is watching", pVgroup->vgId, i,
pVnode->dnodeId, dnodeStatus[pVnode->pDnode->status], syncRole[pVnode->role], rmVnodeVer);
}
}
bool isReady = false;
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pVnode = pVgroup->vnodeGid + i;
if (pVnode == pRmVnode) continue;
int32_t vver = mnodeGetVgidVer(pVnode->vver);
mTrace("vgId:%d, check vgroup status, dnode:%d status:%d, vnode role:%s", pVgroup->vgId, pVnode->pDnode->dnodeId,
pVnode->pDnode->status, syncRole[pVnode->role]);
mTrace("vgId:%d, check vgroup status, vindex:%d dnode:%d status:%s role:%s vver:%d, rmvver:%d" , pVgroup->vgId, i,
pVnode->dnodeId, dnodeStatus[pVnode->pDnode->status], syncRole[pVnode->role], vver, rmVnodeVer);
if (pVnode->pDnode->status == TAOS_DN_STATUS_DROPPING) continue;
if (pVnode->pDnode->status == TAOS_DN_STATUS_OFFLINE) continue;
if (pVnode->role != TAOS_SYNC_ROLE_SLAVE && pVnode->role != TAOS_SYNC_ROLE_MASTER) continue;
if (pVnode->role == TAOS_SYNC_ROLE_SLAVE || pVnode->role == TAOS_SYNC_ROLE_MASTER) {
isReady = true;
if (rmVnodeVer == 0 || vver >= rmVnodeVer) {
mInfo("vgId:%d, is ready for vindex:%d in dnode:%d status:%s role:%s vver:%d larger than rmvver:%d", pVgroup->vgId, i,
pVnode->dnodeId, dnodeStatus[pVnode->pDnode->status], syncRole[pVnode->role], vver, rmVnodeVer);
}
isReady = true;
}
return isReady;
@ -256,7 +271,7 @@ static int32_t bnRemoveVnode(SVgObj *pVgroup) {
mDebug("vgId:%d, is not ready", pVgroup->vgId);
return -1;
} else {
mDebug("vgId:%d, is ready, discard dnode:%d", pVgroup->vgId, pSelVnode->dnodeId);
mInfo("vgId:%d, is ready, discard dnode:%d", pVgroup->vgId, pSelVnode->dnodeId);
bnDiscardVnode(pVgroup, pSelVnode);
return TSDB_CODE_SUCCESS;
}
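One way to read the reworked readiness check: a surviving replica only counts when its dnode is online, it already holds a master or slave role, and it has caught up to the version recorded for the copy being removed. A standalone sketch of that predicate, with illustrative names rather than the mnode symbols:

#include <stdbool.h>
#include <stdint.h>

/* hypothetical readiness test; rmVnodeVer == 0 means no version was recorded
 * for the copy being removed, so any healthy replica is acceptable */
static bool replicaCaughtUp(bool dnodeOnline, bool hasSyncRole,
                            int32_t vver, int32_t rmVnodeVer) {
  if (!dnodeOnline || !hasSyncRole) return false;
  return rmVnodeVer == 0 || vver >= rmVnodeVer;
}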

View File

@ -921,6 +921,10 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin,
*notNullElems = pCtx->size - pCtx->preAggVals.statis.numOfNull;
assert(*notNullElems >= 0);
if (*notNullElems == 0){
return;
}
void * tval = NULL;
int16_t index = 0;

View File

@ -4478,6 +4478,7 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
const char* msg = "illegal value or data overflow";
const char* msg1 = "value is expected";
const char* msg2 = "invalid fill option";
const char* msg3 = "top/bottom not support fill";
if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
@ -4560,6 +4561,14 @@ int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuery
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
for(int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
return TSDB_CODE_SUCCESS;
}
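Seen from the C client, the new msg3 guard means the parser now refuses a top/bottom projection combined with fill. A minimal sketch, assuming a reachable server with default credentials and a table t1 that has an int column c:

#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
  if (conn == NULL) return 1;

  TAOS_RES *res = taos_query(conn,
      "select top(c, 3) from t1 interval(10s) fill(prev)");
  if (taos_errno(res) != 0) {
    /* expected: "top/bottom not support fill" */
    printf("rejected: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}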

View File

@ -2503,7 +2503,7 @@ bool tscSetSqlOwner(SSqlObj* pSql) {
SSqlRes* pRes = &pSql->res;
// set the sql object owner
uint64_t threadId = taosGetPthreadId();
uint64_t threadId = taosGetSelfPthreadId();
if (atomic_val_compare_exchange_64(&pSql->owner, 0, threadId) != 0) {
pRes->code = TSDB_CODE_QRY_IN_EXEC;
return false;
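The pattern at work here is a single-owner hand-off: the request object is claimed by CAS-ing the calling thread's id into its owner field, and a second caller finds a non-zero owner and fails with the in-exec error. A standalone sketch using the GCC builtin directly (illustrative names; assumes pthread_t converts to an integer, as on the Linux build):

#include <stdint.h>
#include <pthread.h>

typedef struct { volatile int64_t owner; } SReq;

/* returns 1 when this thread became the owner, 0 when someone else holds it */
static int claimReq(SReq *req) {
  int64_t tid = (int64_t)pthread_self();    /* stand-in for taosGetSelfPthreadId() */
  return __sync_val_compare_and_swap(&req->owner, 0, tid) == 0;
}

/* only the owning thread clears the field back to 0 */
static void releaseReq(SReq *req) {
  int64_t tid = (int64_t)pthread_self();
  __sync_val_compare_and_swap(&req->owner, tid, 0);
}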

View File

@ -367,11 +367,15 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) {
let offset = 0;
for (let i = 0; i < fields.length; i++) {
pdata = ref.reinterpret(pblock,8,i*8);
pdata = ref.ref(pdata.readPointer());
if (!convertFunctions[fields[i]['type']] ) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
blocks[i] = convertFunctions[fields[i]['type']](pdata, 1, fieldlens[i], offset, isMicro);
if(ref.isNull(pdata.readPointer())){
blocks[i] = new Array();
}else{
pdata = ref.ref(pdata.readPointer());
if (!convertFunctions[fields[i]['type']] ) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
blocks[i] = convertFunctions[fields[i]['type']](pdata, 1, fieldlens[i], offset, isMicro);
}
}
return {blocks: blocks, num_of_rows:Math.abs(num_of_rows)}
}
@ -437,14 +441,18 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback,
}
if (numOfRows2 > 0){
for (let i = 0; i < fields.length; i++) {
if (!convertFunctions[fields[i]['type']] ) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
let prow = ref.reinterpret(row,8,i*8);
prow = prow.readPointer();
prow = ref.ref(prow);
blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
//offset += fields[i]['bytes'] * numOfRows2;
if(ref.isNull(pdata.readPointer())){
blocks[i] = new Array();
}else{
if (!convertFunctions[fields[i]['type']] ) {
throw new errors.DatabaseError("Invalid data type returned from database");
}
let prow = ref.reinterpret(row,8,i*8);
prow = prow.readPointer();
prow = ref.ref(prow);
blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro);
//offset += fields[i]['bytes'] * numOfRows2;
}
}
}
callback(param2, result2, numOfRows2, blocks);

View File

@ -1,6 +1,6 @@
{
"name": "td2.0-connector",
"version": "2.0.1",
"version": "2.0.4",
"description": "A Node.js connector for TDengine.",
"main": "tdengine.js",
"scripts": {

View File

@ -84,10 +84,19 @@ q.execute().then(function(r) {
r.pretty();
});
// test query null value
c1.execute("create table if not exists td_connector_test.weather(ts timestamp, temperature float, humidity int) tags(location nchar(64))");
c1.execute("insert into t1 using weather tags('北京') values(now, 11.11, 11)");
c1.execute("insert into t1(ts, temperature) values(now, 22.22)");
c1.execute("insert into t1(ts, humidity) values(now, 33)");
c1.query('select * from test.t1', true).then(function (result) {
result.pretty();
});
var q = c1.query('select * from td_connector_test.weather');
console.log(q.query);
q.execute().then(function(r) {
//console.log(r);
r.pretty();
});

View File

@ -143,7 +143,7 @@ static SCreateVnodeMsg* dnodeParseVnodeMsg(SRpcMsg *rpcMsg) {
pCreate->cfg.fsyncPeriod = htonl(pCreate->cfg.fsyncPeriod);
pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime);
for (int32_t j = 0; j < pCreate->cfg.replications; ++j) {
for (int32_t j = 0; j < pCreate->cfg.vgReplica; ++j) {
pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId);
}

View File

@ -436,7 +436,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf
#define TSDB_PORT_HTTP 11
#define TSDB_PORT_ARBITRATOR 12
#define TSDB_MAX_WAL_SIZE (1024*1024)
#define TSDB_MAX_WAL_SIZE (1024*1024*2)
typedef enum {
TAOS_QTYPE_RPC = 0,

View File

@ -209,9 +209,11 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "Unexpected
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, 0, 0x050A, "Invalid version file")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, 0, 0x050B, "Database memory is full for commit failed")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL, 0, 0x050C, "Database memory is full for waiting commit")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_DROPPING, 0, 0x050D, "Database is dropping")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING, 0, 0x050E, "Database is balancing")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "Database suspended")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Database write operation denied")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_SYNCING, 0, 0x0513, "Database is syncing")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING, 0, 0x0513, "Database is syncing")
// tsdb
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, 0, 0x0600, "Invalid table ID")

View File

@ -518,16 +518,17 @@ typedef struct SRetrieveTableRsp {
} SRetrieveTableRsp;
typedef struct {
int32_t vgId;
int32_t dbCfgVersion;
int64_t totalStorage;
int64_t compStorage;
int64_t pointsWritten;
uint8_t status;
uint8_t role;
uint8_t replica;
uint8_t reserved;
int32_t vgCfgVersion;
int32_t vgId;
int32_t dbCfgVersion;
int64_t totalStorage;
int64_t compStorage;
int64_t pointsWritten;
uint64_t vnodeVersion;
int32_t vgCfgVersion;
uint8_t status;
uint8_t role;
uint8_t replica;
uint8_t reserved;
} SVnodeLoad;
typedef struct {
@ -663,13 +664,14 @@ typedef struct {
int8_t precision;
int8_t compression;
int8_t walLevel;
int8_t replications;
int8_t vgReplica;
int8_t wals;
int8_t quorum;
int8_t update;
int8_t cacheLastRow;
int32_t vgCfgVersion;
int8_t reserved[10];
int8_t dbReplica;
int8_t reserved[9];
} SVnodeCfg;
typedef struct {

View File

@ -3,4 +3,5 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)
ADD_SUBDIRECTORY(taosdemo)
ADD_SUBDIRECTORY(taosdemox)
ADD_SUBDIRECTORY(taosdump)

View File

@ -0,0 +1,25 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/include)
IF (TD_LINUX)
AUX_SOURCE_DIRECTORY(. SRC)
ADD_EXECUTABLE(taosdemox ${SRC})
#find_program(HAVE_CURL NAMES curl)
IF ((NOT TD_ARM_64) AND (NOT TD_ARM_32))
ADD_DEFINITIONS(-DTD_LOWA_CURL)
LINK_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/libcurl/lib)
ADD_LIBRARY(curl STATIC IMPORTED)
SET_PROPERTY(TARGET curl PROPERTY IMPORTED_LOCATION ${TD_COMMUNITY_DIR}/deps/libcurl/lib/libcurl.a)
TARGET_LINK_LIBRARIES(taosdemox curl)
ENDIF ()
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdemox taos_static cJson)
ELSE ()
TARGET_LINK_LIBRARIES(taosdemox taos cJson)
ENDIF ()
ENDIF ()

View File

@ -0,0 +1,53 @@
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 2,
"result_file": "./insert_res.txt",
"databases": [{
"dbinfo": {
"name": "db",
"drop": "no",
"replica": 1,
"days": 2,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb",
"child_table_exists":"no",
"childtable_count": 1,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rate": 0,
"insert_rows": 100000,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 1,
"rows_per_tbl": 100,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}

View File

@ -0,0 +1,17 @@
{
"filetype":"query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "db01",
"super_table_query":
{"rate":1, "concurrent":1,
"sqls": [{"sql": "select count(*) from stb01", "result": "./query_res0.txt"}]
},
"sub_table_query":
{"stblname": "stb01", "rate":1, "threads":1,
"sqls": [{"sql": "select count(*) from xxxx", "result": "./query_res1.txt"}]
}
}

View File

@ -0,0 +1,17 @@
{
"filetype":"subscribe",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "db01",
"super_table_query":
{"concurrent":1, "mode":"sync", "interval":5000, "restart":"yes", "keepProgress":"yes",
"sqls": [{"sql": "select avg(c1) from stb01 where col1 > 1;", "result": "./subscribe_res0.txt"}]
},
"sub_table_query":
{"stblname": "stb01", "threads":1, "mode":"sync", "interval":10000, "restart":"yes", "keepProgress":"yes",
"sqls": [{"sql": "select col1 from xxxx where col1 > 10;", "result": "./subscribe_res1.txt"}]
}
}

File diff suppressed because it is too large

View File

@ -128,8 +128,8 @@ typedef struct {
typedef struct {
int32_t dnodeId;
int8_t role;
int8_t reserved[3];
SDnodeObj* pDnode;
int8_t vver[3]; // To ensure compatibility, 3 bytes are used to represent the remainder of the 64-bit version
SDnodeObj *pDnode;
} SVnodeGid;
typedef struct SVgObj {

View File

@ -53,6 +53,9 @@ void mnodeSendAlterVgroupMsg(SVgObj *pVgroup);
SRpcEpSet mnodeGetEpSetFromVgroup(SVgObj *pVgroup);
SRpcEpSet mnodeGetEpSetFromIp(char *ep);
int32_t mnodeGetVgidVer(int8_t *vver);
void mnodeSetVgidVer(int8_t *cver, uint64_t iver);
#ifdef __cplusplus
}
#endif

View File

@ -171,7 +171,7 @@ void mnodeUpdateClusterId() {
void *pIter = mnodeGetNextCluster(NULL, &pCluster);
if (pCluster != NULL) {
tstrncpy(tsClusterId, pCluster->uid, TSDB_CLUSTER_ID_LEN);
mInfo("cluster id is set to %s", tsClusterId);
mDebug("cluster id is set to %s", tsClusterId);
}
mnodeDecClusterRef(pCluster);

View File

@ -571,6 +571,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
pVload->vgId = htonl(pVload->vgId);
pVload->dbCfgVersion = htonl(pVload->dbCfgVersion);
pVload->vgCfgVersion = htonl(pVload->vgCfgVersion);
pVload->vnodeVersion = htobe64(pVload->vnodeVersion);
SVgObj *pVgroup = mnodeGetVgroup(pVload->vgId);
if (pVgroup == NULL) {

View File

@ -387,6 +387,7 @@ static bool mnodeAllOnline() {
if (pMnode == NULL) break;
if (pMnode->role != TAOS_SYNC_ROLE_MASTER && pMnode->role != TAOS_SYNC_ROLE_SLAVE) {
allOnline = false;
mDebug("mnode:%d, role:%s, not online", pMnode->mnodeId, syncRole[pMnode->role]);
mnodeDecMnodeRef(pMnode);
}
}

View File

@ -225,6 +225,10 @@ void sdbUpdateMnodeRoles() {
for (int32_t i = 0; i < tsSdbMgmt.cfg.replica; ++i) {
SMnodeObj *pMnode = mnodeGetMnode(roles.nodeId[i]);
if (pMnode != NULL) {
if (pMnode->role != roles.role[i]) {
bnNotify();
}
pMnode->role = roles.role[i];
sdbInfo("vgId:1, mnode:%d, role:%s", pMnode->mnodeId, syncRole[pMnode->role]);
if (pMnode->mnodeId == dnodeGetDnodeId()) tsSdbMgmt.role = pMnode->role;

View File

@ -184,6 +184,7 @@ static int32_t mnodeVgroupActionEncode(SSdbRow *pRow) {
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
pTmpVgroup->vnodeGid[i].pDnode = NULL;
pTmpVgroup->vnodeGid[i].role = 0;
memset(pTmpVgroup->vnodeGid[i].vver, 0, sizeof(pTmpVgroup->vnodeGid[i].vver));
}
pRow->rowSize = tsVgUpdateSize;
@ -317,9 +318,10 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
if (pVgid->pDnode == pDnode) {
mTrace("dnode:%d, receive status from dnode, vgId:%d status:%s last:%s", pDnode->dnodeId, pVgroup->vgId,
syncRole[pVload->role], syncRole[pVgid->role]);
mTrace("vgId:%d, receive vnode status from dnode:%d, status:%s last:%s vver:%" PRIu64, pVgroup->vgId,
pDnode->dnodeId, syncRole[pVload->role], syncRole[pVgid->role], pVload->vnodeVersion);
pVgid->role = pVload->role;
mnodeSetVgidVer(pVgid->vver, pVload->vnodeVersion);
if (pVload->role == TAOS_SYNC_ROLE_MASTER) {
pVgroup->inUse = i;
}
@ -859,11 +861,12 @@ static SCreateVnodeMsg *mnodeBuildVnodeMsg(SVgObj *pVgroup) {
pCfg->precision = pDb->cfg.precision;
pCfg->compression = pDb->cfg.compression;
pCfg->walLevel = pDb->cfg.walLevel;
pCfg->replications = (int8_t) pVgroup->numOfVnodes;
pCfg->vgReplica = (int8_t) pVgroup->numOfVnodes;
pCfg->wals = 3;
pCfg->quorum = pDb->cfg.quorum;
pCfg->update = pDb->cfg.update;
pCfg->cacheLastRow = pDb->cfg.cacheLastRow;
pCfg->dbReplica = pDb->cfg.replications;
SVnodeDesc *pNodes = pVnode->nodes;
for (int32_t j = 0; j < pVgroup->numOfVnodes; ++j) {
@ -1179,3 +1182,14 @@ void mnodeSendDropAllDbVgroupsMsg(SDbObj *pDropDb) {
mInfo("db:%s, all vgroups:%d drop msg is sent to dnode", pDropDb->name, numOfVgroups);
}
int32_t mnodeGetVgidVer(int8_t *cver) {
int32_t iver = ((int32_t)cver[0]) * 10000 + ((int32_t)cver[1]) * 100 + (int32_t)cver[2];
return iver;
}
void mnodeSetVgidVer(int8_t *cver, uint64_t iver) {
cver[0] = (int8_t)((int32_t)(iver % 1000000) / 10000);
cver[1] = (int8_t)((int32_t)(iver % 100000) / 100);
cver[2] = (int8_t)(iver % 100);
}
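For reference, the helpers pack the low six decimal digits of the 64-bit version, one digit pair per byte. A standalone sketch of that scheme with hypothetical names, not the mnode functions themselves:

#include <stdint.h>
#include <stdio.h>

/* pack the low six decimal digits of a 64-bit version into three bytes,
 * two digits per byte, and unpack them back into an int32_t */
static void packVer(int8_t *cver, uint64_t iver) {
  cver[0] = (int8_t)((iver % 1000000) / 10000);  /* digits 6-5 */
  cver[1] = (int8_t)((iver % 10000)   / 100);    /* digits 4-3 */
  cver[2] = (int8_t)( iver % 100);               /* digits 2-1 */
}

static int32_t unpackVer(const int8_t *cver) {
  return (int32_t)cver[0] * 10000 + (int32_t)cver[1] * 100 + (int32_t)cver[2];
}

int main(void) {
  int8_t cver[3];
  packVer(cver, 987654321ULL);
  printf("%d\n", unpackVer(cver));   /* 654321: only the low six digits survive */
  return 0;
}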

View File

@ -29,12 +29,13 @@ extern "C" {
#endif
// TAOS_OS_FUNC_SEMPHONE_PTHREAD
bool taosCheckPthreadValid(pthread_t thread);
int64_t taosGetPthreadId();
void taosResetPthread(pthread_t *thread);
bool taosComparePthread(pthread_t first, pthread_t second);
bool taosCheckPthreadValid(pthread_t thread);
int64_t taosGetSelfPthreadId();
int64_t taosGetPthreadId(pthread_t thread);
void taosResetPthread(pthread_t* thread);
bool taosComparePthread(pthread_t first, pthread_t second);
int32_t taosGetPId();
int32_t taosGetCurrentAPPName(char *name, int32_t* len);
int32_t taosGetCurrentAPPName(char* name, int32_t* len);
#ifdef __cplusplus
}

View File

@ -31,7 +31,8 @@ int tsem_wait(tsem_t* sem) {
#ifndef TAOS_OS_FUNC_SEMPHONE_PTHREAD
bool taosCheckPthreadValid(pthread_t thread) { return thread != 0; }
int64_t taosGetPthreadId() { return (int64_t)pthread_self(); }
int64_t taosGetSelfPthreadId() { return (int64_t)pthread_self(); }
int64_t taosGetPthreadId(pthread_t thread) { return (int64_t)thread; }
void taosResetPthread(pthread_t *thread) { *thread = 0; }
bool taosComparePthread(pthread_t first, pthread_t second) { return first == second; }
int32_t taosGetPId() { return getpid(); }
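The split matters when one thread wants to log the id of a thread it just created rather than its own. A small self-contained sketch with hypothetical stand-ins for the two helpers (assumes pthread_t converts to an integer, as on the Linux build above):

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* hypothetical stand-ins for the two helpers this change splits apart */
static int64_t getSelfPthreadId(void)         { return (int64_t)pthread_self(); }
static int64_t getPthreadId(pthread_t thread) { return (int64_t)thread; }

static void *worker(void *arg) {
  (void)arg;
  printf("worker sees itself as 0x%08" PRIx64 "\n", getSelfPthreadId());
  return NULL;
}

int main(void) {
  pthread_t thread;
  if (pthread_create(&thread, NULL, worker, NULL) != 0) return 1;
  /* the creator can log the new thread's id without being that thread */
  printf("creator sees worker as 0x%08" PRIx64 "\n", getPthreadId(thread));
  pthread_join(thread, NULL);
  return 0;
}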

View File

@ -25,14 +25,16 @@ bool taosCheckPthreadValid(pthread_t thread) { return thread.p != NULL; }
void taosResetPthread(pthread_t *thread) { thread->p = 0; }
int64_t taosGetPthreadId() {
int64_t taosGetPthreadId(pthread_t thread) {
#ifdef PTW32_VERSION
return pthread_getw32threadid_np(pthread_self());
return pthread_getw32threadid_np(thread);
#else
return (int64_t)pthread_self();
return (int64_t)thread;
#endif
}
int64_t taosGetSelfPthreadId() { return taosGetPthreadId(pthread_self()); }
bool taosComparePthread(pthread_t first, pthread_t second) {
return first.p == second.p;
}

View File

@ -7250,7 +7250,7 @@ static bool doBuildResCheck(SQInfo* pQInfo) {
// clear qhandle owner, it must be in the secure area. other thread may run ahead before current, after it is
// put into task to be executed.
assert(pQInfo->owner == taosGetPthreadId());
assert(pQInfo->owner == taosGetSelfPthreadId());
pQInfo->owner = 0;
pthread_mutex_unlock(&pQInfo->lock);
@ -7263,7 +7263,7 @@ static bool doBuildResCheck(SQInfo* pQInfo) {
bool qTableQuery(qinfo_t qinfo) {
SQInfo *pQInfo = (SQInfo *)qinfo;
assert(pQInfo && pQInfo->signature == pQInfo);
int64_t threadId = taosGetPthreadId();
int64_t threadId = taosGetSelfPthreadId();
int64_t curOwner = 0;
if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) {

View File

@ -272,7 +272,7 @@ static int rpcHashConn(void *handle, char *fqdn, uint16_t port, int8_t connType)
}
static void rpcLockCache(int64_t *lockedBy) {
int64_t tid = taosGetPthreadId();
int64_t tid = taosGetSelfPthreadId();
int i = 0;
while (atomic_val_compare_exchange_64(lockedBy, 0, tid) != 0) {
if (++i % 100 == 0) {
@ -282,7 +282,7 @@ static void rpcLockCache(int64_t *lockedBy) {
}
static void rpcUnlockCache(int64_t *lockedBy) {
int64_t tid = taosGetPthreadId();
int64_t tid = taosGetSelfPthreadId();
if (atomic_val_compare_exchange_64(lockedBy, tid, 0) != tid) {
assert(false);
}
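The cache lock is a tiny spinlock keyed by thread id: the lock word holds the owner's id, 0 when free, and contenders yield every so often. A standalone sketch of that shape with illustrative names:

#include <stdint.h>
#include <sched.h>
#include <pthread.h>

/* the lock word holds the owning thread id; 0 means unlocked */
static void lockByTid(volatile int64_t *lockedBy) {
  int64_t tid = (int64_t)pthread_self();   /* stand-in for taosGetSelfPthreadId() */
  int i = 0;
  while (__sync_val_compare_and_swap(lockedBy, 0, tid) != 0) {
    if (++i % 100 == 0) sched_yield();     /* back off instead of burning the CPU */
  }
}

static void unlockByTid(volatile int64_t *lockedBy) {
  int64_t tid = (int64_t)pthread_self();
  /* only the thread that took the lock may release it */
  __sync_val_compare_and_swap(lockedBy, tid, 0);
}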

View File

@ -1604,7 +1604,7 @@ static int rpcCheckAuthentication(SRpcConn *pConn, char *msg, int msgLen) {
}
static void rpcLockConn(SRpcConn *pConn) {
int64_t tid = taosGetPthreadId();
int64_t tid = taosGetSelfPthreadId();
int i = 0;
while (atomic_val_compare_exchange_64(&(pConn->lockedBy), 0, tid) != 0) {
if (++i % 1000 == 0) {
@ -1614,7 +1614,7 @@ static void rpcLockConn(SRpcConn *pConn) {
}
static void rpcUnlockConn(SRpcConn *pConn) {
int64_t tid = taosGetPthreadId();
int64_t tid = taosGetSelfPthreadId();
if (atomic_val_compare_exchange_64(&(pConn->lockedBy), tid, 0) != tid) {
assert(false);
}

View File

@ -478,9 +478,7 @@ static void syncAddArbitrator(SSyncNode *pNode) {
static void syncFreeNode(void *param) {
SSyncNode *pNode = param;
int32_t refCount = atomic_sub_fetch_32(&pNode->refCount, 1);
sDebug("vgId:%d, syncnode is freed, refCount:%d", pNode->vgId, refCount);
sDebug("vgId:%d, node is freed, refCount:%d", pNode->vgId, pNode->refCount);
pthread_mutex_destroy(&pNode->mutex);
tfree(pNode->pRecv);
@ -491,10 +489,10 @@ static void syncFreeNode(void *param) {
SSyncNode *syncAcquireNode(int64_t rid) {
SSyncNode *pNode = taosAcquireRef(tsNodeRefId, rid);
if (pNode == NULL) {
sDebug("failed to acquire syncnode from refId:%" PRId64, rid);
sDebug("failed to acquire node from refId:%" PRId64, rid);
} else {
int32_t refCount = atomic_add_fetch_32(&pNode->refCount, 1);
sTrace("vgId:%d, acquire syncnode refId:%" PRId64 ", refCount:%d", pNode->vgId, rid, refCount);
sTrace("vgId:%d, acquire node refId:%" PRId64 ", refCount:%d", pNode->vgId, rid, refCount);
}
return pNode;
@ -502,16 +500,14 @@ SSyncNode *syncAcquireNode(int64_t rid) {
void syncReleaseNode(SSyncNode *pNode) {
int32_t refCount = atomic_sub_fetch_32(&pNode->refCount, 1);
sTrace("vgId:%d, dec syncnode refId:%" PRId64 " refCount:%d", pNode->vgId, pNode->rid, refCount);
sTrace("vgId:%d, release node refId:%" PRId64 ", refCount:%d", pNode->vgId, pNode->rid, refCount);
taosReleaseRef(tsNodeRefId, pNode->rid);
}
static void syncFreePeer(void *param) {
SSyncPeer *pPeer = param;
int32_t refCount = atomic_sub_fetch_32(&pPeer->refCount, 1);
sDebug("%s, peer is freed, refCount:%d", pPeer->id, refCount);
sDebug("%s, peer is freed, refCount:%d", pPeer->id, pPeer->refCount);
syncReleaseNode(pPeer->pSyncNode);
tfree(pPeer);
@ -531,7 +527,7 @@ SSyncPeer *syncAcquirePeer(int64_t rid) {
void syncReleasePeer(SSyncPeer *pPeer) {
int32_t refCount = atomic_sub_fetch_32(&pPeer->refCount, 1);
sTrace("%s, dec peer refId:%" PRId64 ", refCount:%d", pPeer->id, pPeer->rid, refCount);
sTrace("%s, release peer refId:%" PRId64 ", refCount:%d", pPeer->id, pPeer->rid, refCount);
taosReleaseRef(tsPeerRefId, pPeer->rid);
}
@ -879,14 +875,14 @@ static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) {
int32_t ret = pthread_create(&thread, &thattr, syncRetrieveData, (void *)pPeer->rid);
pthread_attr_destroy(&thattr);
if (ret != 0) {
sError("%s, failed to create sync thread since %s", pPeer->id, strerror(errno));
if (ret < 0) {
sError("%s, failed to create sync retrieve thread since %s", pPeer->id, strerror(errno));
syncReleasePeer(pPeer);
} else {
pPeer->sstatus = TAOS_SYNC_STATUS_START;
sDebug("%s, thread is created to retrieve data, set sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]);
sDebug("%s, sync retrieve thread:0x%08" PRIx64 " create successfully, rid:%" PRId64 ", set sstatus:%s", pPeer->id,
taosGetPthreadId(thread), pPeer->rid, syncStatus[pPeer->sstatus]);
}
syncReleasePeer(pPeer);
}
static void syncNotStarted(void *param, void *tmrId) {
@ -1154,19 +1150,19 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) {
(void)syncAcquirePeer(pPeer->rid);
int32_t ret = pthread_create(&(thread), &thattr, (void *)syncRestoreData, (void *)pPeer->rid);
int32_t ret = pthread_create(&thread, &thattr, (void *)syncRestoreData, (void *)pPeer->rid);
pthread_attr_destroy(&thattr);
if (ret < 0) {
SSyncNode *pNode = pPeer->pSyncNode;
nodeSStatus = TAOS_SYNC_STATUS_INIT;
sError("%s, failed to create sync thread, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]);
sError("%s, failed to create sync restore thread, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]);
taosClose(pPeer->syncFd);
syncReleasePeer(pPeer);
} else {
sInfo("%s, sync connection is up", pPeer->id);
sInfo("%s, sync restore thread:0x%08" PRIx64 " create successfully, rid:%" PRId64, pPeer->id,
taosGetPthreadId(thread), pPeer->rid);
}
syncReleasePeer(pPeer);
}
static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) {

View File

@ -353,12 +353,16 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) {
void *syncRestoreData(void *param) {
int64_t rid = (int64_t)param;
SSyncPeer *pPeer = syncAcquirePeer(rid);
if (pPeer == NULL) return NULL;
if (pPeer == NULL) {
sError("failed to restore data, invalid peer rid:%" PRId64, rid);
return NULL;
}
SSyncNode *pNode = pPeer->pSyncNode;
taosBlockSIGPIPE();
__sync_fetch_and_add(&tsSyncNum, 1);
sInfo("%s, start to restore data, sstatus:%s", pPeer->id, syncStatus[nodeSStatus]);
(*pNode->notifyRole)(pNode->vgId, TAOS_SYNC_ROLE_SYNCING);
@ -380,11 +384,14 @@ void *syncRestoreData(void *param) {
(*pNode->notifyRole)(pNode->vgId, nodeRole);
nodeSStatus = TAOS_SYNC_STATUS_INIT;
sInfo("%s, sync over, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]);
sInfo("%s, restore data over, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]);
taosClose(pPeer->syncFd);
syncCloseRecvBuffer(pNode);
__sync_fetch_and_sub(&tsSyncNum, 1);
// The ref is obtained in both the creating thread and the current thread, so it is released twice
syncReleasePeer(pPeer);
syncReleasePeer(pPeer);
return NULL;

View File

@ -194,7 +194,7 @@ static int32_t syncReadOneWalRecord(int32_t sfd, SWalHead *pHead) {
}
if (ret == 0) {
sTrace("sfd:%d, read to the end of file, ret:%d", sfd, ret);
sDebug("sfd:%d, read to the end of file, ret:%d", sfd, ret);
return 0;
}
@ -253,7 +253,7 @@ static int32_t syncRetrieveLastWal(SSyncPeer *pPeer, char *name, uint64_t fversi
break;
}
sTrace("%s, last wal is forwarded, hver:%" PRIu64, pPeer->id, pHead->version);
sDebug("%s, last wal is forwarded, hver:%" PRIu64, pPeer->id, pHead->version);
int32_t wsize = code;
int32_t ret = taosWriteMsg(pPeer->syncFd, pHead, wsize);
@ -466,10 +466,15 @@ static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) {
void *syncRetrieveData(void *param) {
int64_t rid = (int64_t)param;
SSyncPeer *pPeer = syncAcquirePeer(rid);
if (pPeer == NULL) return NULL;
if (pPeer == NULL) {
sError("failed to retrieve data, invalid peer rid:%" PRId64, rid);
return NULL;
}
SSyncNode *pNode = pPeer->pSyncNode;
taosBlockSIGPIPE();
sInfo("%s, start to retrieve data, sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]);
if (pNode->notifyFlowCtrl) (*pNode->notifyFlowCtrl)(pNode->vgId, pPeer->numOfRetrieves);
@ -496,7 +501,11 @@ void *syncRetrieveData(void *param) {
pPeer->fileChanged = 0;
taosClose(pPeer->syncFd);
// The ref is obtained in both the creating thread and the current thread, so it is released twice
syncReleasePeer(pPeer);
syncReleasePeer(pPeer);
sInfo("%s, sync retrieve data over, sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]);
return NULL;
}
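Both thread entry points receive a reference id rather than a pointer, re-acquire the peer inside the thread, and then release twice, once for the creator's ref and once for their own. A self-contained toy sketch of that hand-off (illustrative names and a stub registry in place of taosAcquireRef/taosReleaseRef):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* toy ref registry standing in for the real ref-count machinery */
typedef struct { int valid; int refCount; } SPeer;
static SPeer peers[4];

static SPeer *acquirePeer(int64_t rid) {
  SPeer *p = &peers[rid & 3];
  if (!p->valid) { fprintf(stderr, "invalid peer rid:%" PRId64 "\n", rid); return NULL; }
  p->refCount++;
  return p;
}
static void releasePeer(SPeer *p) { p->refCount--; }

/* thread entry: re-acquire the peer from its rid, never trust a raw pointer */
static void *retrieveWorker(void *param) {
  int64_t rid = (int64_t)(intptr_t)param;
  SPeer *pPeer = acquirePeer(rid);
  if (pPeer == NULL) return NULL;     /* peer dropped before the thread started */
  /* ... retrieve data ... */
  releasePeer(pPeer);                 /* ref taken by this thread */
  releasePeer(pPeer);                 /* ref taken by the creating thread */
  return NULL;
}

int main(void) {
  int64_t rid = 1;
  peers[rid].valid = 1;
  SPeer *pPeer = acquirePeer(rid);    /* creator's ref, released by the worker */
  pthread_t thread;
  if (pthread_create(&thread, NULL, retrieveWorker, (void *)(intptr_t)rid) != 0) {
    releasePeer(pPeer);               /* creation failed: drop our own ref */
    return 1;
  }
  pthread_join(thread, NULL);
  printf("refCount after worker: %d\n", peers[rid].refCount);  /* 0 */
  return 0;
}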

View File

@ -207,10 +207,10 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
int tsdbAsyncCommit(STsdbRepo *pRepo) {
if (pRepo->mem == NULL) return 0;
ASSERT(pRepo->imem == NULL);
sem_wait(&(pRepo->readyToCommit));
ASSERT(pRepo->imem == NULL);
if (pRepo->code != TSDB_CODE_SUCCESS) {
tsdbWarn("vgId:%d try to commit when TSDB not in good state: %s", REPO_ID(pRepo), tstrerror(terrno));
}
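The reorder reflects the commit handshake: the writer must own readyToCommit before it may assume imem is free, because the commit thread clears imem and only then posts the semaphore. A minimal sketch of that handshake with illustrative field names:

#include <semaphore.h>
#include <stddef.h>

typedef struct { void *mem, *imem; sem_t readyToCommit; } SRepo;

static void asyncCommit(SRepo *pRepo) {
  if (pRepo->mem == NULL) return;
  sem_wait(&pRepo->readyToCommit);   /* blocks until the previous commit finished */
  /* only now is imem guaranteed to be NULL, hence the assert moves below the wait */
  pRepo->imem = pRepo->mem;
  pRepo->mem  = NULL;
  /* ... hand imem to the commit thread, which later does:
   *     pRepo->imem = NULL; sem_post(&pRepo->readyToCommit);            */
}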

View File

@ -364,7 +364,7 @@ void taosPrintLog(const char *flags, int32_t dflag, const char *format, ...) {
ptm = localtime_r(&curTime, &Tm);
len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%08" PRIx64 " ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,
ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec, taosGetPthreadId());
ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec, taosGetSelfPthreadId());
len += sprintf(buffer + len, "%s", flags);
va_start(argpointer, format);
@ -450,7 +450,7 @@ void taosPrintLongString(const char *flags, int32_t dflag, const char *format, .
ptm = localtime_r(&curTime, &Tm);
len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%08" PRIx64 " ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,
ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec, taosGetPthreadId());
ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec, taosGetSelfPthreadId());
len += sprintf(buffer + len, "%s", flags);
va_start(argpointer, format);

View File

@ -249,7 +249,7 @@ void taosNotePrint(SNoteObj *pNote, const char *const format, ...) {
curTime = timeSecs.tv_sec;
ptm = localtime_r(&curTime, &Tm);
len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%08" PRIx64 " ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour,
ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec, taosGetPthreadId());
ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec, taosGetSelfPthreadId());
va_start(argpointer, format);
len += vsnprintf(buffer + len, MAX_NOTE_LINE_SIZE - len, format, argpointer);
va_end(argpointer);

View File

@ -447,7 +447,7 @@ static int taosDecRefCount(int rsetId, int64_t rid, int remove) {
}
static void taosLockList(int64_t *lockedBy) {
int64_t tid = taosGetPthreadId();
int64_t tid = taosGetSelfPthreadId();
int i = 0;
while (atomic_val_compare_exchange_64(lockedBy, 0, tid) != 0) {
if (++i % 100 == 0) {
@ -457,7 +457,7 @@ static void taosLockList(int64_t *lockedBy) {
}
static void taosUnlockList(int64_t *lockedBy) {
int64_t tid = taosGetPthreadId();
int64_t tid = taosGetSelfPthreadId();
if (atomic_val_compare_exchange_64(lockedBy, tid, 0) != tid) {
assert(false);
}

View File

@ -119,7 +119,7 @@ static void timerDecRef(tmr_obj_t* timer) {
}
static void lockTimerList(timer_list_t* list) {
int64_t tid = taosGetPthreadId();
int64_t tid = taosGetSelfPthreadId();
int i = 0;
while (atomic_val_compare_exchange_64(&(list->lockedBy), 0, tid) != 0) {
if (++i % 1000 == 0) {
@ -129,7 +129,7 @@ static void lockTimerList(timer_list_t* list) {
}
static void unlockTimerList(timer_list_t* list) {
int64_t tid = taosGetPthreadId();
int64_t tid = taosGetSelfPthreadId();
if (atomic_val_compare_exchange_64(&(list->lockedBy), tid, 0) != tid) {
assert(false);
tmrError("%" PRId64 " trying to unlock a timer list not locked by current thread.", tid);
@ -257,7 +257,7 @@ static bool removeFromWheel(tmr_obj_t* timer) {
static void processExpiredTimer(void* handle, void* arg) {
tmr_obj_t* timer = (tmr_obj_t*)handle;
timer->executedBy = taosGetPthreadId();
timer->executedBy = taosGetSelfPthreadId();
uint8_t state = atomic_val_compare_exchange_8(&timer->state, TIMER_STATE_WAITING, TIMER_STATE_EXPIRED);
if (state == TIMER_STATE_WAITING) {
const char* fmt = "%s timer[id=%" PRIuPTR ", fp=%p, param=%p] execution start.";
@ -406,7 +406,7 @@ static bool doStopTimer(tmr_obj_t* timer, uint8_t state) {
return false;
}
if (timer->executedBy == taosGetPthreadId()) {
if (timer->executedBy == taosGetSelfPthreadId()) {
// taosTmrReset is called in the timer callback, should do nothing in this
// case to avoid a deadlock. Note taosTmrReset must be the last statement
// of the callback function; it is a bug otherwise.

View File

@ -45,6 +45,9 @@ typedef struct {
int8_t accessState;
int8_t isFull;
int8_t isCommiting;
int8_t dbReplica;
int8_t dropped;
int8_t reserved;
uint64_t version; // current version
uint64_t cversion; // version while commit start
uint64_t fversion; // version on saved data file
@ -64,7 +67,6 @@ typedef struct {
void * qMgmt;
char * rootDir;
tsem_t sem;
int8_t dropped;
char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
pthread_mutex_t statusMutex;
} SVnodeObj;

View File

@ -38,8 +38,9 @@ static void vnodeLoadCfg(SVnodeObj *pVnode, SCreateVnodeMsg* vnodeMsg) {
pVnode->walCfg.walLevel = vnodeMsg->cfg.walLevel;
pVnode->walCfg.fsyncPeriod = vnodeMsg->cfg.fsyncPeriod;
pVnode->walCfg.keep = TAOS_WAL_NOT_KEEP;
pVnode->syncCfg.replica = vnodeMsg->cfg.replications;
pVnode->syncCfg.replica = vnodeMsg->cfg.vgReplica;
pVnode->syncCfg.quorum = vnodeMsg->cfg.quorum;
pVnode->dbReplica = vnodeMsg->cfg.dbReplica;
for (int i = 0; i < pVnode->syncCfg.replica; ++i) {
SVnodeDesc *node = &vnodeMsg->nodes[i];
@ -203,12 +204,21 @@ int32_t vnodeReadCfg(SVnodeObj *pVnode) {
}
vnodeMsg.cfg.wals = (int8_t)wals->valueint;
cJSON *replica = cJSON_GetObjectItem(root, "replica");
if (!replica || replica->type != cJSON_Number) {
cJSON *vgReplica = cJSON_GetObjectItem(root, "replica");
if (!vgReplica || vgReplica->type != cJSON_Number) {
vError("vgId:%d, failed to read %s, replica not found", pVnode->vgId, file);
goto PARSE_VCFG_ERROR;
}
vnodeMsg.cfg.replications = (int8_t)replica->valueint;
vnodeMsg.cfg.vgReplica = (int8_t)vgReplica->valueint;
cJSON *dbReplica = cJSON_GetObjectItem(root, "dbReplica");
if (!dbReplica || dbReplica->type != cJSON_Number) {
vError("vgId:%d, failed to read %s, dbReplica not found", pVnode->vgId, file);
vnodeMsg.cfg.dbReplica = vnodeMsg.cfg.vgReplica;
vnodeMsg.cfg.vgCfgVersion = 0;
} else {
vnodeMsg.cfg.dbReplica = (int8_t)dbReplica->valueint;
}
cJSON *quorum = cJSON_GetObjectItem(root, "quorum");
if (!quorum || quorum->type != cJSON_Number) {
@ -220,8 +230,8 @@ int32_t vnodeReadCfg(SVnodeObj *pVnode) {
cJSON *cacheLastRow = cJSON_GetObjectItem(root, "cacheLastRow");
if (!cacheLastRow || cacheLastRow->type != cJSON_Number) {
vError("vgId: %d, failed to read %s, cacheLastRow not found", pVnode->vgId, file);
//goto PARSE_VCFG_ERROR;
vnodeMsg.cfg.cacheLastRow = 0;
vnodeMsg.cfg.vgCfgVersion = 0;
} else {
vnodeMsg.cfg.cacheLastRow = (int8_t)cacheLastRow->valueint;
}
@ -233,7 +243,7 @@ int32_t vnodeReadCfg(SVnodeObj *pVnode) {
}
int size = cJSON_GetArraySize(nodeInfos);
if (size != vnodeMsg.cfg.replications) {
if (size != vnodeMsg.cfg.vgReplica) {
vError("vgId:%d, failed to read %s, nodeInfos size not matched", pVnode->vgId, file);
goto PARSE_VCFG_ERROR;
}
@ -311,17 +321,18 @@ int32_t vnodeWriteCfg(SCreateVnodeMsg *pMsg) {
len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pMsg->cfg.compression);
len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pMsg->cfg.walLevel);
len += snprintf(content + len, maxLen - len, " \"fsync\": %d,\n", pMsg->cfg.fsyncPeriod);
len += snprintf(content + len, maxLen - len, " \"replica\": %d,\n", pMsg->cfg.replications);
len += snprintf(content + len, maxLen - len, " \"replica\": %d,\n", pMsg->cfg.vgReplica);
len += snprintf(content + len, maxLen - len, " \"dbReplica\": %d,\n", pMsg->cfg.dbReplica);
len += snprintf(content + len, maxLen - len, " \"wals\": %d,\n", pMsg->cfg.wals);
len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pMsg->cfg.quorum);
len += snprintf(content + len, maxLen - len, " \"cacheLastRow\": %d,\n", pMsg->cfg.cacheLastRow);
len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n");
for (int32_t i = 0; i < pMsg->cfg.replications; i++) {
for (int32_t i = 0; i < pMsg->cfg.vgReplica; i++) {
SVnodeDesc *node = &pMsg->nodes[i];
dnodeUpdateEp(node->nodeId, node->nodeEp, NULL, NULL);
len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", node->nodeId);
len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", node->nodeEp);
if (i < pMsg->cfg.replications - 1) {
if (i < pMsg->cfg.vgReplica - 1) {
len += snprintf(content + len, maxLen - len, " },{\n");
} else {
len += snprintf(content + len, maxLen - len, " }]\n");
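The dbReplica handling above treats a missing JSON field as a soft error: fall back to an existing value and zero the config version so a fresh config is fetched later, rather than failing the whole parse. A small sketch of that pattern with cJSON (hypothetical helper name):

#include "cJSON.h"
#include <stdint.h>

/* read an optional numeric field; on absence, return the fallback and reset
 * the config version so the caller knows to refresh its configuration */
static int8_t readOptionalInt8(cJSON *root, const char *key,
                               int8_t fallback, int32_t *cfgVersion) {
  cJSON *item = cJSON_GetObjectItem(root, key);
  if (item == NULL || item->type != cJSON_Number) {
    *cfgVersion = 0;       /* mirrors vgCfgVersion = 0 in the diff above */
    return fallback;
  }
  return (int8_t)item->valueint;
}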

View File

@ -142,6 +142,7 @@ static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SStatusMsg *pStatus) {
pLoad->totalStorage = htobe64(totalStorage);
pLoad->compStorage = htobe64(compStorage);
pLoad->pointsWritten = htobe64(pointsWritten);
pLoad->vnodeVersion = htobe64(pVnode->version);
pLoad->status = pVnode->status;
pLoad->role = pVnode->role;
pLoad->replica = pVnode->syncCfg.replica;
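Each multi-byte field is converted to network byte order before the status message leaves the dnode, matching the htonl/htobe64 calls the mnode undoes on receipt. A minimal sketch, assuming a glibc system for htobe64 and using illustrative struct names:

#include <stdint.h>
#include <arpa/inet.h>   /* htonl */
#include <endian.h>      /* htobe64 on glibc */

typedef struct {
  int32_t  vgId;
  uint64_t vnodeVersion;
} SLoadOnWire;

static void loadToWire(SLoadOnWire *p, int32_t vgId, uint64_t version) {
  p->vgId         = (int32_t)htonl((uint32_t)vgId);
  p->vnodeVersion = htobe64(version);   /* the mnode converts back on receipt */
}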

View File

@ -108,6 +108,13 @@ static int32_t vnodeCheckWrite(void *vparam) {
return TSDB_CODE_VND_NO_WRITE_AUTH;
}
if (pVnode->dbReplica != pVnode->syncCfg.replica &&
pVnode->syncCfg.nodeInfo[pVnode->syncCfg.replica - 1].nodeId == dnodeGetDnodeId()) {
vDebug("vgId:%d, vnode is balancing and will be dropped, dbReplica:%d vgReplica:%d, refCount:%d pVnode:%p",
pVnode->vgId, pVnode->dbReplica, pVnode->syncCfg.replica, pVnode->refCount, pVnode);
return TSDB_CODE_VND_IS_BALANCING;
}
// tsdb may be in reset state
if (pVnode->tsdb == NULL) {
vDebug("vgId:%d, tsdb is null, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
@ -271,7 +278,7 @@ void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) {
static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) {
SVWriteMsg *pWrite = param;
SVnodeObj * pVnode = pWrite->pVnode;
int32_t code = TSDB_CODE_VND_SYNCING;
int32_t code = TSDB_CODE_VND_IS_SYNCING;
if (pVnode->flowctrlLevel <= 0) code = TSDB_CODE_VND_IS_FLOWCTRL;

View File

@ -74,8 +74,14 @@ function runQueryPerfTest {
CREATETABLETIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'`
INSERTRECORDSTIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'`
REQUESTSPERSECOND=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $13}'`
delay=`grep 'delay' taosdemoperf.txt | awk '{print $4}'`
AVGDELAY=`echo ${delay:0:${#delay}-3}`
delay=`grep 'delay' taosdemoperf.txt | awk '{print $6}'`
MAXDELAY=`echo ${delay:0:${#delay}-3}`
delay=`grep 'delay' taosdemoperf.txt | awk '{print $8}'`
MINDELAY=`echo ${delay:0:${#delay}-2}`
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -t $CREATETABLETIME -i $INSERTRECORDSTIME -r $REQUESTSPERSECOND | tee -a $PERFORMANCE_TEST_REPORT
python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -t $CREATETABLETIME -i $INSERTRECORDSTIME -r $REQUESTSPERSECOND -avg $AVGDELAY -max $MAXDELAY -min $MINDELAY | tee -a $PERFORMANCE_TEST_REPORT
[ -f taosdemoperf.txt ] && rm taosdemoperf.txt
}
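The three delay values above are pulled from the 'delay' line of taosdemoperf.txt with awk and then trimmed with ${delay:0:${#delay}-N}, which simply drops the last N characters (the unit suffix). Below is a rough Python equivalent of that trimming step; the sample tokens are made up, only the 3- and 2-character suffix lengths come from the script.

def strip_unit(token: str, unit_len: int) -> float:
    # Equivalent of the bash expansion ${token:0:${#token}-unit_len}:
    # drop the trailing unit characters and parse what remains.
    return float(token[:-unit_len])

# strip_unit("12.34ms,", 3) -> 12.34   (avg/max strip 3 trailing chars)
# strip_unit("0.56ms", 2)   -> 0.56    (min strips 2)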

View File

@ -178,11 +178,15 @@ python3 ./test.py -f query/floatCompare.py
#stream
python3 ./test.py -f stream/metric_1.py
python3 ./test.py -f stream/metric_n.py
python3 ./test.py -f stream/new.py
python3 ./test.py -f stream/stream1.py
python3 ./test.py -f stream/stream2.py
#python3 ./test.py -f stream/parser.py
python3 ./test.py -f stream/history.py
python3 ./test.py -f stream/sys.py
python3 ./test.py -f stream/table_1.py
python3 ./test.py -f stream/table_n.py
#alter table
python3 ./test.py -f alter/alter_table_crash.py

View File

@ -85,6 +85,22 @@ class TDTestCase:
tdSql.checkData(0, 1, 0.1)
tdSql.checkData(1, 1, 1.1)
#TD-2457 bottom + interval + order by
tdSql.error('select top(col2,1) from test interval(1y) order by col2;')
#TD-2563 top + super_table + interval
tdSql.execute("create table meters(ts timestamp, c int) tags (d int)")
tdSql.execute("create table t1 using meters tags (1)")
sql = 'insert into t1 values '
for i in range(20000):
sql = sql + '(%d, %d)' % (self.ts + i , i % 47)
if i % 2000 == 0:
tdSql.execute(sql)
sql = 'insert into t1 values '
tdSql.execute(sql)
tdSql.query('select bottom(c,1) from meters interval(10a)')
tdSql.checkData(0,1,0)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -89,6 +89,20 @@ class TDTestCase:
tdSql.checkRows(2)
tdSql.checkData(0, 1, 8.1)
tdSql.checkData(1, 1, 9.1)
#TD-2563 top + super_table + interval
tdSql.execute("create table meters(ts timestamp, c int) tags (d int)")
tdSql.execute("create table t1 using meters tags (1)")
sql = 'insert into t1 values '
for i in range(20000):
sql = sql + '(%d, %d)' % (self.ts + i , i % 47)
if i % 2000 == 0:
tdSql.execute(sql)
sql = 'insert into t1 values '
tdSql.execute(sql)
tdSql.query('select top(c,1) from meters interval(10a)')
tdSql.checkData(0,1,9)
def stop(self):
tdSql.close()

View File

@ -178,11 +178,15 @@ python3 ./test.py -f query/floatCompare.py
#stream
python3 ./test.py -f stream/metric_1.py
python3 ./test.py -f stream/metric_n.py
python3 ./test.py -f stream/new.py
python3 ./test.py -f stream/stream1.py
python3 ./test.py -f stream/stream2.py
#python3 ./test.py -f stream/parser.py
python3 ./test.py -f stream/history.py
python3 ./test.py -f stream/sys.py
python3 ./test.py -f stream/table_1.py
python3 ./test.py -f stream/table_n.py
#alter table
python3 ./test.py -f alter/alter_table_crash.py

View File

@ -0,0 +1,123 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tbNum = 10
rowNum = 20
totalNum = tbNum * rowNum
tdSql.prepare()
tdLog.info("===== preparing data =====")
tdSql.execute(
"create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)")
for i in range(tbNum):
tdSql.execute("create table tb%d using stb tags(%d)" % (i, i))
for j in range(rowNum):
tdSql.execute(
"insert into tb%d values (now - %dm, %d, %d)" %
(i, 1440 - j, j, j))
time.sleep(0.1)
tdLog.info("===== step 1 =====")
tdSql.query("select count(*), count(tbcol), count(tbcol2) from stb interval(1d)")
tdSql.checkData(0, 1, totalNum)
tdSql.checkData(0, 2, totalNum)
tdSql.checkData(0, 3, totalNum)
tdLog.info("===== step 2 =====")
tdSql.execute("create table strm_c3 as select count(*), count(tbcol), count(tbcol2) from stb interval(1d)")
tdLog.info("===== step 3 =====")
tdSql.execute("create table strm_c32 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from stb interval(1d)")
tdLog.info("===== step 4 =====")
tdSql.query("select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from stb interval(1d)")
tdSql.checkData(0, 1, totalNum)
tdSql.checkData(0, 2, totalNum)
tdSql.checkData(0, 3, totalNum)
tdLog.info("===== step 5 =====")
tdSql.execute("create table strm_c31 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from stb interval(1d)")
tdLog.info("===== step 6 =====")
tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb interval(1d)")
tdSql.checkData(0, 1, 9.5)
tdSql.checkData(0, 2, 1900)
tdSql.checkData(0, 3, 0)
tdSql.checkData(0, 4, 19)
tdSql.checkData(0, 5, 0)
tdSql.checkData(0, 6, 19)
tdSql.execute("create table strm_avg as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb interval(1d)")
tdLog.info("===== step 7 =====")
tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), count(tbcol) from stb where ts < now + 4m interval(1d)")
tdSql.checkData(0, 1, 9.5)
tdSql.checkData(0, 2, 1900)
tdSql.checkData(0, 3, 0)
tdSql.checkData(0, 4, 19)
tdSql.checkData(0, 5, 0)
tdSql.checkData(0, 6, 19)
tdSql.checkData(0, 7, totalNum)
tdLog.info("===== step 8 =====")
tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), count(tbcol) from stb where ts < now + 4m interval(1d)")
tdSql.checkData(0, 1, 9.5)
tdSql.checkData(0, 2, 1900)
tdSql.checkData(0, 3, 0)
tdSql.checkData(0, 4, 19)
tdSql.checkData(0, 5, 0)
tdSql.checkData(0, 6, 19)
tdSql.checkData(0, 7, totalNum)
tdLog.info("===== step 9 =====")
tdSql.waitedQuery("select * from strm_c3", 1, 120)
tdSql.checkData(0, 1, totalNum)
tdSql.checkData(0, 2, totalNum)
tdSql.checkData(0, 3, totalNum)
tdLog.info("===== step 10 =====")
tdSql.waitedQuery("select * from strm_c31", 1, 30)
for i in range(1, 10):
tdSql.checkData(0, i, totalNum)
tdLog.info("===== step 11 =====")
tdSql.waitedQuery("select * from strm_avg", 1, 20)
tdSql.checkData(0, 1, 9.5)
tdSql.checkData(0, 2, 1900)
tdSql.checkData(0, 3, 0)
tdSql.checkData(0, 4, 19)
tdSql.checkData(0, 5, 0)
tdSql.checkData(0, 6, 19)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,61 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# migrated from 'stream_on_sys.sim'
# -*- coding: utf-8 -*-
import sys
import time
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
updatecfgDict = {'monitor': 1}
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.execute("use log")
tdSql.execute("create table cpustrm as select count(*), avg(cpu_taosd), max(cpu_taosd), min(cpu_taosd), avg(cpu_system), max(cpu_cores), min(cpu_cores), last(cpu_cores) from log.dn1 interval(4s)")
tdSql.execute("create table memstrm as select count(*), avg(mem_taosd), max(mem_taosd), min(mem_taosd), avg(mem_system), first(mem_total), last(mem_total) from log.dn1 interval(4s)")
tdSql.execute("create table diskstrm as select count(*), avg(disk_used), last(disk_used), avg(disk_total), first(disk_total) from log.dn1 interval(4s)")
tdSql.execute("create table bandstrm as select count(*), avg(band_speed), last(band_speed) from log.dn1 interval(4s)")
tdSql.execute("create table reqstrm as select count(*), avg(req_http), last(req_http), avg(req_select), last(req_select), avg(req_insert), last(req_insert) from log.dn1 interval(4s)")
tdSql.execute("create table iostrm as select count(*), avg(io_read), last(io_read), avg(io_write), last(io_write) from log.dn1 interval(4s)")
sqls = [
"select * from cpustrm",
"select * from memstrm",
"select * from diskstrm",
"select * from bandstrm",
"select * from reqstrm",
"select * from iostrm",
]
for sql in sqls:
(rows, _) = tdSql.waitedQuery(sql, 1, 120)
if rows < 1:
tdLog.exit("failed: sql:%s, expect at least one row" % sql)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,89 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def createFuncStream(self, expr, suffix, value):
tbname = "strm_" + suffix
tdLog.info("create stream table %s" % tbname)
tdSql.query("select %s from tb1 interval(1d)" % expr)
tdSql.checkData(0, 1, value)
tdSql.execute("create table %s as select %s from tb1 interval(1d)" % (tbname, expr))
def checkStreamData(self, suffix, value):
sql = "select * from strm_" + suffix
tdSql.waitedQuery(sql, 1, 120)
tdSql.checkData(0, 1, value)
def run(self):
tbNum = 10
rowNum = 20
tdSql.prepare()
tdLog.info("===== step1 =====")
tdSql.execute(
"create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)")
for i in range(tbNum):
tdSql.execute("create table tb%d using stb tags(%d)" % (i, i))
for j in range(rowNum):
tdSql.execute(
"insert into tb%d values (now - %dm, %d, %d)" %
(i, 1440 - j, j, j))
time.sleep(0.1)
self.createFuncStream("count(*)", "c1", rowNum)
self.createFuncStream("count(tbcol)", "c2", rowNum)
self.createFuncStream("count(tbcol2)", "c3", rowNum)
self.createFuncStream("avg(tbcol)", "av", 9.5)
self.createFuncStream("sum(tbcol)", "su", 190)
self.createFuncStream("min(tbcol)", "mi", 0)
self.createFuncStream("max(tbcol)", "ma", 19)
self.createFuncStream("first(tbcol)", "fi", 0)
self.createFuncStream("last(tbcol)", "la", 19)
self.createFuncStream("stddev(tbcol)", "st", 5.766281297335398)
self.createFuncStream("percentile(tbcol, 1)", "pe", 0.19)
self.createFuncStream("count(tbcol)", "as", rowNum)
self.checkStreamData("c1", rowNum)
self.checkStreamData("c2", rowNum)
self.checkStreamData("c3", rowNum)
self.checkStreamData("av", 9.5)
self.checkStreamData("su", 190)
self.checkStreamData("mi", 0)
self.checkStreamData("ma", 19)
self.checkStreamData("fi", 0)
self.checkStreamData("la", 19)
self.checkStreamData("st", 5.766281297335398)
self.checkStreamData("pe", 0.19)
self.checkStreamData("as", rowNum)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,143 @@
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tbNum = 10
rowNum = 20
tdSql.prepare()
tdLog.info("===== preparing data =====")
tdSql.execute(
"create table stb(ts timestamp, tbcol int, tbcol2 float) tags(tgcol int)")
for i in range(tbNum):
tdSql.execute("create table tb%d using stb tags(%d)" % (i, i))
for j in range(rowNum):
tdSql.execute(
"insert into tb%d values (now - %dm, %d, %d)" %
(i, 1440 - j, j, j))
time.sleep(0.1)
tdLog.info("===== step 1 =====")
tdSql.query("select count(*), count(tbcol), count(tbcol2) from tb1 interval(1d)")
tdSql.checkData(0, 1, rowNum)
tdSql.checkData(0, 2, rowNum)
tdSql.checkData(0, 3, rowNum)
tdLog.info("===== step 2 =====")
tdSql.execute("create table strm_c3 as select count(*), count(tbcol), count(tbcol2) from tb1 interval(1d)")
tdLog.info("===== step 3 =====")
tdSql.execute("create table strm_c32 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from tb1 interval(1d)")
tdLog.info("===== step 4 =====")
tdSql.query("select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from tb1 interval(1d)")
tdSql.checkData(0, 1, rowNum)
tdSql.checkData(0, 2, rowNum)
tdSql.checkData(0, 3, rowNum)
tdLog.info("===== step 5 =====")
tdSql.execute("create table strm_c31 as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from tb1 interval(1d)")
tdLog.info("===== step 6 =====")
tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from tb1 interval(1d)")
tdSql.checkData(0, 1, 9.5)
tdSql.checkData(0, 2, 190)
tdSql.checkData(0, 3, 0)
tdSql.checkData(0, 4, 19)
tdSql.checkData(0, 5, 0)
tdSql.checkData(0, 6, 19)
tdSql.execute("create table strm_avg as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from tb1 interval(1d)")
tdLog.info("===== step 7 =====")
tdSql.query("select stddev(tbcol), leastsquares(tbcol, 1, 1), percentile(tbcol, 1) from tb1 interval(1d)")
tdSql.checkData(0, 1, 5.766281297335398)
tdSql.checkData(0, 3, 0.19)
tdSql.execute("create table strm_ot as select stddev(tbcol), leastsquares(tbcol, 1, 1), percentile(tbcol, 1) from tb1 interval(1d)")
tdLog.info("===== step 8 =====")
tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 interval(1d)")
tdSql.checkData(0, 1, 9.5)
tdSql.checkData(0, 2, 190)
tdSql.checkData(0, 3, 0)
tdSql.checkData(0, 4, 19)
tdSql.checkData(0, 5, 0)
tdSql.checkData(0, 6, 19)
tdSql.checkData(0, 7, 5.766281297335398)
tdSql.checkData(0, 8, 0.19)
tdSql.checkData(0, 9, rowNum)
tdSql.execute("create table strm_to as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 interval(1d)")
tdLog.info("===== step 9 =====")
tdSql.query("select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 where ts < now + 4m interval(1d)")
tdSql.checkData(0, 9, rowNum)
tdSql.execute("create table strm_wh as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from tb1 where ts < now + 4m interval(1d)")
tdLog.info("===== step 10 =====")
tdSql.waitedQuery("select * from strm_c3", 1, 120)
tdSql.checkData(0, 1, rowNum)
tdSql.checkData(0, 2, rowNum)
tdSql.checkData(0, 3, rowNum)
tdLog.info("===== step 11 =====")
tdSql.waitedQuery("select * from strm_c31", 1, 30)
for i in range(1, 10):
tdSql.checkData(0, i, rowNum)
tdLog.info("===== step 12 =====")
tdSql.waitedQuery("select * from strm_avg", 1, 20)
tdSql.checkData(0, 1, 9.5)
tdSql.checkData(0, 2, 190)
tdSql.checkData(0, 3, 0)
tdSql.checkData(0, 4, 19)
tdSql.checkData(0, 5, 0)
tdSql.checkData(0, 6, 19)
tdLog.info("===== step 13 =====")
tdSql.waitedQuery("select * from strm_ot", 1, 20)
tdSql.checkData(0, 1, 5.766281297335398)
tdSql.checkData(0, 3, 0.19)
tdLog.info("===== step 14 =====")
tdSql.waitedQuery("select * from strm_to", 1, 20)
tdSql.checkData(0, 1, 9.5)
tdSql.checkData(0, 2, 190)
tdSql.checkData(0, 3, 0)
tdSql.checkData(0, 4, 19)
tdSql.checkData(0, 5, 0)
tdSql.checkData(0, 6, 19)
tdSql.checkData(0, 7, 5.766281297335398)
tdSql.checkData(0, 8, 0.19)
tdSql.checkData(0, 9, rowNum)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -39,6 +39,23 @@ class TDTestCase:
except Exception as e:
tdLog.exit(e)
# case for defect: https://jira.taosdata.com:18080/browse/TD-2560
tdSql.execute("create table db.tb02 using st tags(2)")
tdSql.execute("create table db.tb03 using st tags(3)")
tdSql.execute("create table db.tb04 using st tags(4)")
tdSql.query("show tables like 'tb%' ")
tdSql.checkRows(4)
tdSql.query("show tables like 'tb0%' ")
tdSql.checkRows(3)
tdSql.execute("create table db.st0 (ts timestamp, i int) tags(j int)")
tdSql.execute("create table db.st1 (ts timestamp, i int, c2 int) tags(j int, loc nchar(20))")
tdSql.query("show stables like 'st%' ")
tdSql.checkRows(3)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -22,12 +22,15 @@ import argparse
import os.path
class taosdemoPerformace:
def __init__(self, commitID, dbName, createTableTime, insertRecordsTime, recordsPerSecond):
def __init__(self, commitID, dbName, createTableTime, insertRecordsTime, recordsPerSecond, avgDelay, maxDelay, minDelay):
self.commitID = commitID
self.dbName = dbName
self.createTableTime = createTableTime
self.insertRecordsTime = insertRecordsTime
self.recordsPerSecond = recordsPerSecond
self.recordsPerSecond = recordsPerSecond
self.avgDelay = avgDelay
self.maxDelay = maxDelay
self.minDelay = minDelay
self.host = "127.0.0.1"
self.user = "root"
self.password = "taosdata"
@ -43,12 +46,15 @@ class taosdemoPerformace:
cursor.execute("create database if not exists %s" % self.dbName)
cursor.execute("use %s" % self.dbName)
cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50))")
cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float)")
print("==================== taosdemo performance ====================")
print("create tables time: %f" % self.createTableTime)
print("insert records time: %f" % self.insertRecordsTime)
print("records per second: %f" % self.recordsPerSecond)
cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s')" % (self.createTableTime, self.insertRecordsTime, self.recordsPerSecond, self.commitID))
print("avg delay: %f" % self.avgDelay)
print("max delay: %f" % self.maxDelay)
print("min delay: %f" % self.minDelay)
cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f)" % (self.createTableTime, self.insertRecordsTime, self.recordsPerSecond, self.commitID, self.avgDelay, self.maxDelay, self.minDelay))
cursor.execute("drop database if exists taosdemo_insert_test")
cursor.close()
@ -86,8 +92,28 @@ if __name__ == '__main__':
action='store',
type=float,
help='records per request')
parser.add_argument(
'-avg',
'--avg-delay',
action='store',
type=float,
help='avg delay')
parser.add_argument(
'-max',
'--max-delay',
action='store',
type=float,
help='max delay')
parser.add_argument(
'-min',
'--min-delay',
action='store',
type=float,
help='min delay')
args = parser.parse_args()
perftest = taosdemoPerformace(args.commit_id, args.database_name, args.create_table, args.insert_records, args.records_per_second)
perftest = taosdemoPerformace(args.commit_id, args.database_name, args.create_table, args.insert_records, args.records_per_second,
args.avg_delay, args.max_delay, args.min_delay)
perftest.createTablesAndStoreData()
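A standalone sketch of how the three new delay flags behave once parsed, mirroring the parser.add_argument calls above; the sample argv values are placeholders:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-avg', '--avg-delay', action='store', type=float, help='avg delay')
parser.add_argument('-max', '--max-delay', action='store', type=float, help='max delay')
parser.add_argument('-min', '--min-delay', action='store', type=float, help='min delay')
args = parser.parse_args(['-avg', '0.8', '-max', '2.5', '-min', '0.1'])
print(args.avg_delay, args.max_delay, args.min_delay)  # -> 0.8 2.5 0.1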

View File

@ -134,8 +134,6 @@ run general/parser/tags_dynamically_specifiy.sim
run general/parser/set_tag_vals.sim
#unsupport run general/parser/repeatAlter.sim
#unsupport run general/parser/slimit_alter_tags.sim
#unsupport run general/parser/stream_on_sys.sim
#unsupport run general/parser/repeatStream.sim
run general/stable/disk.sim
run general/stable/dnode3.sim
run general/stable/metrics.sim
@ -213,9 +211,6 @@ run general/vector/table_time.sim
run general/stream/restart_stream.sim
run general/stream/stream_3.sim
run general/stream/stream_restart.sim
run general/stream/table_1.sim
run general/stream/table_n.sim
run general/stream/metrics_n.sim
run general/stream/table_del.sim
run general/stream/metrics_del.sim
run general/stream/table_replica1_vnoden.sim

View File

@ -1,9 +0,0 @@
$i = 1
$repeats = 5
while $i <= $repeats
print ====== repeat: $i
run general/parser/stream.sim
$i = $i + 1
endw
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,62 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/cfg.sh -n dnode1 -c monitor -v 1
system sh/cfg.sh -n dnode1 -c monitorInterval -v 1
system sh/exec.sh -n dnode1 -s start
sleep 500
sql connect
print ======================== stream_on_sys.sim
$db = log
$tb = tb
$mt = mt
$strm = strm
$tbNum = 10
$rowNum = 20
$totalNum = 200
$i = 0
sql use $db
sql create table cpustrm as select count(*), avg(cpu_taosd), max(cpu_taosd), min(cpu_taosd), avg(cpu_system), max(cpu_cores), min(cpu_cores), last(cpu_cores) from log.dn1 interval(4s)
sql create table memstrm as select count(*), avg(mem_taosd), max(mem_taosd), min(mem_taosd), avg(mem_system), first(mem_total), last(mem_total) from log.dn1 interval(4s)
sql create table diskstrm as select count(*), avg(disk_used), last(disk_used), avg(disk_total), first(disk_total) from log.dn1 interval(4s)
sql create table bandstrm as select count(*), avg(band_speed), last(band_speed) from log.dn1 interval(4s)
sql create table reqstrm as select count(*), avg(req_http), last(req_http), avg(req_select), last(req_select), avg(req_insert), last(req_insert) from log.dn1 interval(4s)
sql create table iostrm as select count(*), avg(io_read), last(io_read), avg(io_write), last(io_write) from log.dn1 interval(4s)
sleep 120000
sql select * from cpustrm
if $rows <= 0 then
return -1
endi
sql select * from memstrm
if $rows <= 0 then
return -1
endi
sql select * from diskstrm
if $rows <= 0 then
return -1
endi
sql select * from bandstrm
if $rows <= 0 then
return -1
endi
sql select * from reqstrm
if $rows <= 0 then
return -1
endi
sql select * from iostrm
if $rows <= 0 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -105,10 +105,3 @@ sleep 500
run general/parser/sliding.sim
sleep 500
run general/parser/function.sim
#sleep 500
#run general/parser/repeatStream.sim
#sleep 500
#run general/parser/stream_on_sys.sim
#sleep 500
#run general/parser/stream.sim

View File

@ -1,262 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
print ======================== dnode1 start
$dbPrefix = mn_db
$tbPrefix = mn_tb
$mtPrefix = mn_mt
$stPrefix = mn_st
$tbNum = 10
$rowNum = 20
$totalNum = 200
print =============== step1
$i = 0
$db = $dbPrefix . $i
$mt = $mtPrefix . $i
$st = $stPrefix . $i
sql drop database $db -x step1
step1:
sql create database $db
sql use $db
sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
$i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
$x = -1440
$y = 0
while $y < $rowNum
$ms = $x . m
sql insert into $tb values (now $ms , $y , $y )
$x = $x + 1
$y = $y + 1
endw
$i = $i + 1
endw
sleep 100
print =============== step2 c3
$i = 1
$tb = $tbPrefix . $i
sql select count(*), count(tbcol), count(tbcol2) from $mt interval(1d)
print select count(*), count(tbcol), count(tbcol2) from $mt interval(1d) ===> $data00 $data01 $data02, $data03
if $data01 != 200 then
return -1
endi
if $data02 != 200 then
return -1
endi
if $data03 != 200 then
return -1
endi
$st = $stPrefix . c3
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $mt interval(1d)
print =============== step3 count32
#total 32 count in select
sql select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from $mt interval(1d)
# total 32 count in stream
$st = $stPrefix . c32
sql create table $st as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from $mt interval(1d)
print =============== step4 count31
#total 31 count in select
sql select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from $mt interval(1d)
print ===> $data00 $data01 $data02, $data03
if $data01 != 200 then
return -1
endi
if $data02 != 200 then
return -1
endi
if $data03 != 200 then
return -1
endi
# total 31 count in stream will crash, 32 will error
$st = $stPrefix . c31
sql create table $st as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from $mt interval(1d)
print =============== step5 avg ...
sql select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt interval(1d)
print ===> $data01 $data02 $data03 $data04 $data05 $data06
if $data01 != 9.500000000 then
return -1
endi
if $data02 != 1900 then
return -1
endi
if $data03 != 0 then
return -1
endi
if $data04 != 19 then
return -1
endi
if $data05 != 0 then
return -1
endi
if $data06 != 19 then
return -1
endi
$st = $stPrefix . avg
sql create table $st as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt interval(1d)
print =============== step6 others
sql select stddev(tbcol), leastsquares(tbcol, 1, 1), percentile(tbcol, 1) from $mt interval(1d) -x step6
return -1
step6:
$st = $stPrefix . ot
sql create table $st as select stddev(tbcol), leastsquares(tbcol, 1, 1), percentile(tbcol, 1) from $mt interval(1d) -x step61
return -1
step61:
print =============== step7 where
sql select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), count(tbcol) from $mt where ts < now + 4m interval(1d)
print select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), count(tbcol) from $mt where ts < now + 4m interval(1d)
print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07
if $data01 != 9.500000000 then
return -1
endi
if $data02 != 1900 then
return -1
endi
if $data03 != 0 then
return -1
endi
if $data04 != 19 then
return -1
endi
if $data05 != 0 then
return -1
endi
if $data06 != 19 then
return -1
endi
if $data07 != 200 then
return -1
endi
$st = $stPrefix . wh
#sql create table $st as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), count(tbcol) from $mt where ts < now + 4m interval(1d)
print =============== step8 as
sql select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), count(tbcol) from $mt where ts < now + 4m interval(1d)
print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07
if $data01 != 9.500000000 then
return -1
endi
if $data02 != 1900 then
return -1
endi
if $data03 != 0 then
return -1
endi
if $data04 != 19 then
return -1
endi
if $data05 != 0 then
return -1
endi
if $data06 != 19 then
return -1
endi
if $data07 != 200 then
return -1
endi
$st = $stPrefix . as
#sql create table $st as select avg(tbcol) as a1, sum(tbcol) as a2, min(tbcol) as a3, max(tbcol) as a4, first(tbcol) as a5, last(tbcol) as a6, count(tbcol) as a7, avg(tbcol) as a8, sum(tbcol) as a9, min(tbcol) as a3, max(tbcol) as a4, first(tbcol) as a5, last(tbcol) as a6, count(tbcol) as a7 from $mt where ts < now + 4m interval(1d)
print =============== step9
print sleep 120 seconds
sleep 120000
print =============== step10
$st = $stPrefix . c3
sql select * from $st
print ===> select * from $st
print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data01 != 200 then
return -1
endi
if $data02 != 200 then
return -1
endi
if $data03 != 200 then
return -1
endi
$st = $stPrefix . c31
sql select * from $st
if $data01 != 200 then
return -1
endi
if $data02 != 200 then
return -1
endi
if $data03 != 200 then
return -1
endi
if $data04 != 200 then
return -1
endi
if $data05 != 200 then
return -1
endi
if $data06 != 200 then
return -1
endi
if $data07 != 200 then
return -1
endi
if $data08 != 200 then
return -1
endi
if $data09 != 200 then
return -1
endi
$st = $stPrefix . avg
sql select * from $st
print ===> select * from $st
print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data01 != 9.500000000 then
return -1
endi
if $data02 != 1900 then
return -1
endi
if $data03 != 0 then
return -1
endi
if $data04 != 19 then
return -1
endi
if $data05 != 0 then
return -1
endi
if $data06 != 19 then
return -1
endi

View File

@ -1,317 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
print ======================== dnode1 start
$dbPrefix = t1_db
$tbPrefix = t1_tb
$mtPrefix = t1_mt
$stPrefix = t1_st
$tbNum = 10
$rowNum = 20
$totalNum = 200
print =============== step1
$i = 0
$db = $dbPrefix . $i
$mt = $mtPrefix . $i
$st = $stPrefix . $i
sql drop database $db -x step1
step1:
sql create database $db
sql use $db
sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
$i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
$x = -1440
$y = 0
while $y < $rowNum
$ms = $x . m
sql insert into $tb values (now $ms , $y , $y )
$x = $x + 1
$y = $y + 1
endw
$i = $i + 1
endw
sleep 100
print =============== step2 c1
$i = 1
$tb = $tbPrefix . $i
sql select count(*) from $tb interval(1d)
print select count(*) from $tb interval(1d) ===> $data00 $data01
if $data01 != $rowNum then
return -1
endi
$st = $stPrefix . c1
sql create table $st as select count(*) from $tb interval(1d)
print =============== step3 c2
sql select count(tbcol) from $tb interval(1d)
print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
if $data01 != $rowNum then
return -1
endi
$st = $stPrefix . c2
sql create table $st as select count(tbcol) from $tb interval(1d)
print =============== step4 c3
sql select count(tbcol2) from $tb interval(1d)
print select count(tbcol2) from $tb interval(1d) ===> $data00 $data01
if $data01 != $rowNum then
return -1
endi
$st = $stPrefix . c3
sql create table $st as select count(tbcol2) from $tb interval(1d)
print =============== step5 avg
sql select avg(tbcol) from $tb interval(1d)
print select avg(tbcol) from $tb interval(1d) ===> $data00 $data01
if $data01 != 9.500000000 then
return -1
endi
$st = $stPrefix . av
sql create table $st as select avg(tbcol) from $tb interval(1d)
print =============== step6 su
sql select sum(tbcol) from $tb interval(1d)
print select sum(tbcol) from $tb interval(1d) ===> $data00 $data01
if $data01 != 190 then
return -1
endi
$st = $stPrefix . su
sql create table $st as select sum(tbcol) from $tb interval(1d)
print =============== step7 mi
sql select min(tbcol) from $tb interval(1d)
print select min(tbcol) from $tb interval(1d) ===> $data00 $data01
if $data01 != 0 then
return -1
endi
$st = $stPrefix . mi
sql create table $st as select min(tbcol) from $tb interval(1d)
print =============== step8 ma
sql select max(tbcol) from $tb interval(1d)
print select max(tbcol) from $tb interval(1d) ===> $data00 $data01
if $data01 != 19 then
return -1
endi
$st = $stPrefix . ma
sql create table $st as select max(tbcol) from $tb interval(1d)
print =============== step9 fi
sql select first(tbcol) from $tb interval(1d)
print select first(tbcol) from $tb interval(1d) ===> $data00 $data01
if $data01 != 0 then
return -1
endi
$st = $stPrefix . fi
sql create table $st as select first(tbcol) from $tb interval(1d)
print =============== step10 la
sql select last(tbcol) from $tb interval(1d)
print select last(tbcol) from $tb interval(1d) ===> $data00 $data01
if $data01 != 19 then
return -1
endi
$st = $stPrefix . la
sql create table $st as select last(tbcol) from $tb interval(1d)
print =============== step11 st
sql select stddev(tbcol) from $tb interval(1d)
print select stddev(tbcol) from $tb interval(1d) ===> $data00 $data01
if $data01 != 5.766281297 then
return -1
endi
$st = $stPrefix . std
sql create table $st as select stddev(tbcol) from $tb interval(1d)
print =============== step12 le
sql select leastsquares(tbcol, 1, 1) from $tb interval(1d)
print select leastsquares(tbcol, 1, 1) from $tb interval(1d) ===> $data00 $data01
#if $data01 != @(0.000017, -25362055.126740)@ then
# return -1
#endi
$st = $stPrefix . le
sql create table $st as select leastsquares(tbcol, 1, 1) from $tb interval(1d)
print =============== step13
sql select top(tbcol, 1) from $tb interval(1d)
print =============== step14
sql select bottom(tbcol, 1) from $tb interval(1d)
print =============== step15 pe
sql select percentile(tbcol, 1) from $tb interval(1d)
print select percentile(tbcol, 1) from $tb interval(1d) ===> $data00 $data01
if $data01 != 0.190000000 then
return -1
endi
$st = $stPrefix . pe
sql create table $st as select percentile(tbcol, 1) from $tb interval(1d)
print =============== step16
sql select diff(tbcol) from $tb interval(1d) -x step16
return -1
step16:
print =============== step17 wh
sql select count(tbcol) from $tb where ts < now + 4m interval(1d)
print select count(tbcol) from $tb where ts < now + 4m interval(1d) ===> $data00 $data01
if $data01 != $rowNum then
return -1
endi
$st = $stPrefix . wh
#sql create table $st as select count(tbcol) from $tb where ts < now + 4m interval(1d)
print =============== step18 as
sql select count(tbcol) from $tb interval(1d)
print select count(tbcol) from $tb interval(1d) ===> $data00 $data01
if $data01 != $rowNum then
return -1
endi
$st = $stPrefix . as
sql create table $st as select count(tbcol) as c from $tb interval(1d)
print =============== step19 gb
sql select count(tbcol) from $tb interval(1d) group by tgcol -x step19
return -1
step19:
print =============== step20 x
sql select count(tbcol) from $tb where ts < now + 4m interval(1d) group by tgcol -x step20
return -1
step20:
print =============== step21
print sleep 120 seconds
sleep 120000
print =============== step22
$st = $stPrefix . c1
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != $rowNum then
return -1
endi
$st = $stPrefix . c2
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != $rowNum then
return -1
endi
$st = $stPrefix . c3
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != $rowNum then
return -1
endi
$st = $stPrefix . av
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != 9.500000000 then
return -1
endi
$st = $stPrefix . su
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != 190 then
return -1
endi
$st = $stPrefix . mi
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != 0 then
return -1
endi
$st = $stPrefix . ma
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != 19 then
return -1
endi
$st = $stPrefix . fi
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != 0 then
return -1
endi
$st = $stPrefix . la
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != 19 then
return -1
endi
$st = $stPrefix . std
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != 5.766281297 then
return -1
endi
$st = $stPrefix . le
sql select * from $st
#print ===> select * from $st ===> $data00 $data01
#if $data01 != @(0.000017, -25270086.331047)@ then
# return -1
#endi
$st = $stPrefix . pe
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != 0.190000000 then
return -1
endi
#$st = $stPrefix . wh
#sql select * from $st
#print ===> select * from $st ===> $data00 $data01
#if $data01 != $rowNum then
# return -1
#endi
$st = $stPrefix . as
sql select * from $st
print ===> select * from $st ===> $data00 $data01
if $data01 != $rowNum then
return -1
endi

View File

@ -1,293 +0,0 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 0
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
print ======================== dnode1 start
$dbPrefix = tn_db
$tbPrefix = tn_tb
$mtPrefix = tn_mt
$stPrefix = tn_st
$tbNum = 10
$rowNum = 20
$totalNum = 200
print =============== step1
$i = 0
$db = $dbPrefix . $i
$mt = $mtPrefix . $i
$st = $stPrefix . $i
sql drop database $db -x step1
step1:
sql create database $db
sql use $db
sql create table $mt (ts timestamp, tbcol int, tbcol2 float) TAGS(tgcol int)
$i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
$x = -1440
$y = 0
while $y < $rowNum
$ms = $x . m
sql insert into $tb values (now $ms , $y , $y )
$x = $x + 1
$y = $y + 1
endw
$i = $i + 1
endw
sleep 100
print =============== step2 c3
$i = 1
$tb = $tbPrefix . $i
sql select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
print select count(*), count(tbcol), count(tbcol2) from $tb interval(1d) ===> $data00 $data01 $data02, $data03
if $data01 != $rowNum then
return -1
endi
if $data02 != $rowNum then
return -1
endi
if $data03 != $rowNum then
return -1
endi
$st = $stPrefix . c3
sql create table $st as select count(*), count(tbcol), count(tbcol2) from $tb interval(1d)
print =============== step3 count32
#total 32 count in select
sql select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from $tb interval(1d)
# total 32 count in stream
$st = $stPrefix . c32
sql create table $st as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from $tb interval(1d)
print =============== step4 count31
#total 31 count in select
sql select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from $tb interval(1d)
print ===> $data00 $data01 $data02, $data03
if $data01 != $rowNum then
return -1
endi
if $data02 != $rowNum then
return -1
endi
if $data03 != $rowNum then
return -1
endi
# total 31 count in stream will crash, 32 will error
$st = $stPrefix . c31
sql create table $st as select count(*), count(tbcol) as c1, count(tbcol2) as c2, count(tbcol) as c3, count(tbcol) as c4, count(tbcol) as c5, count(tbcol) as c6, count(tbcol) as c7, count(tbcol) as c8, count(tbcol) as c9, count(tbcol) as c10, count(tbcol) as c11, count(tbcol) as c12, count(tbcol) as c13, count(tbcol) as c14, count(tbcol) as c15, count(tbcol) as c16, count(tbcol) as c17, count(tbcol) as c18, count(tbcol) as c19, count(tbcol) as c20, count(tbcol) as c21, count(tbcol) as c22, count(tbcol) as c23, count(tbcol) as c24, count(tbcol) as c25, count(tbcol) as c26, count(tbcol) as c27, count(tbcol) as c28, count(tbcol) as c29, count(tbcol) as c30 from $tb interval(1d)
print =============== step5 avg ...
sql select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $tb interval(1d)
print ===> $data01 $data02 $data03 $data04 $data05 $data06
if $data01 != 9.500000000 then
return -1
endi
if $data02 != 190 then
return -1
endi
if $data03 != 0 then
return -1
endi
if $data04 != 19 then
return -1
endi
if $data05 != 0 then
return -1
endi
if $data06 != 19 then
return -1
endi
$st = $stPrefix . avg
sql create table $st as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $tb interval(1d)
print =============== step6 others
sql select stddev(tbcol), leastsquares(tbcol, 1, 1), percentile(tbcol, 1) from $tb interval(1d)
print ===> $data01 $data02 $data03
if $data01 != 5.766281297 then
return -1
endi
#if $data02 != @(0.000017, -25362055.126740)@ then
# return -1
#endi
if $data03 != 0.190000000 then
return -1
endi
$st = $stPrefix . ot
sql create table $st as select stddev(tbcol), leastsquares(tbcol, 1, 1), percentile(tbcol, 1) from $tb interval(1d)
print =============== step7 total ...
sql select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from $tb interval(1d)
print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data01 != 9.500000000 then
return -1
endi
if $data02 != 190 then
return -1
endi
if $data03 != 0 then
return -1
endi
if $data04 != 19 then
return -1
endi
if $data05 != 0 then
return -1
endi
if $data06 != 19 then
return -1
endi
if $data07 != 5.766281297 then
return -1
endi
if $data08 != 0.190000000 then
return -1
endi
if $data09 != 20 then
return -1
endi
$st = $stPrefix . to
sql create table $st as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from $tb interval(1d)
print =============== step8 where
sql select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from $tb where ts < now + 4m interval(1d)
print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data09 != 20 then
return -1
endi
$st = $stPrefix . wh
sql create table $st as select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from $tb where ts < now + 4m interval(1d)
print =============== step9 as
sql select avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol), stddev(tbcol), percentile(tbcol, 1), count(tbcol), leastsquares(tbcol, 1, 1) from $tb where ts < now + 4m interval(1d)
print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data09 != 20 then
return -1
endi
$st = $stPrefix . as
#sql create table $st as select avg(tbcol) as a1, sum(tbcol) as a2, min(tbcol) as a3, max(tbcol) as a4, first(tbcol) as a5, last(tbcol) as a6, stddev(tbcol) as a7, percentile(tbcol, 1) as a8, count(tbcol) as a9, leastsquares(tbcol, 1, 1) as a10 from $tb where ts < now + 4m interval(1d)
print =============== step10
print sleep 120 seconds
sleep 120000
print =============== step11
$st = $stPrefix . c3
sql select * from $st
print ===> select * from $st
print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data01 != $rowNum then
return -1
endi
if $data02 != $rowNum then
return -1
endi
if $data03 != $rowNum then
return -1
endi
$st = $stPrefix . c31
sql select * from $st
if $data01 != $rowNum then
return -1
endi
if $data02 != $rowNum then
return -1
endi
if $data03 != $rowNum then
return -1
endi
$st = $stPrefix . avg
sql select * from $st
print ===> select * from $st
print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data01 != 9.500000000 then
return -1
endi
if $data02 != 190 then
return -1
endi
if $data03 != 0 then
return -1
endi
if $data04 != 19 then
return -1
endi
if $data05 != 0 then
return -1
endi
if $data06 != 19 then
return -1
endi
$st = $stPrefix . ot
sql select * from $st
print ===> select * from $st
print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data01 != 5.766281297 then
return -1
endi
#if $data02 != @(0.000017, -25362055.126740)@ then
# return -1
#endi
if $data03 != 0.190000000 then
return -1
endi
$st = $stPrefix . to
sql select * from $st
print ===> select * from $st
print ===> $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
if $data01 != 9.500000000 then
return -1
endi
if $data02 != 190 then
return -1
endi
if $data03 != 0 then
return -1
endi
if $data04 != 19 then
return -1
endi
if $data05 != 0 then
return -1
endi
if $data06 != 19 then
return -1
endi
if $data07 != 5.766281297 then
return -1
endi
if $data08 != 0.190000000 then
return -1
endi
if $data09 != 20 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1,8 +1,5 @@
run general/stream/stream_3.sim
run general/stream/stream_restart.sim
run general/stream/table_1.sim
run general/stream/table_n.sim
run general/stream/metrics_n.sim
run general/stream/table_del.sim
run general/stream/metrics_del.sim
run general/stream/table_replica1_vnoden.sim

View File

@ -288,6 +288,7 @@ cd ../../../debug; make
./test.sh -f unique/dnode/data1.sim
./test.sh -f unique/dnode/m2.sim
./test.sh -f unique/dnode/m3.sim
./test.sh -f unique/dnode/lossdata.sim
./test.sh -f unique/dnode/offline1.sim
./test.sh -f unique/dnode/offline2.sim
./test.sh -f unique/dnode/offline3.sim
@ -318,6 +319,7 @@ cd ../../../debug; make
./test.sh -f unique/mnode/mgmt24.sim
./test.sh -f unique/mnode/mgmt25.sim
./test.sh -f unique/mnode/mgmt26.sim
./test.sh -f unique/mnode/mgmt30.sim
./test.sh -f unique/mnode/mgmt33.sim
./test.sh -f unique/mnode/mgmt34.sim
./test.sh -f unique/mnode/mgmtr2.sim
@ -329,16 +331,12 @@ cd ../../../debug; make
./test.sh -f unique/vnode/replica3_repeat.sim
./test.sh -f unique/vnode/replica3_vgroup.sim
./test.sh -f general/parser/stream_on_sys.sim
./test.sh -f general/stream/metrics_del.sim
./test.sh -f general/stream/metrics_n.sim
./test.sh -f general/stream/metrics_replica1_vnoden.sim
./test.sh -f general/stream/restart_stream.sim
./test.sh -f general/stream/stream_3.sim
./test.sh -f general/stream/stream_restart.sim
./test.sh -f general/stream/table_1.sim
./test.sh -f general/stream/table_del.sim
./test.sh -f general/stream/table_n.sim
./test.sh -f general/stream/table_replica1_vnoden.sim
./test.sh -f unique/arbitrator/check_cluster_cfg_para.sim

View File

@ -150,6 +150,8 @@
./test.sh -f general/parser/repeatAlter.sim
./test.sh -f general/parser/union.sim
./test.sh -f general/parser/topbot.sim
./test.sh -f general/db/nosuchfile.sim
./test.sh -f general/parser/function.sim
./test.sh -f general/stable/disk.sim
./test.sh -f general/stable/dnode3.sim

View File

@ -26,6 +26,9 @@ cd ../../../debug; make
./test.sh -f general/tag/set.sim
./test.sh -f general/tag/smallint.sim
./test.sh -f general/tag/tinyint.sim
./test.sh -f general/wal/sync.sim
./test.sh -f general/wal/kill.sim
./test.sh -f general/wal/maxtables.sim
./test.sh -f general/user/authority.sim
./test.sh -f general/user/monitor.sim
@ -87,4 +90,15 @@ cd ../../../debug; make
./test.sh -f unique/vnode/replica2_repeat.sim
./test.sh -f unique/vnode/replica3_basic.sim
./test.sh -f unique/vnode/replica3_repeat.sim
./test.sh -f unique/vnode/replica3_vgroup.sim
./test.sh -f unique/vnode/replica3_vgroup.sim
./test.sh -f unique/dnode/monitor.sim
./test.sh -f unique/dnode/monitor_bug.sim
./test.sh -f unique/dnode/simple.sim
./test.sh -f unique/dnode/data1.sim
./test.sh -f unique/dnode/m2.sim
./test.sh -f unique/dnode/m3.sim
./test.sh -f unique/dnode/offline3.sim
./test.sh -f general/wal/sync.sim
./test.sh -f general/wal/kill.sim
./test.sh -f general/wal/maxtables.sim

View File

@ -60,20 +60,16 @@
./test.sh -f unique/mnode/mgmt22.sim
./test.sh -f unique/mnode/mgmt23.sim
./test.sh -f unique/mnode/mgmt24.sim
#./test.sh -f unique/mnode/mgmt25.sim
#./test.sh -f unique/mnode/mgmt26.sim
./test.sh -f unique/mnode/mgmt25.sim
./test.sh -f unique/mnode/mgmt26.sim
./test.sh -f unique/mnode/mgmt33.sim
./test.sh -f unique/mnode/mgmt34.sim
./test.sh -f unique/mnode/mgmtr2.sim
./test.sh -f general/parser/stream_on_sys.sim
./test.sh -f general/stream/metrics_del.sim
./test.sh -f general/stream/metrics_n.sim
./test.sh -f general/stream/metrics_replica1_vnoden.sim
./test.sh -f general/stream/restart_stream.sim
./test.sh -f general/stream/stream_3.sim
./test.sh -f general/stream/stream_restart.sim
./test.sh -f general/stream/table_1.sim
./test.sh -f general/stream/table_del.sim
./test.sh -f general/stream/table_n.sim
./test.sh -f general/stream/table_replica1_vnoden.sim

View File

@ -147,9 +147,7 @@ cd ../../../debug; make
./test.sh -f general/parser/join_multivnode.sim
./test.sh -f general/parser/binary_escapeCharacter.sim
./test.sh -f general/parser/bug.sim
#./test.sh -f general/parser/stream_on_sys.sim
./test.sh -f general/parser/repeatAlter.sim
#./test.sh -f general/parser/repeatStream.sim
./test.sh -f general/stable/disk.sim
./test.sh -f general/stable/dnode3.sim

View File

@@ -10,6 +10,7 @@ cd ../../../debug; make
./test.sh -f unique/cluster/balance2.sim
./test.sh -f unique/cluster/balance3.sim
./test.sh -f unique/cluster/cache.sim
./test.sh -f unique/cluster/vgroup100.sim
./test.sh -f unique/column/replica3.sim
@@ -25,12 +26,17 @@ cd ../../../debug; make
./test.sh -f unique/db/replica_part.sim
./test.sh -f unique/dnode/alternativeRole.sim
./test.sh -f unique/dnode/monitor.sim
./test.sh -f unique/dnode/monitor_bug.sim
./test.sh -f unique/dnode/simple.sim
./test.sh -f unique/dnode/balance1.sim
./test.sh -f unique/dnode/balance2.sim
./test.sh -f unique/dnode/balance3.sim
./test.sh -f unique/dnode/balancex.sim
./test.sh -f unique/dnode/data1.sim
./test.sh -f unique/dnode/m2.sim
./test.sh -f unique/dnode/m3.sim
./test.sh -f unique/dnode/lossdata.sim
./test.sh -f unique/dnode/offline1.sim
./test.sh -f unique/dnode/offline2.sim
./test.sh -f unique/dnode/offline3.sim
@@ -54,12 +60,14 @@ cd ../../../debug; make
./test.sh -f unique/stable/replica3_dnode6.sim
./test.sh -f unique/stable/replica3_vnode3.sim
./test.sh -f unique/mnode/mgmt20.sim
./test.sh -f unique/mnode/mgmt21.sim
./test.sh -f unique/mnode/mgmt22.sim
./test.sh -f unique/mnode/mgmt23.sim
./test.sh -f unique/mnode/mgmt24.sim
./test.sh -f unique/mnode/mgmt25.sim
./test.sh -f unique/mnode/mgmt26.sim
./test.sh -f unique/mnode/mgmt30.sim
./test.sh -f unique/mnode/mgmt33.sim
./test.sh -f unique/mnode/mgmt34.sim
./test.sh -f unique/mnode/mgmtr2.sim

View File

@@ -134,9 +134,6 @@ run general/parser/tags_dynamically_specifiy.sim
run general/parser/set_tag_vals.sim
run general/parser/repeatAlter.sim
##unsupport run general/parser/slimit_alter_tags.sim
##unsupport run general/parser/stream_on_sys.sim
run general/parser/stream.sim
#unsupport run general/parser/repeatStream.sim
run general/stable/disk.sim
run general/stable/dnode3.sim
run general/stable/metrics.sim
@@ -214,14 +211,8 @@ run general/vector/table_mix.sim
run general/vector/table_query.sim
run general/vector/table_time.sim
run general/stream/restart_stream.sim
run general/stream/stream_1.sim
run general/stream/stream_2.sim
run general/stream/stream_3.sim
run general/stream/stream_restart.sim
run general/stream/table_1.sim
run general/stream/metrics_1.sim
run general/stream/table_n.sim
run general/stream/metrics_n.sim
run general/stream/table_del.sim
run general/stream/metrics_del.sim
run general/stream/table_replica1_vnoden.sim

View File

@@ -0,0 +1,165 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/deploy.sh -n dnode5 -i 5
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode5 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode5 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxTablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxTablesPerVnode -v 4
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sql create dnode $hostname2
sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 $data4_1
print dnode2 $data4_2
print dnode3 $data4_3
if $data4_1 != ready then
goto step1
endi
if $data4_2 != ready then
goto step1
endi
if $data4_3 != ready then
goto step1
endi
print ========== step2
sql create database d1 replica 2
sql create table d1.t1 (t timestamp, i int)
print ========== step2.1
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_1 != 0 then
return -1
endi
if $data2_2 != 1 then
return -1
endi
if $data2_3 != 1 then
return -1
endi
print ========== step3
sql create dnode $hostname4
system sh/exec.sh -n dnode4 -s start
$x = 0
show3:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 1 then
goto show3
endi
if $data2_3 != 1 then
goto show3
endi
if $data2_4 != 0 then
goto show3
endi
sql show d1.vgroups;
print d1.vgroups $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
print ========== step4
sql drop dnode $hostname3
$i = 0
$rowNum = 10000
while $i < $rowNum
$ts = 1500000000000 + $i
sql insert into d1.t1 values( $ts , $i )
$i = $i + 1
endw
print insert $rowNum finished
$x = 0
show4:
$x = $x + 1
sleep 1000
if $x == 40 then
return -1
endi
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
print dnode5 openVnodes $data2_5
if $data2_2 != 1 then
goto show4
endi
if $data2_3 != null then
goto show4
endi
if $data2_4 != 1 then
goto show4
endi
system sh/exec.sh -n dnode3 -s stop -x SIGINT
print ========== step5
sql select count(*) from d1.t1
print select count(*) from d1.t1 ==> $data00
if $data00 != $rowNum then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode5 -s stop -x SIGINT

View File

@@ -50,6 +50,24 @@ $d1_first = $rows
sql select * from log.dn2
$d2_first = $rows
$x = 0
show4:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
if $data2_1 != master then
goto show4
endi
if $data2_2 != slave then
goto show4
endi
sleep 3000
sql select * from log.dn1
$d1_second = $rows

View File

@@ -0,0 +1,68 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c balanceInterval -v 3000
system sh/cfg.sh -n dnode2 -c balanceInterval -v 3000
system sh/cfg.sh -n dnode3 -c balanceInterval -v 3000
print ============== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sql show mnodes
print dnode1 ==> $data2_1
print dnode2 ==> $data2_2
print dnode3 ==> $data3_3
if $data2_1 != master then
return -1
endi
if $data3_2 != null then
return -1
endi
if $data3_3 != null then
return -1
endi
print ============== step2
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sleep 5000
sql create dnode $hostname2
sql create dnode $hostname3
$x = 0
step2:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql show mnodes
$dnode1Role = $data2_1
$dnode2Role = $data2_2
$dnode3Role = $data2_3
print dnode1 ==> $dnode1Role
print dnode2 ==> $dnode2Role
print dnode3 ==> $dnode3Role
if $dnode1Role != master then
goto step2
endi
if $dnode2Role != slave then
goto step2
endi
if $dnode3Role != slave then
goto step2
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT

View File

@@ -6,7 +6,16 @@ GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
function git_branch {
branch="`git branch 2>/dev/null | grep "^\*" | sed -e "s/^\*\ //"`"
if [ "${branch}" != "" ];then
if [ "${branch}" = "(no branch)" ];then
branch="(`git rev-parse --short HEAD`...)"
fi
branch=(${branch////_})
echo "$branch"
fi
}
function runSimCaseOneByOne {
while read -r line; do
if [[ $line =~ ^./test.sh* ]] || [[ $line =~ ^run* ]]; then
@@ -44,6 +53,11 @@ function runSimCaseOneByOnefq {
out_log=`tail -1 out.log `
if [[ $out_log =~ 'failed' ]];then
if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
cp -r ../../../sim ~/sim_$(git_branch)_`date "+%Y_%m_%d_%H:%M:%S"`
else
cp -r ../../sim ~/sim_$(git_branch)_`date "+%Y_%m_%d_%H:%M:%S" `
fi
exit 8
fi
end_time=`date +%s`
@@ -95,6 +109,7 @@ function runPyCaseOneByOnefq {
end_time=`date +%s`
out_log=`tail -1 pytest-out.log `
if [[ $out_log =~ 'failed' ]];then
cp -r ../../sim ~/sim_$(git_branch)_`date "+%Y_%m_%d_%H:%M:%S" `
exit 8
fi
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
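Together with the git_branch helper added above, these hunks make the driver snapshot the simulator's sim directory whenever a case fails, under a path tagged with the current branch name and a timestamp in the user's home directory. A condensed sketch of that archiving step in isolation (assuming the script runs inside a git checkout and that the sim directory sits where the cp -r lines above expect it; archive_failed_sim is an illustrative name, not part of this patch):

    # Copy the failed run's sim directory to ~/sim_<branch>_<timestamp>.
    archive_failed_sim() {
      branch=$(git branch 2>/dev/null | grep '^\*' | sed -e 's/^\* //')
      if [ "$branch" = "(no branch)" ]; then
        branch="($(git rev-parse --short HEAD)...)"
      fi
      branch=${branch////_}    # replace '/' so the branch name is a single path component
      cp -r ../../sim ~/sim_${branch}_$(date "+%Y_%m_%d_%H:%M:%S")
    }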