Merge pull request #28419 from taosdata/feat/3.0/TD-32231

[TD-32231] Merge the 3.0 branches of taoskeeper community edition and enterprise edition
This commit is contained in:
Shengliang Guan 2024-10-30 14:00:07 +08:00 committed by GitHub
commit 88aced10ec
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
71 changed files with 13817 additions and 4 deletions

58
.github/workflows/taoskeeper-ci.yml vendored Normal file
View File

@ -0,0 +1,58 @@
name: TaosKeeper CI

# Runs taoskeeper unit tests whenever keeper sources change.
on:
  push:
    paths:
      - tools/keeper/**

jobs:
  build:
    runs-on: ubuntu-latest
    name: Run unit tests
    steps:
      - name: Checkout the repository
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          # Quoted so YAML does not parse the version as a float.
          go-version: "1.18"

      - name: Install system dependencies
        run: |
          sudo apt update -y
          sudo apt install -y build-essential cmake libgeos-dev

      - name: Install TDengine
        run: |
          mkdir debug
          cd debug
          cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off -DBUILD_KEEPER=true
          make -j 4
          sudo make install
          which taosd
          which taosadapter
          which taoskeeper

      - name: Start taosd
        run: |
          cp /etc/taos/taos.cfg ./
          # NOTE: `sudo echo ... >> file` only elevates the echo, not the
          # redirection. The local taos.cfg copy is owned by the runner
          # user, so a plain append is both correct and sufficient.
          echo "supportVnodes 256" >> taos.cfg
          nohup sudo taosd -c taos.cfg &

      - name: Start taosadapter
        run: nohup sudo taosadapter &

      - name: Run tests with coverage
        working-directory: tools/keeper
        run: |
          go mod tidy
          go test -v -coverpkg=./... -coverprofile=coverage.out ./...
          go tool cover -func=coverage.out

      - name: Clean up
        if: always()
        run: |
          if pgrep taosd; then sudo pkill taosd; fi
          if pgrep taosadapter; then sudo pkill taosadapter; fi

View File

@ -1,6 +1,7 @@
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE FALSE)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
set(TD_BUILD_KEEPER_INTERNAL FALSE)
# set output directory
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
@ -57,6 +58,19 @@ IF(TD_BUILD_HTTP)
ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF()
# Map the user-facing BUILD_KEEPER cache option onto the internal flags:
#   ""         -> keeper not built (default)
#   "true"     -> build the open-source taoskeeper
#   "internal" -> build the enterprise (internal) keeper
#   "false" / anything else -> keeper not built
IF("${BUILD_KEEPER}" STREQUAL "")
  SET(TD_BUILD_KEEPER FALSE)
ELSE()
  # Use a lower-cased STREQUAL comparison instead of the previous unquoted
  # MATCHES form: MATCHES performs a case-sensitive regex *substring* match,
  # so e.g. "untrue" enabled the keeper while "TRUE" did not.
  STRING(TOLOWER "${BUILD_KEEPER}" _build_keeper_lc)
  IF("${_build_keeper_lc}" STREQUAL "true")
    SET(TD_BUILD_KEEPER TRUE)
  ELSEIF("${_build_keeper_lc}" STREQUAL "internal")
    SET(TD_BUILD_KEEPER FALSE)
    SET(TD_BUILD_KEEPER_INTERNAL TRUE)
  ELSE()
    # Covers "false" and any unrecognized value.
    SET(TD_BUILD_KEEPER FALSE)
  ENDIF()
  UNSET(_build_keeper_lc)
ENDIF()
IF("${BUILD_TOOLS}" STREQUAL "")
IF(TD_LINUX)
IF(TD_ARM_32)

View File

@ -12,9 +12,18 @@ if exist C:\\TDengine\\data\\dnode\\dnodeCfg.json (
rem // stop and delete service
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":stop_delete","","runas",1)(window.close)
echo This might take a few moment to accomplish deleting service taosd/taosadapter ...
if exist %binary_dir%\\build\\bin\\taosadapter.exe (
echo This might take a few moment to accomplish deleting service taosd/taosadapter ...
)
if exist %binary_dir%\\build\\bin\\taoskeeper.exe (
echo This might take a few moment to accomplish deleting service taosd/taoskeeper ...
)
call :check_svc taosd
call :check_svc taosadapter
call :check_svc taoskeeper
set source_dir=%2
set source_dir=%source_dir:/=\\%
@ -46,6 +55,11 @@ if exist %binary_dir%\\test\\cfg\\taosadapter.toml (
copy %binary_dir%\\test\\cfg\\taosadapter.toml %target_dir%\\cfg\\taosadapter.toml > nul
)
)
if exist %binary_dir%\\test\\cfg\\taoskeeper.toml (
if not exist %target_dir%\\cfg\\taoskeeper.toml (
copy %binary_dir%\\test\\cfg\\taoskeeper.toml %target_dir%\\cfg\\taoskeeper.toml > nul
)
)
copy %source_dir%\\include\\client\\taos.h %target_dir%\\include > nul
copy %source_dir%\\include\\util\\taoserror.h %target_dir%\\include > nul
copy %source_dir%\\include\\libs\\function\\taosudf.h %target_dir%\\include > nul
@ -98,12 +112,15 @@ if %Enterprise% == TRUE (
copy %binary_dir%\\build\\bin\\*explorer.exe %target_dir% > nul
)
)
copy %binary_dir%\\build\\bin\\taosd.exe %target_dir% > nul
copy %binary_dir%\\build\\bin\\udfd.exe %target_dir% > nul
if exist %binary_dir%\\build\\bin\\taosadapter.exe (
copy %binary_dir%\\build\\bin\\taosadapter.exe %target_dir% > nul
)
if exist %binary_dir%\\build\\bin\\taoskeeper.exe (
copy %binary_dir%\\build\\bin\\taoskeeper.exe %target_dir% > nul
)
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close)
@ -116,6 +133,10 @@ if exist %binary_dir%\\build\\bin\\taosadapter.exe (
echo To start/stop taosAdapter with administrator privileges: %ESC%[92msc start/stop taosadapter %ESC%[0m
)
if exist %binary_dir%\\build\\bin\\taoskeeper.exe (
echo To start/stop taosKeeper with administrator privileges: %ESC%[92msc start/stop taoskeeper %ESC%[0m
)
goto :eof
:hasAdmin
@ -123,6 +144,7 @@ goto :eof
call :stop_delete
call :check_svc taosd
call :check_svc taosadapter
call :check_svc taoskeeper
if exist c:\\windows\\sysnative (
echo x86
@ -141,6 +163,7 @@ if exist c:\\windows\\sysnative (
rem // create services
sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND
sc create "taosadapter" binPath= "C:\\TDengine\\taosadapter.exe" start= DEMAND
sc create "taoskeeper" binPath= "C:\\TDengine\\taoskeeper.exe" start= DEMAND
set "env=HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
for /f "tokens=2*" %%I in ('reg query "%env%" /v Path ^| findstr /i "\<Path\>"') do (
@ -181,6 +204,8 @@ sc stop taosd
sc delete taosd
sc stop taosadapter
sc delete taosadapter
sc stop taoskeeper
sc delete taoskeeper
exit /B 0
:check_svc

View File

@ -129,6 +129,13 @@ function kill_taosadapter() {
fi
}
function kill_taoskeeper() {
    # Best-effort stop of any running taoskeeper instance; the trailing
    # "|| :" keeps this from aborting the installer if the kill fails.
    local keeper_pid
    keeper_pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
    [ -n "$keeper_pid" ] && ${csudo}kill -9 $keeper_pid || :
}
function kill_taosd() {
pid=$(ps -ef | grep -w ${serverName} | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
@ -155,6 +162,7 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${clientName} || :
${csudo}rm -f ${bin_link_dir}/${serverName} || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taoskeeper || :
${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/taosdump || :
@ -169,6 +177,7 @@ function install_bin() {
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || :
@ -183,6 +192,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo > /dev/null 2>&1 || :
@ -197,6 +207,7 @@ function install_bin() {
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/*explorer ] && ${csudo}cp -r ${binary_dir}/build/bin/*explorer ${install_main_dir}/bin || :
@ -208,6 +219,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || :
[ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
@ -407,6 +419,29 @@ function install_taosadapter_config() {
fi
}
# Install or refresh taoskeeper.toml.
#  - First install (no live config yet): copy the packaged config into
#    ${cfg_install_dir} and ${cfg_dir}, keep a version-suffixed backup, and
#    symlink it into ${install_main_dir}/cfg.
#  - Upgrade (live config exists): do NOT overwrite the user's config; only
#    refresh the version-suffixed backup and the ${cfg_dir} copy.
# Every step is best-effort ("|| :") so a missing packaged file never
# aborts the installer.
function install_taoskeeper_config() {
if [ ! -f "${cfg_install_dir}/taoskeeper.toml" ]; then
${csudo}mkdir -p ${cfg_install_dir} || :
[ -f ${binary_dir}/test/cfg/taoskeeper.toml ] &&
${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_install_dir} &&
${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || :
[ -f ${cfg_install_dir}/taoskeeper.toml ] &&
${csudo}chmod 644 ${cfg_install_dir}/taoskeeper.toml || :
# Keep a copy tagged with the installed version for reference/rollback.
[ -f ${binary_dir}/test/cfg/taoskeeper.toml ] &&
${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \
${cfg_install_dir}/taoskeeper.toml.${verNumber} || :
[ -f ${cfg_install_dir}/taoskeeper.toml ] &&
${csudo}ln -s ${cfg_install_dir}/taoskeeper.toml \
${install_main_dir}/cfg/taoskeeper.toml > /dev/null 2>&1 || :
else
if [ -f "${binary_dir}/test/cfg/taoskeeper.toml" ]; then
${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \
${cfg_install_dir}/taoskeeper.toml.${verNumber} || :
${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || :
fi
fi
}
function install_log() {
${csudo}rm -rf ${log_dir} || :
${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
@ -526,6 +561,15 @@ function install_taosadapter_service() {
fi
}
function install_taoskeeper_service() {
    # Only systemd-managed hosts (service_mod == 0) get a unit file;
    # other init systems fall back to manual start.
    if ((${service_mod} == 0)); then
        [ -f ${binary_dir}/test/cfg/taoskeeper.service ] &&
            ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.service \
                ${service_config_dir}/ || :
        # Pick up the new/updated unit file.
        ${csudo}systemctl daemon-reload
    fi
}
function install_service_on_launchctl() {
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
${csudo}cp ${script_dir}/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist
@ -534,6 +578,10 @@ function install_service_on_launchctl() {
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
${csudo}cp ${script_dir}/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist
${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || :
${csudo}cp ${script_dir}/com.taosdata.taoskeeper.plist /Library/LaunchDaemons/com.taosdata.taoskeeper.plist
${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || :
}
function install_service() {
@ -549,6 +597,7 @@ function install_service() {
install_service_on_launchctl
fi
}
function install_app() {
if [ "$osType" = "Darwin" ]; then
${csudo}rm -rf /Applications/TDengine.app &&
@ -573,6 +622,7 @@ function update_TDengine() {
elif ((${service_mod} == 1)); then
${csudo}service ${serverName} stop || :
else
kill_taoskeeper
kill_taosadapter
kill_taosd
fi
@ -591,9 +641,11 @@ function update_TDengine() {
install_service
install_taosadapter_service
install_taoskeeper_service
install_config
install_taosadapter_config
install_taoskeeper_config
echo
echo -e "\033[44;32;1m${productName} is updated successfully!${NC}"
@ -602,22 +654,33 @@ function update_TDengine() {
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml"
[ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml"
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}"
else
if [ "$osType" != "Darwin" ]; then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
[ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}"
else
echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}"
echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
[ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}"
fi
fi
@ -643,9 +706,11 @@ function install_TDengine() {
install_service
install_taosadapter_service
install_taoskeeper_service
install_config
install_taosadapter_config
install_taoskeeper_config
# Ask if to start the service
echo
@ -654,22 +719,33 @@ function install_TDengine() {
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
[ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml"
[ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml"
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
[ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}"
[ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}"
else
if [ "$osType" != "Darwin" ]; then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
[ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}"
else
echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}"
echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
[ -f ${installDir}/bin/taosadapter ] && \
echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
[ -f ${installDir}/bin/taoskeeper ] && \
echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}"
fi
fi

View File

@ -218,3 +218,75 @@ ELSE()
)
ENDIF()
ENDIF()
IF(TD_BUILD_KEEPER)
  MESSAGE("")
  MESSAGE("${Green} build taoskeeper, current platform is ${PLATFORM_ARCH_STR} ${ColourReset}")

  # Resolve the commit id baked into the taoskeeper binary. ERROR_QUIET and
  # OUTPUT_STRIP_TRAILING_WHITESPACE keep a non-git source tree (e.g. a
  # release tarball) from spewing errors or embedding a trailing newline;
  # such trees fall back to "unknown".
  EXECUTE_PROCESS(
    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/keeper
    COMMAND git rev-parse HEAD
    OUTPUT_VARIABLE taoskeeper_commit_sha1
    OUTPUT_STRIP_TRAILING_WHITESPACE
    ERROR_QUIET
  )

  IF("${taoskeeper_commit_sha1}" STREQUAL "")
    SET(taoskeeper_commit_sha1 "unknown")
  ELSE()
    STRING(STRIP "${taoskeeper_commit_sha1}" taoskeeper_commit_sha1)
  ENDIF()

  SET(taos_version ${TD_VER_NUMBER})
  MESSAGE("${Green} taoskeeper will use ${taos_version} and commit ${taoskeeper_commit_sha1} as version ${ColourReset}")
  MESSAGE(" current source dir is ${CMAKE_CURRENT_SOURCE_DIR}")

  IF(TD_WINDOWS)
    MESSAGE("Building taoskeeper on Windows")
    INCLUDE(ExternalProject)
    # taoskeeper is a Go project: no CMake configure step; build in-source
    # with `go build`, then copy the artifacts into the main build tree.
    ExternalProject_Add(taoskeeper
      PREFIX "taoskeeper"
      SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper
      BUILD_ALWAYS off
      BUILD_IN_SOURCE 1
      CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config"
      PATCH_COMMAND
      COMMAND git clean -f -d
      BUILD_COMMAND
      COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'"
      INSTALL_COMMAND
      # Fixed message typo: was "Comparessing".
      COMMAND cmake -E echo "Compressing taoskeeper.exe"
      COMMAND cmake -E time upx taoskeeper.exe
      COMMAND cmake -E echo "Copy taoskeeper.exe"
      COMMAND cmake -E copy taoskeeper.exe ${CMAKE_BINARY_DIR}/build/bin/taoskeeper.exe
      COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
      COMMAND cmake -E echo "Copy taoskeeper.toml"
      COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/
    )
  ELSE()
    IF(TD_DARWIN)
      MESSAGE("Building taoskeeper on macOS")
    ELSE()
      MESSAGE("Building taoskeeper on Linux")
    ENDIF()

    INCLUDE(ExternalProject)
    ExternalProject_Add(taoskeeper
      PREFIX "taoskeeper"
      SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper
      BUILD_ALWAYS off
      BUILD_IN_SOURCE 1
      CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config"
      PATCH_COMMAND
      COMMAND git clean -f -d
      BUILD_COMMAND
      COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'"
      INSTALL_COMMAND
      COMMAND cmake -E echo "Copy taoskeeper"
      COMMAND cmake -E copy taoskeeper ${CMAKE_BINARY_DIR}/build/bin
      COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
      COMMAND cmake -E echo "Copy taoskeeper.toml"
      COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/
      COMMAND cmake -E echo "Copy taoskeeper.service"
      COMMAND cmake -E copy ./taoskeeper.service ${CMAKE_BINARY_DIR}/test/cfg/
    )
  ENDIF()
ENDIF()

View File

@ -0,0 +1 @@
!taoskeeper

22
tools/keeper/.gitignore vendored Normal file
View File

@ -0,0 +1,22 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
*.html
*.data
# Dependency directories (remove the comment below to include it)
vendor
# Local build output, IDE state, the taoskeeper binary, and test scratch data
/debug/
/.idea/
/taoskeeper
/test_data
/.vscode

10
tools/keeper/CHANGELOG.md Normal file
View File

@ -0,0 +1,10 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Conventional Changelog](https://www.conventionalcommits.org/en/v1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Footnote
This changelog is automatically generated.

16
tools/keeper/Dockerfile Normal file
View File

@ -0,0 +1,16 @@
# Build stage: compile taoskeeper with the Go toolchain.
FROM golang:1.18.6-alpine AS builder
# LABEL requires key=value with no surrounding spaces; the previous
# `LABEL maintainer = "..."` form does not set the intended label.
LABEL maintainer="Linhe Huo <linhe.huo@gmail.com>"
WORKDIR /usr/src/taoskeeper
COPY ./ /usr/src/taoskeeper/
ENV GO111MODULE=on \
    GOPROXY=https://goproxy.cn,direct
RUN go mod tidy && go build

# Runtime stage: minimal alpine image carrying only the binary and config.
FROM alpine:3
RUN mkdir -p /etc/taos
COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
RUN chmod u+rw /etc/taos/taoskeeper.toml
EXPOSE 6043
CMD ["taoskeeper"]

View File

@ -0,0 +1,24 @@
# Build stage: compile and upx-compress taoskeeper, embedding version info
# passed via build args.
FROM golang:1.18.6-alpine AS builder
# LABEL requires key=value with no surrounding spaces; the previous
# `LABEL maintainer = "..."` form does not set the intended label.
LABEL maintainer="TDengine"
ARG latestv
ARG gitinfo
ARG buildinfo
# NOTE(review): /var/lib/apt/lists is a Debian/Ubuntu path; it does not
# exist on alpine (apk), so this rm is a harmless no-op kept for parity.
RUN apk --no-cache add upx && \
    rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/taoskeeper
COPY ./ /usr/src/taoskeeper/
ENV GO111MODULE=on \
    GOPROXY=https://goproxy.cn,direct
RUN echo "$latestv $gitinfo $buildinfo"
RUN go mod tidy && go build -ldflags="-s -w -X 'github.com/taosdata/taoskeeper/version.Version=${latestv}' -X 'github.com/taosdata/taoskeeper/version.Gitinfo=${gitinfo}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${buildinfo}'" -o taoskeeper . && upx -9 taoskeeper

# Runtime stage: minimal alpine image carrying only the binary and config.
FROM alpine:3
RUN mkdir -p /etc/taos
COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
RUN chmod u+rw /etc/taos/taoskeeper.toml
EXPOSE 6043
CMD ["taoskeeper"]

267
tools/keeper/README-CN.md Normal file
View File

@ -0,0 +1,267 @@
# TaosKeeper
taosKeeper 是 TDengine 各项监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。并且 taosKeeper 企业版支持多种收集器,可以方便进行监控数据的展示。
taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。
## 构建
### 获取源码
从 GitHub 克隆源码:
```sh
git clone https://github.com/taosdata/TDengine
cd TDengine/tools/keeper
```
### 编译
taosKeeper 使用 `GO` 语言编写,在构建前需要配置好 `GO` 语言开发环境。
```sh
go mod tidy
go build
```
## 安装
如果是自行构建的项目,仅需要拷贝 `taoskeeper` 文件到你的 `PATH` 中。
```sh
sudo install taoskeeper /usr/bin/
```
## 启动
在启动前,应该做好如下配置:
`/etc/taos/taoskeeper.toml` 配置 TDengine 连接参数以及监控指标前缀等其他信息。
```toml
# gin 框架是否启用 debug
debug = false
# 服务监听端口, 默认为 6043
port = 6043
# 日志级别,包含 panic、error、info、debug、trace等
loglevel = "info"
# 程序中使用协程池的大小
gopoolsize = 50000
# 查询 TDengine 监控数据轮询间隔
RotationInterval = "15s"
[tdengine]
host = "127.0.0.1"
port = 6041
username = "root"
password = "taosdata"
# 需要被监控的 taosAdapter
[taosAdapter]
address = ["127.0.0.1:6041"]
[metrics]
# 监控指标前缀
prefix = "taos"
# 存放监控数据的数据库
database = "log"
# 指定需要监控的普通表
tables = []
[environment]
# 是否在容器中运行,影响 taosKeeper 自身的监控数据
incgroup = false
```
现在可以启动服务,输入:
```sh
taoskeeper
```
如果你使用 `systemd`,复制 `taoskeeper.service` 到 `/lib/systemd/system/`,并启动服务。
```sh
sudo cp taoskeeper.service /lib/systemd/system/
sudo systemctl daemon-reload
sudo systemctl start taoskeeper
```
让 taosKeeper 随系统开机自启动。
```sh
sudo systemctl enable taoskeeper
```
如果使用 `systemd`,你可以使用如下命令完成安装。
```sh
go mod tidy
go build
sudo install taoskeeper /usr/bin/
sudo cp taoskeeper.service /lib/systemd/system/
sudo systemctl daemon-reload
sudo systemctl start taoskeeper
sudo systemctl enable taoskeeper
```
## Docker
如下介绍了如何在 docker 中构建 taosKeeper
在构建前请配置好 `./config/taoskeeper.toml` 中合适的参数,并编辑 Dockerfile ,示例如下。
```dockerfile
FROM golang:1.18.6-alpine as builder
WORKDIR /usr/src/taoskeeper
COPY ./ /usr/src/taoskeeper/
ENV GO111MODULE=on \
GOPROXY=https://goproxy.cn,direct
RUN go mod tidy && go build
FROM alpine:3
RUN mkdir -p /etc/taos
COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
EXPOSE 6043
CMD ["taoskeeper"]
```
如果已经有 taosKeeper 可执行文件,在配置好 `taoskeeper.toml` 后你可以使用如下方式构建:
```dockerfile
FROM ubuntu:18.04
RUN mkdir -p /etc/taos
COPY ./taoskeeper /usr/bin/
COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml
EXPOSE 6043
CMD ["taoskeeper"]
```
## 使用(**企业版**)
### Prometheus (by scrape)
taosKeeper 可以像 `node-exporter` 一样向 Prometheus 提供监控指标。\
`/etc/prometheus/prometheus.yml` 添加配置:
```yml
global:
scrape_interval: 5s
scrape_configs:
- job_name: "taoskeeper"
static_configs:
- targets: ["taoskeeper:6043"]
```
现在使用 PromQL 查询即可以显示结果,比如要查看指定主机(通过 FQDN 正则匹配表达式筛选)硬盘使用百分比:
```promql
taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"}
```
你可以使用 `docker-compose` 测试完整的链路。
`docker-compose.yml`示例:
```yml
version: "3.7"
services:
tdengine:
image: tdengine/tdengine
environment:
TAOS_FQDN: tdengine
volumes:
- taosdata:/var/lib/taos
taoskeeper:
build: ./
depends_on:
- tdengine
environment:
TDENGINE_HOST: tdengine
TDENGINE_PORT: 6041
volumes:
- ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml
ports:
- 6043:6043
prometheus:
image: prom/prometheus
volumes:
- ./prometheus/:/etc/prometheus/
ports:
- 9090:9090
volumes:
taosdata:
```
启动:
```sh
docker-compose up -d
```
现在通过访问 <http://localhost:9090> 来查询结果。访问[simple dashboard](https://grafana.com/grafana/dashboards/15164) 来查看TaosKeeper + Prometheus + Grafana 监控 TDengine 的快速启动实例。
### Telegraf
如果使用 telegraf 来收集各个指标,仅需要在配置中增加:
```toml
[[inputs.prometheus]]
## An array of urls to scrape metrics from.
urls = ["http://taoskeeper:6043/metrics"]
```
可以通过 `docker-compose` 来测试
```sh
docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper
```
由于可以在 `telegraf.conf` 设置日志为标准输出:
```toml
[[outputs.file]]
files = ["stdout"]
```
所以你可以通过 `docker-compose logs` 在标准输出中追踪 TDengine 各项指标。
```sh
docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf
```
### Zabbix
1. 导入 zabbix 临时文件 `zbx_taos_keeper_templates.xml`
2. 使用 `TDengine` 模板来创建主机,修改宏 `{$TAOSKEEPER_HOST}` 和 `{$COLLECTION_INTERVAL}`。
3. 等待并查看到自动创建的条目。
### 常见问题
* 启动报错显示connection refused
**解析**:taosKeeper 依赖 restful 接口查询数据,请检查 taosAdapter 是否正常运行或 taoskeeper.toml 中 taosAdapter 地址是否正确。
* taosKeeper 监控不同 TDengine 显示的检测指标数目不一致?
**解析**:如果 TDengine 中未创建某项指标taoskeeper 不能获取对应的检测结果。
* 不能接收到 TDengine 的监控日志。
**解析**: 修改 `/etc/taos/taos.cfg` 文件并增加如下参数:
```cfg
monitor 1 // 启用monitor
monitorInterval 30 // 发送间隔 (s)
monitorFqdn localhost // 接收消息的FQDN默认为空
monitorPort 6043 // 接收消息的端口号
monitorMaxLogs 100 // 每个监控间隔缓存的最大日志数量
```

273
tools/keeper/README.md Normal file
View File

@ -0,0 +1,273 @@
# TaosKeeper
TDengine Metrics Exporter for Kinds of Collectors, you can obtain the running status of TDengine by performing several simple configurations.
This tool uses TDengine RESTful API, so you could just build it without TDengine client.
## Build
### Get the source codes
```sh
git clone https://github.com/taosdata/TDengine
cd TDengine/tools/keeper
```
### compile
```sh
go mod tidy
go build
```
## Install
If you build the tool by your self, just copy the `taoskeeper` binary to your `PATH`.
```sh
sudo install taoskeeper /usr/bin/
```
## Start
Before start, you should configure some options like database ip, port or the prefix and others for exported metrics.
in `/etc/taos/taoskeeper.toml`.
```toml
# Start with debug middleware for gin
debug = false
# Listen port, default is 6043
port = 6043
# log level
loglevel = "info"
# go pool size
gopoolsize = 50000
# interval for TDengine metrics
RotationInterval = "15s"
[tdengine]
host = "127.0.0.1"
port = 6041
username = "root"
password = "taosdata"
# list of taosAdapter that need to be monitored
[taosAdapter]
address = ["127.0.0.1:6041"]
[metrics]
# metrics prefix in metrics names.
prefix = "taos"
# database for storing metrics data
database = "log"
# export some tables that are not super table
tables = []
[environment]
# Whether running in cgroup.
incgroup = false
```
Now you could run the tool:
```sh
taoskeeper
```
If you use `systemd`, copy the `taoskeeper.service` to `/lib/systemd/system/` and start the service.
```sh
sudo cp taoskeeper.service /lib/systemd/system/
sudo systemctl daemon-reload
sudo systemctl start taoskeeper
```
To start taoskeeper whenever os rebooted, you should enable the systemd service:
```sh
sudo systemctl enable taoskeeper
```
So if use `systemd`, you'd better install it with these lines all-in-one:
```sh
go mod tidy
go build
sudo install taoskeeper /usr/bin/
sudo cp taoskeeper.service /lib/systemd/system/
sudo systemctl daemon-reload
sudo systemctl start taoskeeper
sudo systemctl enable taoskeeper
```
## Docker
Here is an example to show how to build this tool in docker:
Before building, you should configure `./config/taoskeeper.toml` with proper parameters and edit Dockerfile. Take following as example.
```dockerfile
FROM golang:1.18.2 as builder
WORKDIR /usr/src/taoskeeper
COPY ./ /usr/src/taoskeeper/
ENV GO111MODULE=on \
GOPROXY=https://goproxy.cn,direct
RUN go mod tidy && go build
FROM alpine:3
RUN mkdir -p /etc/taos
COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
EXPOSE 6043
CMD ["taoskeeper"]
```
If you already have taosKeeper binary file, you can build this tool like:
```dockerfile
FROM ubuntu:18.04
RUN mkdir -p /etc/taos
COPY ./taoskeeper /usr/bin/
COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml
EXPOSE 6043
CMD ["taoskeeper"]
```
## Usage (**Enterprise Edition**)
### Prometheus (by scrape)
It now acts as a Prometheus exporter like `node-exporter`.
Here's how to add this in scrape configs of `/etc/prometheus/prometheus.yml`:
```yml
global:
scrape_interval: 5s
scrape_configs:
- job_name: "taoskeeper"
static_configs:
- targets: [ "taoskeeper:6043" ]
```
Now a PromQL query will show the right result, for example, to show disk usage percent on a specific host with an FQDN regex
match expression:
```promql
taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"}
```
You can use `docker-compose` with the current `docker-compose.yml` to test the whole stack.
Here is the `docker-compose.yml`:
```yml
version: "3.7"
services:
tdengine:
image: tdengine/tdengine
environment:
TAOS_FQDN: tdengine
volumes:
- taosdata:/var/lib/taos
taoskeeper:
build: ./
depends_on:
- tdengine
environment:
TDENGINE_HOST: tdengine
TDENGINE_PORT: 6041
volumes:
- ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml
ports:
- 6043:6043
prometheus:
image: prom/prometheus
volumes:
- ./prometheus/:/etc/prometheus/
ports:
- 9090:9090
volumes:
taosdata:
```
Start the stack:
```sh
docker-compose up -d
```
Now you point to <http://localhost:9090> (if you have not started a prometheus server by yourself) and query.
For a quick demo with TaosKeeper + Prometheus + Grafana, we provide
a [simple dashboard](https://grafana.com/grafana/dashboards/15164) to monitor TDengine.
### Telegraf
If you are using telegraf to collect metrics, just add inputs like this:
```toml
[[inputs.prometheus]]
## An array of urls to scrape metrics from.
urls = ["http://taoskeeper:6043/metrics"]
```
You can test it with `docker-compose`:
```sh
docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper
```
Since we have set a stdout file output in `telegraf.conf`:
```toml
[[outputs.file]]
files = ["stdout"]
```
So you can track with TDengine metrics in standard output with `docker-compose logs`:
```sh
docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf
```
### Zabbix
1. Import the zabbix template file `zbx_taos_keeper_templates.xml`.
2. Use the template `TDengine` to create the host and modify the macros `{$TAOSKEEPER_HOST}`
and `{$COLLECTION_INTERVAL}`.
3. Waiting for monitoring items to be created automatically.
### FAQ
* Error occurred: Connection refused, while taosKeeper was starting
**Answer**: taoskeeper relies on restful interfaces to query data. Check whether the taosAdapter is running or whether
the taosAdapter address in taoskeeper.toml is correct.
* Why detection metrics displayed by different TDengine's inconsistent with taoskeeper monitoring?
**Answer**: If a metric is not created in TDengine, taoskeeper cannot get the corresponding test results.
* Cannot receive log from TDengine server.
**Answer**: Modify `/etc/taos/taos.cfg` file and add parameters like:
```cfg
monitor 1 // start monitor
monitorInterval 30 // send log interval (s)
monitorFqdn localhost
monitorPort 6043 // taosKeeper port
monitorMaxLogs 100
```

View File

@ -0,0 +1,260 @@
package api
import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/util"
)
// adapterLog is the package logger for the taosAdapter report endpoint ("ADP" module).
var adapterLog = log.GetLogger("ADP")

// adapterReqType distinguishes the two request channels reported by taosAdapter
// and is stored in the `req_type` tag of the adapter_requests super table.
type adapterReqType int

const (
	rest adapterReqType = iota // 0 - rest
	ws                         // 1 - ws
)
// Adapter ingests taosAdapter request-statistics reports posted to
// /adapter_report and writes them into the metrics database.
type Adapter struct {
	username  string                 // TDengine user name
	password  string                 // TDengine password
	host      string                 // TDengine host
	port      int                    // TDengine port
	usessl    bool                   // whether to connect over TLS
	conn      *db.Connector          // long-lived connection; set by initConnect
	db        string                 // metrics database name
	dbOptions map[string]interface{} // options appended to CREATE DATABASE
}
// NewAdapter builds an Adapter from the TDengine connection settings and the
// metrics-database section of the given configuration. No connection is
// opened here; call Init to connect and register the route.
func NewAdapter(c *config.Config) *Adapter {
	adapter := Adapter{
		username:  c.TDengine.Username,
		password:  c.TDengine.Password,
		host:      c.TDengine.Host,
		port:      c.TDengine.Port,
		usessl:    c.TDengine.Usessl,
		db:        c.Metrics.Database.Name,
		dbOptions: c.Metrics.Database.Options,
	}
	return &adapter
}
// Init prepares the metrics database, opens the long-lived connection,
// creates the adapter_requests super table, and registers the
// POST /adapter_report route on the given router.
func (a *Adapter) Init(c gin.IRouter) error {
	setup := []struct {
		run    func() error
		errFmt string
	}{
		{a.createDatabase, "create database error:%s"},
		{a.initConnect, "init db connect error:%s"},
		{a.createTable, "create table error:%s"},
	}
	for _, step := range setup {
		if err := step.run(); err != nil {
			return fmt.Errorf(step.errFmt, err)
		}
	}
	c.POST("/adapter_report", a.handleFunc())
	return nil
}
// handleFunc returns the gin handler for POST /adapter_report. It parses the
// JSON report body into an AdapterReport and inserts one REST row and one
// websocket row via a single SQL statement built by parseSql.
func (a *Adapter) handleFunc() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Propagate the caller-supplied request id into the log context and
		// into the Exec call below, so the statement can be traced end to end.
		qid := util.GetQid(c.GetHeader("X-QID"))

		adapterLog := adapterLog.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)

		// Init must have succeeded before this handler is usable.
		if a.conn == nil {
			adapterLog.Error("no connection")
			c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
			return
		}

		data, err := c.GetRawData()
		if err != nil {
			adapterLog.Errorf("get adapter report data error, msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get adapter report data error. %s", err)})
			return
		}
		// Guard the Tracef so the body is not stringified unless trace is on.
		if adapterLog.Logger.IsLevelEnabled(logrus.TraceLevel) {
			adapterLog.Tracef("received adapter report data:%s", string(data))
		}

		var report AdapterReport
		if err = json.Unmarshal(data, &report); err != nil {
			adapterLog.Errorf("parse adapter report data error, data:%s, error:%s", string(data), err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse adapter report data error: %s", err)})
			return
		}
		sql := a.parseSql(report)
		adapterLog.Debugf("adapter report sql:%s", sql)

		if _, err = a.conn.Exec(context.Background(), sql, qid); err != nil {
			adapterLog.Errorf("adapter report error, msg:%s", err)
			c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, gin.H{})
	}
}
// initConnect opens the long-lived connection to the metrics database and
// stores it on the receiver. a.conn is left untouched on failure.
func (a *Adapter) initConnect() error {
	connector, connErr := db.NewConnectorWithDb(a.username, a.password, a.host, a.port, a.db, a.usessl)
	if connErr != nil {
		adapterLog.Dup().Errorf("init db connect error, msg:%s", connErr)
		return connErr
	}
	a.conn = connector
	return nil
}
// parseSql renders one INSERT statement covering both request channels of a
// single endpoint: one row into the endpoint's REST subtable and one into its
// websocket subtable, each auto-created via "using adapter_requests tags".
// The report timestamp (Unix seconds) is formatted as RFC3339; the column
// order must match adapterTableSql.
func (a *Adapter) parseSql(report AdapterReport) string {
	// reqType: 0: rest, 1: websocket
	restTbName := a.tableName(report.Endpoint, rest)
	wsTbName := a.tableName(report.Endpoint, ws)
	ts := time.Unix(report.Timestamp, 0).Format(time.RFC3339)
	metric := report.Metric
	return fmt.Sprintf("insert into %s using adapter_requests tags ('%s', %d) "+
		"values('%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d) "+
		"%s using adapter_requests tags ('%s', %d) "+
		"values('%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)",
		restTbName, report.Endpoint, rest, ts, metric.RestTotal, metric.RestQuery, metric.RestWrite, metric.RestOther,
		metric.RestInProcess, metric.RestSuccess, metric.RestFail, metric.RestQuerySuccess, metric.RestQueryFail,
		metric.RestWriteSuccess, metric.RestWriteFail, metric.RestOtherSuccess, metric.RestOtherFail,
		metric.RestQueryInProcess, metric.RestWriteInProcess,
		wsTbName, report.Endpoint, ws, ts, metric.WSTotal,
		metric.WSQuery, metric.WSWrite, metric.WSOther, metric.WSInProcess, metric.WSSuccess, metric.WSFail,
		metric.WSQuerySuccess, metric.WSQueryFail, metric.WSWriteSuccess, metric.WSWriteFail, metric.WSOtherSuccess,
		metric.WSOtherFail, metric.WSQueryInProcess, metric.WSWriteInProcess)
}
// tableName derives the per-endpoint subtable name for the given request
// channel. Names that fit within util.MAX_TABLE_NAME_LEN are sanitized with
// util.ToValidTableName; longer ones fall back to an md5 digest of
// endpoint+reqType so the name stays unique and bounded.
func (a *Adapter) tableName(endpoint string, reqType adapterReqType) string {
	suffix := "ws"
	if reqType == rest {
		suffix = "rest"
	}
	tbname := fmt.Sprintf("adapter_req_%s_%s", endpoint, suffix)
	if len(tbname) > util.MAX_TABLE_NAME_LEN {
		sum := md5.Sum([]byte(fmt.Sprintf("%s%d", endpoint, reqType)))
		return fmt.Sprintf("adapter_req_%s", hex.EncodeToString(sum[:]))
	}
	return util.ToValidTableName(tbname)
}
// createDatabase creates the metrics database (if absent) over a transient
// connection, which is closed before returning.
func (a *Adapter) createDatabase() error {
	qid := util.GetQidOwn()
	adapterLog := adapterLog.WithFields(
		logrus.Fields{config.ReqIDKey: qid},
	)
	conn, err := db.NewConnector(a.username, a.password, a.host, a.port, a.usessl)
	if err != nil {
		return fmt.Errorf("connect to database error, msg:%s", err)
	}
	defer func() { _ = conn.Close() }()
	sql := a.createDBSql()
	adapterLog.Infof("create database, sql:%s", sql)
	// FIX: execute with the same qid that was attached to the log context.
	// Previously a second util.GetQidOwn() was generated here, so the qid in
	// the log line never matched the qid sent with the statement.
	_, err = conn.Exec(context.Background(), sql, qid)
	if err != nil {
		adapterLog.Errorf("create database error, msg:%s", err)
		return err
	}
	return nil
}
// createDBSql builds the CREATE DATABASE statement for the metrics database,
// appending each configured option as "key value" (string values are
// single-quoted). Map iteration order, and hence option order, is unspecified.
func (a *Adapter) createDBSql() string {
	var sb bytes.Buffer
	fmt.Fprintf(&sb, "create database if not exists %s ", a.db)
	for option, value := range a.dbOptions {
		sb.WriteString(option)
		if text, ok := value.(string); ok {
			fmt.Fprintf(&sb, " '%s'", text)
		} else {
			fmt.Fprintf(&sb, " %v", value)
		}
		sb.WriteString(" ")
	}
	return sb.String()
}
// adapterTableSql defines the adapter_requests super table. Each column pairs
// with a field of AdapterMetrics, in the order used by parseSql; the tags
// identify the reporting endpoint and the request channel (see adapterReqType).
var adapterTableSql = "create stable if not exists `adapter_requests` (" +
	"`ts` timestamp, " +
	"`total` int unsigned, " +
	"`query` int unsigned, " +
	"`write` int unsigned, " +
	"`other` int unsigned, " +
	"`in_process` int unsigned, " +
	"`success` int unsigned, " +
	"`fail` int unsigned, " +
	"`query_success` int unsigned, " +
	"`query_fail` int unsigned, " +
	"`write_success` int unsigned, " +
	"`write_fail` int unsigned, " +
	"`other_success` int unsigned, " +
	"`other_fail` int unsigned, " +
	"`query_in_process` int unsigned, " +
	"`write_in_process` int unsigned ) " +
	"tags (`endpoint` varchar(32), `req_type` tinyint unsigned )"
// createTable creates the adapter_requests super table over the long-lived
// connection; it requires initConnect to have succeeded first.
func (a *Adapter) createTable() error {
	if a.conn == nil {
		return errNoConnection
	}
	_, execErr := a.conn.Exec(context.Background(), adapterTableSql, util.GetQidOwn())
	return execErr
}
// AdapterReport is the JSON payload taosAdapter posts to /adapter_report.
type AdapterReport struct {
	Timestamp int64          `json:"ts"` // Unix seconds
	Metric    AdapterMetrics `json:"metrics"`
	Endpoint  string         `json:"endpoint"` // reporting adapter instance, e.g. "adapter-1:6041"
}

// AdapterMetrics carries the request counters for both channels; the Rest*
// fields map to the req_type=0 row and the WS* fields to the req_type=1 row
// written by parseSql.
type AdapterMetrics struct {
	RestTotal          int `json:"rest_total"`
	RestQuery          int `json:"rest_query"`
	RestWrite          int `json:"rest_write"`
	RestOther          int `json:"rest_other"`
	RestInProcess      int `json:"rest_in_process"`
	RestSuccess        int `json:"rest_success"`
	RestFail           int `json:"rest_fail"`
	RestQuerySuccess   int `json:"rest_query_success"`
	RestQueryFail      int `json:"rest_query_fail"`
	RestWriteSuccess   int `json:"rest_write_success"`
	RestWriteFail      int `json:"rest_write_fail"`
	RestOtherSuccess   int `json:"rest_other_success"`
	RestOtherFail      int `json:"rest_other_fail"`
	RestQueryInProcess int `json:"rest_query_in_process"`
	RestWriteInProcess int `json:"rest_write_in_process"`
	WSTotal            int `json:"ws_total"`
	WSQuery            int `json:"ws_query"`
	WSWrite            int `json:"ws_write"`
	WSOther            int `json:"ws_other"`
	WSInProcess        int `json:"ws_in_process"`
	WSSuccess          int `json:"ws_success"`
	WSFail             int `json:"ws_fail"`
	WSQuerySuccess     int `json:"ws_query_success"`
	WSQueryFail        int `json:"ws_query_fail"`
	WSWriteSuccess     int `json:"ws_write_success"`
	WSWriteFail        int `json:"ws_write_fail"`
	WSOtherSuccess     int `json:"ws_other_success"`
	WSOtherFail        int `json:"ws_other_fail"`
	WSQueryInProcess   int `json:"ws_query_in_process"`
	WSWriteInProcess   int `json:"ws_write_in_process"`
}

View File

@ -0,0 +1,98 @@
package api
import (
"context"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/util"
)
// TestAdapter2 posts a synthetic taosAdapter report to /adapter_report and
// verifies one REST row (req_type=0) and one websocket row (req_type=1) land
// in the adapter_requests super table with the reported counter values.
func TestAdapter2(t *testing.T) {
	c := &config.Config{
		InstanceID: 64,
		Port:       6043,
		TDengine: config.TDengineRestful{
			Host:     "127.0.0.1",
			Port:     6041,
			Username: "root",
			Password: "taosdata",
			Usessl:   false,
		},
		Metrics: config.MetricsConfig{
			Database: config.Database{
				Name:    "adapter_report_test",
				Options: map[string]interface{}{},
			},
		},
	}
	a := NewAdapter(c)
	err := a.Init(router)
	assert.NoError(t, err)

	w := httptest.NewRecorder()
	body := strings.NewReader(" {\"ts\": 1696928323, \"metrics\": {\"rest_total\": 10, \"rest_query\": 2, " +
		"\"rest_write\": 5, \"rest_other\": 3, \"rest_in_process\": 1, \"rest_fail\": 5, \"rest_success\": 3, " +
		"\"rest_query_success\": 1, \"rest_query_fail\": 2, \"rest_write_success\": 2, \"rest_write_fail\": 3, " +
		"\"rest_other_success\": 1, \"rest_other_fail\": 2, \"rest_query_in_process\": 1, \"rest_write_in_process\": 2, " +
		"\"ws_total\": 10, \"ws_query\": 2, \"ws_write\": 3, \"ws_other\": 5, \"ws_in_process\": 1, \"ws_success\": 3, " +
		"\"ws_fail\": 3, \"ws_query_success\": 1, \"ws_query_fail\": 1, \"ws_write_success\": 2, \"ws_write_fail\": 2, " +
		"\"ws_other_success\": 1, \"ws_other_fail\": 2, \"ws_query_in_process\": 1, \"ws_write_in_process\": 2 }, " +
		"\"endpoint\": \"adapter-1:6041\"}")
	req, _ := http.NewRequest(http.MethodPost, "/adapter_report", body)
	req.Header.Set("X-QID", "0x1234567890ABCD00")
	router.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)

	conn, err := db.NewConnectorWithDb(c.TDengine.Username, c.TDengine.Password, c.TDengine.Host, c.TDengine.Port, c.Metrics.Database.Name, c.TDengine.Usessl)
	// FIX: fail before deferring a call on conn. Previously the defer was
	// registered before the error check, so a failed connect left conn nil
	// and the deferred conn.Query panicked during test teardown.
	assert.NoError(t, err)
	defer func() {
		_, _ = conn.Query(context.Background(), "drop database if exists adapter_report_test", util.GetQidOwn())
	}()

	// Expected column values at indices 1..15, in adapter_requests column order.
	expectRest := []uint32{10, 2, 5, 3, 1, 3, 5, 1, 2, 2, 3, 1, 2, 1, 2}
	data, err := conn.Query(context.Background(), "select * from adapter_report_test.adapter_requests where req_type=0", util.GetQidOwn())
	assert.NoError(t, err)
	assert.Equal(t, 1, len(data.Data))
	for i, expect := range expectRest {
		assert.Equal(t, expect, data.Data[0][i+1])
	}

	expectWS := []uint32{10, 2, 3, 5, 1, 3, 3, 1, 1, 2, 2, 1, 2, 1, 2}
	data, err = conn.Query(context.Background(), "select * from adapter_report_test.adapter_requests where req_type=1", util.GetQidOwn())
	assert.NoError(t, err)
	assert.Equal(t, 1, len(data.Data))
	for i, expect := range expectWS {
		assert.Equal(t, expect, data.Data[0][i+1])
	}

	_, _ = conn.Exec(context.Background(), "drop database "+c.Metrics.Database.Name, util.GetQidOwn())
}

336
tools/keeper/api/audit.go Normal file
View File

@ -0,0 +1,336 @@
package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/util"
)
// auditLogger is the package logger for the audit endpoints ("AUD" module).
var auditLogger = log.GetLogger("AUD")

// MAX_DETAIL_LEN caps the audit "details" value, matching the varchar(50000)
// width of the `details` column created in createSTables.
const MAX_DETAIL_LEN = 50000
// Audit receives audit records over HTTP (/audit, /audit_v2, /audit-batch)
// and stores them into the audit database.
type Audit struct {
	username  string                 // TDengine user name
	password  string                 // TDengine password
	host      string                 // TDengine host
	port      int                    // TDengine port
	usessl    bool                   // whether to connect over TLS
	conn      *db.Connector          // long-lived connection; set by initConnect
	db        string                 // audit database name (defaults to "audit")
	dbOptions map[string]interface{} // options appended to CREATE DATABASE
}
// AuditInfo is a single audit record in the current wire format, where the
// timestamp is a string (nanosecond epoch, judging by parseSql usage —
// TODO confirm against the server producer).
type AuditInfo struct {
	Timestamp string `json:"timestamp"`
	ClusterID string `json:"cluster_id"`
	User      string `json:"user"`
	Operation string `json:"operation"`
	Db        string `json:"db"`
	Resource  string `json:"resource"`
	ClientAdd string `json:"client_add"` // client address
	Details   string `json:"details"`
}

// AuditArrayInfo is the batch payload accepted by /audit-batch.
type AuditArrayInfo struct {
	Records []AuditInfo `json:"records"`
}

// AuditInfoOld is the legacy record format with a numeric millisecond
// timestamp; parseSqlOld converts it to nanoseconds by appending "000000".
type AuditInfoOld struct {
	Timestamp int64  `json:"timestamp"`
	ClusterID string `json:"cluster_id"`
	User      string `json:"user"`
	Operation string `json:"operation"`
	Db        string `json:"db"`
	Resource  string `json:"resource"`
	ClientAdd string `json:"client_add"` // client address
	Details   string `json:"details"`
}
// NewAudit builds an Audit from the TDengine connection settings and audit
// database section of the configuration. An empty database name falls back
// to "audit". The error result is always nil, kept for interface stability.
func NewAudit(c *config.Config) (*Audit, error) {
	dbName := c.Audit.Database.Name
	if dbName == "" {
		dbName = "audit"
	}
	return &Audit{
		username:  c.TDengine.Username,
		password:  c.TDengine.Password,
		host:      c.TDengine.Host,
		port:      c.TDengine.Port,
		usessl:    c.TDengine.Usessl,
		db:        dbName,
		dbOptions: c.Audit.Database.Options,
	}, nil
}
// Init prepares the audit database, opens the long-lived connection, creates
// the operations super table, and registers the audit routes. /audit and
// /audit_v2 share the same single-record handler.
func (a *Audit) Init(c gin.IRouter) error {
	setup := []struct {
		run    func() error
		errFmt string
	}{
		{a.createDatabase, "create database error, msg:%s"},
		{a.initConnect, "init db connect error, msg:%s"},
		{a.createSTables, "create stable error, msg:%s"},
	}
	for _, step := range setup {
		if err := step.run(); err != nil {
			return fmt.Errorf(step.errFmt, err)
		}
	}
	single := a.handleFunc()
	c.POST("/audit", single)
	c.POST("/audit_v2", single)
	c.POST("/audit-batch", a.handleBatchFunc())
	return nil
}
// handleBatchFunc returns the gin handler for POST /audit-batch. It decodes
// an AuditArrayInfo payload and hands the records to handleBatchRecord,
// which chunks them into bounded INSERT statements.
func (a *Audit) handleBatchFunc() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Propagate the caller-supplied request id into the log context.
		qid := util.GetQid(c.GetHeader("X-QID"))

		auditLogger := auditLogger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)

		// Init must have succeeded before this handler is usable.
		if a.conn == nil {
			auditLogger.Error("no connection")
			c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
			return
		}

		data, err := c.GetRawData()
		if err != nil {
			auditLogger.Errorf("get audit data error, msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get audit data error. %s", err)})
			return
		}
		// Guard the Tracef so the body is not stringified unless trace is on.
		if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) {
			auditLogger.Tracef("receive audit request, data:%s", string(data))
		}
		var auditArray AuditArrayInfo

		if err := json.Unmarshal(data, &auditArray); err != nil {
			auditLogger.Errorf("parse audit data error, data:%s, error:%s", string(data), err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)})
			return
		}

		// An empty batch is a successful no-op (handleBatchRecord would index
		// records[0], so it must not be called with an empty slice).
		if len(auditArray.Records) == 0 {
			if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) {
				auditLogger.Trace("handle request successfully (no records)")
			}
			c.JSON(http.StatusOK, gin.H{})
			return
		}

		err = handleBatchRecord(auditArray.Records, a.conn, qid)
		if err != nil {
			auditLogger.Errorf("process records error, error:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("process records error. %s", err)})
			return
		}

		c.JSON(http.StatusOK, gin.H{})
	}
}
// handleFunc returns the gin handler shared by POST /audit and /audit_v2.
// It sniffs the payload format by checking whether "timestamp" is a JSON
// string (current format, AuditInfo) or a number (legacy AuditInfoOld) and
// inserts the record with the matching SQL builder.
func (a *Audit) handleFunc() gin.HandlerFunc {
	return func(c *gin.Context) {
		// Propagate the caller-supplied request id into the log context and
		// into the Exec call below.
		qid := util.GetQid(c.GetHeader("X-QID"))

		auditLogger := auditLogger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)

		// Init must have succeeded before this handler is usable.
		if a.conn == nil {
			auditLogger.Error("no connection")
			c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
			return
		}

		data, err := c.GetRawData()
		if err != nil {
			auditLogger.Errorf("get audit data error, msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get audit data error. %s", err)})
			return
		}
		// Guard the Tracef so the body is not stringified unless trace is on.
		if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) {
			auditLogger.Tracef("receive audit request, data:%s", string(data))
		}

		sql := ""

		// String timestamp => current format; numeric => legacy format.
		isStrTime, _ := regexp.MatchString(`"timestamp"\s*:\s*"[^"]*"`, string(data))
		if isStrTime {
			var audit AuditInfo
			if err := json.Unmarshal(data, &audit); err != nil {
				auditLogger.Errorf("parse audit data error, data:%s, error:%s", string(data), err)
				c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)})
				return
			}

			sql = parseSql(audit)
		} else {
			var audit AuditInfoOld
			if err := json.Unmarshal(data, &audit); err != nil {
				auditLogger.Errorf("parse old audit error, data:%s, error:%s", string(data), err)
				c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)})
				return
			}

			sql = parseSqlOld(audit)
		}

		if _, err = a.conn.Exec(context.Background(), sql, qid); err != nil {
			auditLogger.Errorf("save audit data error, sql:%s, error:%s", sql, err)
			c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("save audit data error: %s", err)})
			return
		}
		c.JSON(http.StatusOK, gin.H{})
	}
}
// handleDetails escapes quote characters in an audit "details" value so it
// can be embedded in a single-quoted SQL literal, then truncates the result
// to MAX_DETAIL_LEN bytes (the width of the `details` column).
//
// FIX: truncation could previously cut an escape sequence in half, leaving a
// dangling backslash that escaped the closing quote of the SQL literal and
// broke the whole statement. After truncating, a trailing backslash run of
// odd length is now shortened by one byte.
func handleDetails(details string) string {
	if strings.Contains(details, "'") {
		details = strings.ReplaceAll(details, "'", "\\'")
	}
	if strings.Contains(details, "\"") {
		details = strings.ReplaceAll(details, "\"", "\\\"")
	}
	if len(details) > MAX_DETAIL_LEN {
		details = details[:MAX_DETAIL_LEN]
		// Count trailing backslashes; an odd run means the cut landed inside
		// an escape sequence produced above.
		trailing := 0
		for trailing < len(details) && details[len(details)-1-trailing] == '\\' {
			trailing++
		}
		if trailing%2 == 1 {
			details = details[:len(details)-1]
		}
	}
	return details
}
// parseSql builds the INSERT statement for one current-format audit record;
// the subtable is created on the fly via "using operations tags".
// NOTE(review): only Details is escaped (via handleDetails); the other
// fields are interpolated verbatim and rely on trusted server input —
// confirm the producers cannot carry quotes.
func parseSql(audit AuditInfo) string {
	details := handleDetails(audit.Details)

	return fmt.Sprintf(
		"insert into %s using operations tags ('%s') values (%s, '%s', '%s', '%s', '%s', '%s', '%s') ",
		getTableName(audit), audit.ClusterID, audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details)
}
// parseSqlOld builds the INSERT statement for one legacy audit record,
// converting its millisecond timestamp to nanoseconds by appending "000000"
// (the audit database is created with precision 'ns').
func parseSqlOld(audit AuditInfoOld) string {
	details := handleDetails(audit.Details)

	return fmt.Sprintf(
		"insert into %s using operations tags ('%s') values (%s, '%s', '%s', '%s', '%s', '%s', '%s') ",
		getTableNameOld(audit), audit.ClusterID, strconv.FormatInt(audit.Timestamp, 10)+"000000", audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details)
}
// handleBatchRecord writes a batch of audit records, accumulating VALUES
// clauses into one INSERT and flushing whenever adding the next clause would
// exceed MAX_SQL_LEN. Callers must pass a non-empty slice (the empty case is
// rejected by handleBatchFunc); indexing auditArray[0] would panic otherwise.
// NOTE(review): the insert head is built from the first record only, so all
// records in one batch are assumed to share the same ClusterID — confirm the
// producer guarantees this.
func handleBatchRecord(auditArray []AuditInfo, conn *db.Connector, qid uint64) error {

	var builder strings.Builder
	var head = fmt.Sprintf(
		"insert into %s using operations tags ('%s') values",
		getTableName(auditArray[0]), auditArray[0].ClusterID)

	builder.WriteString(head)
	// Each flush gets a distinct low byte ORed into qid for tracing.
	// NOTE(review): "%255" wraps at 255 (not 256) and OR does not clear the
	// low bits of qid — presumably qid's low byte is zero; verify.
	var qid_counter uint8 = 0
	for _, audit := range auditArray {

		details := handleDetails(audit.Details)
		valuesStr := fmt.Sprintf(
			"(%s, '%s', '%s', '%s', '%s', '%s', '%s') ",
			audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details)

		// Flush before this clause would push the statement past the limit.
		if (builder.Len() + len(valuesStr)) > MAX_SQL_LEN {
			sql := builder.String()

			if _, err := conn.Exec(context.Background(), sql, qid|uint64((qid_counter%255))); err != nil {
				return err
			}

			builder.Reset()
			builder.WriteString(head)
		}

		builder.WriteString(valuesStr)
		qid_counter++
	}

	// Flush the remainder, if any clause was appended after the last flush.
	if builder.Len() > len(head) {
		sql := builder.String()

		if _, err := conn.Exec(context.Background(), sql, qid|uint64((qid_counter%255))); err != nil {
			return err
		}
	}

	return nil
}
// getTableName derives the per-cluster operations subtable name.
func getTableName(audit AuditInfo) string {
	return "t_operations_" + audit.ClusterID
}
// getTableNameOld derives the per-cluster operations subtable name for a
// legacy-format record; the naming scheme is identical to getTableName.
func getTableNameOld(audit AuditInfoOld) string {
	return "t_operations_" + audit.ClusterID
}
// initConnect opens the long-lived connection to the audit database and
// stores it on the receiver. a.conn is left untouched on failure.
func (a *Audit) initConnect() error {
	connector, connErr := db.NewConnectorWithDb(a.username, a.password, a.host, a.port, a.db, a.usessl)
	if connErr != nil {
		auditLogger.Errorf("init db connect error, msg:%s", connErr)
		return connErr
	}
	a.conn = connector
	return nil
}
// createDatabase creates the audit database (if absent) over a transient
// connection, which is closed before returning.
func (a *Audit) createDatabase() error {
	conn, err := db.NewConnector(a.username, a.password, a.host, a.port, a.usessl)
	if err != nil {
		return fmt.Errorf("connect to database error, msg:%s", err)
	}
	defer func() { _ = conn.Close() }()
	createSql := a.createDBSql()
	auditLogger.Infof("create database, sql:%s", createSql)
	if _, err = conn.Exec(context.Background(), createSql, util.GetQidOwn()); err != nil {
		auditLogger.Errorf("create database error, msg:%s", err)
		return err
	}
	return nil
}
// errNoConnection is returned when a table-creation step runs before
// initConnect has established the long-lived connection.
var errNoConnection = errors.New("no connection")
// createDBSql builds the CREATE DATABASE statement for the audit database
// (nanosecond precision), appending each configured option as "key value"
// with string values single-quoted. Option order follows map iteration and
// is therefore unspecified.
func (a *Audit) createDBSql() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "create database if not exists %s precision 'ns' ", a.db)
	for option, value := range a.dbOptions {
		sb.WriteString(option)
		if text, ok := value.(string); ok {
			fmt.Fprintf(&sb, " '%s'", text)
		} else {
			fmt.Fprintf(&sb, " %v", value)
		}
		sb.WriteString(" ")
	}
	return sb.String()
}
// createSTables creates the `operations` super table that audit records are
// written into; its `details` column width matches MAX_DETAIL_LEN and the
// cluster id is the only tag (used by getTableName for subtable naming).
func (a *Audit) createSTables() error {
	var createTableSql = "create stable if not exists operations " +
		"(ts timestamp, user_name varchar(25), operation varchar(20), db varchar(65), resource varchar(193), client_address varchar(25), details varchar(50000)) " +
		"tags (cluster_id varchar(64))"

	if a.conn == nil {
		return errNoConnection
	}
	_, err := a.conn.Exec(context.Background(), createTableSql, util.GetQidOwn())
	if err != nil {
		auditLogger.Errorf("## create stable error, msg:%s", err)
		return err
	}
	return nil
}

View File

@ -0,0 +1,153 @@
package api
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/util"
)
// TestAudit exercises the audit endpoints end to end against a live
// taosAdapter: single records in the current string-timestamp format
// (/audit_v2), legacy numeric-timestamp records (/audit), and the batch
// endpoint (/audit-batch) with a shrunken MAX_SQL_LEN to force chunked
// inserts.
func TestAudit(t *testing.T) {
	cfg := util.GetCfg()
	cfg.Audit = config.AuditConfig{
		Database: config.Database{
			Name: "keepter_test_audit",
		},
		Enable: true,
	}

	a, err := NewAudit(cfg)
	assert.NoError(t, err)
	err = a.Init(router)
	assert.NoError(t, err)

	// Exactly MAX_DETAIL_LEN (50000) bytes, the details column width.
	longDetails := strings.Repeat("0123456789", 5000)

	cases := []struct {
		name   string
		ts     int64
		detail string
		data   string
		expect string
	}{
		{
			name:   "1",
			ts:     1699839716440000000,
			data:   `{"timestamp": "1699839716440000000", "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "detail"}`,
			expect: "detail",
		},
		{
			name:   "2",
			ts:     1699839716441000000,
			data:   `{"timestamp": "1699839716441000000", "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "` + longDetails + `"}`,
			expect: longDetails[:50000],
		},
		{
			name:   "3",
			ts:     1699839716442000000,
			data:   "{\"timestamp\": \"1699839716442000000\", \"cluster_id\": \"cluster_id\", \"user\": \"user\", \"operation\": \"operation\", \"db\":\"dbnameb\", \"resource\":\"resourcenameb\", \"client_add\": \"localhost:30000\", \"details\": \"create database `meter` buffer 32 cachemodel 'none' duration 50d keep 3650d single_stable 0 wal_retention_period 3600 precision 'ms'\"}",
			expect: "create database `meter` buffer 32 cachemodel 'none' duration 50d keep 3650d single_stable 0 wal_retention_period 3600 precision 'ms'",
		},
	}

	// Legacy format: numeric millisecond timestamp, posted to /audit.
	cases2 := []struct {
		name   string
		ts     int64
		detail string
		data   string
		expect string
	}{
		{
			name:   "1",
			ts:     1699839716445000000,
			data:   `{"timestamp":1699839716445, "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "details"}`,
			expect: "details",
		},
	}

	conn, err := db.NewConnectorWithDb(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.Audit.Database.Name, cfg.TDengine.Usessl)
	assert.NoError(t, err)
	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Audit.Database.Name), util.GetQidOwn())
	}()

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			w := httptest.NewRecorder()
			body := strings.NewReader(c.data)
			req, _ := http.NewRequest(http.MethodPost, "/audit_v2", body)
			router.ServeHTTP(w, req)
			assert.Equal(t, 200, w.Code)

			data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn())
			assert.NoError(t, err)
			assert.Equal(t, 1, len(data.Data))
			assert.Equal(t, c.expect, data.Data[0][1])
		})
	}

	for _, c := range cases2 {
		t.Run(c.name, func(t *testing.T) {
			w := httptest.NewRecorder()
			body := strings.NewReader(c.data)
			req, _ := http.NewRequest(http.MethodPost, "/audit", body)
			router.ServeHTTP(w, req)
			assert.Equal(t, 200, w.Code)

			data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn())
			assert.NoError(t, err)
			assert.Equal(t, 1, len(data.Data))
			assert.Equal(t, c.expect, data.Data[0][1])
		})
	}

	// NOTE(review): this loop is identical to the one above — presumably it
	// re-posts the same record to check the row is not duplicated (still
	// expects len==1); confirm this repetition is intentional.
	for _, c := range cases2 {
		t.Run(c.name, func(t *testing.T) {
			w := httptest.NewRecorder()
			body := strings.NewReader(c.data)
			req, _ := http.NewRequest(http.MethodPost, "/audit", body)
			router.ServeHTTP(w, req)
			assert.Equal(t, 200, w.Code)

			data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn())
			assert.NoError(t, err)
			assert.Equal(t, 1, len(data.Data))
			assert.Equal(t, c.expect, data.Data[0][1])
		})
	}

	// NOTE(review): mutates the package-level MAX_SQL_LEN without restoring
	// it, which leaks into any test that runs afterwards in this package.
	MAX_SQL_LEN = 300

	// test audit batch
	input := `{"records":[{"timestamp":"1702548856940013848","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d630302"},{"timestamp":"1702548856939746458","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45230","db":"test","resource":"","details":"d130277"},{"timestamp":"1702548856939586665","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5268"},{"timestamp":"1702548856939528940","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50222","db":"test","resource":"","details":"d255282"},{"timestamp":"1702548856939336371","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45126","db":"test","resource":"","details":"d755297"},{"timestamp":"1702548856939075131","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d380325"},{"timestamp":"1702548856938640661","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45152","db":"test","resource":"","details":"d255281"},{"timestamp":"1702548856938505795","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d130276"},{"timestamp":"1702548856938363319","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45178","db":"test","resource":"","details":"d755296"},{"timestamp":"1702548856938201478","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d380324"},{"timestamp":"1702548856937740618","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5266"}]}`

	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Audit.Database.Name), util.GetQidOwn())
	}()

	t.Run("testbatch", func(t *testing.T) {
		//test empty array
		w1 := httptest.NewRecorder()
		body1 := strings.NewReader(`{"records": []}`)

		req1, _ := http.NewRequest(http.MethodPost, "/audit-batch", body1)
		router.ServeHTTP(w1, req1)
		assert.Equal(t, 200, w1.Code)

		//test 2 items array
		w := httptest.NewRecorder()

		body := strings.NewReader(input)
		req, _ := http.NewRequest(http.MethodPost, "/audit-batch", body)
		router.ServeHTTP(w, req)
		assert.Equal(t, 200, w.Code)

		data, err := conn.Query(context.Background(), "select ts, details from "+cfg.Audit.Database.Name+".operations where cluster_id='8468922059162439502'", util.GetQidOwn())
		assert.NoError(t, err)
		assert.Equal(t, 11, len(data.Data))
	})
}

View File

@ -0,0 +1,21 @@
package api
import (
"net/http"
"github.com/gin-gonic/gin"
)
// NewCheckHealth builds the health-check endpoint handler that reports the
// given keeper version.
func NewCheckHealth(version string) *CheckHealth {
	health := CheckHealth{version: version}
	return &health
}
// CheckHealth serves GET /check_health, returning the keeper version.
type CheckHealth struct {
	version string // version string reported in the response body
}
// Init registers GET /check_health, which always answers 200 with
// {"version": <keeper version>}.
func (h *CheckHealth) Init(c gin.IRouter) {
	// The closure parameter is named ctx (not "context") to avoid shadowing
	// the standard library package name.
	c.GET("check_health", func(ctx *gin.Context) {
		ctx.JSON(http.StatusOK, map[string]string{"version": h.version})
	})
}

View File

@ -0,0 +1,89 @@
package api
import (
"bytes"
"context"
"fmt"
"time"
"github.com/sirupsen/logrus"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/util"
)
// commonLogger is the package logger for shared database helpers ("CMN" module).
var commonLogger = log.GetLogger("CMN")
// CreateDatabase creates dbname (if absent) with the given options, retrying
// the statement up to 3 times with a 5s pause between attempts. A failed
// connection is logged and silently ignored; exhausting all retries panics.
func CreateDatabase(username string, password string, host string, port int, usessl bool, dbname string, databaseOptions map[string]interface{}) {
	qid := util.GetQidOwn()

	commonLogger := commonLogger.WithFields(
		logrus.Fields{config.ReqIDKey: qid},
	)

	ctx := context.Background()

	conn, err := db.NewConnector(username, password, host, port, usessl)
	if err != nil {
		commonLogger.Errorf("connect to adapter error, msg:%s", err)
		return
	}

	defer closeConn(conn)

	createDBSql := generateCreateDBSql(dbname, databaseOptions)
	commonLogger.Warningf("create database sql: %s", createDBSql)

	// FIX: keep the last Exec error for the panic below. Previously the loop
	// shadowed err with ":=", so the panic after the retries fired with the
	// outer err — which is always nil at that point — i.e. panic(nil),
	// discarding the actual failure cause.
	var lastErr error
	for i := 0; i < 3; i++ {
		if _, lastErr = conn.Exec(ctx, createDBSql, util.GetQidOwn()); lastErr != nil {
			commonLogger.Errorf("try %v times: create database %s error, msg:%v", i+1, dbname, lastErr)
			time.Sleep(5 * time.Second)
			continue
		}
		return
	}
	panic(lastErr)
}
// generateCreateDBSql builds "create database if not exists <dbname>"
// followed by each option as " key value " (string values single-quoted).
// Option order follows map iteration and is therefore unspecified.
func generateCreateDBSql(dbname string, databaseOptions map[string]interface{}) string {
	var sb bytes.Buffer
	sb.WriteString("create database if not exists ")
	sb.WriteString(dbname)

	for option, value := range databaseOptions {
		sb.WriteString(" ")
		sb.WriteString(option)
		if text, ok := value.(string); ok {
			fmt.Fprintf(&sb, " '%s'", text)
		} else {
			fmt.Fprintf(&sb, " %v", value)
		}
		sb.WriteString(" ")
	}
	return sb.String()
}
// CreatTables runs each statement in createList against dbname over a
// transient connection. Individual statement failures are logged and the
// loop continues; a failed connection aborts silently after logging.
// (The function name is misspelled but kept for caller compatibility.)
func CreatTables(username string, password string, host string, port int, usessl bool, dbname string, createList []string) {
	background := context.Background()
	connector, connErr := db.NewConnectorWithDb(username, password, host, port, dbname, usessl)
	if connErr != nil {
		commonLogger.Errorf("connect to database error, msg:%s", connErr)
		return
	}
	defer closeConn(connector)

	for _, statement := range createList {
		commonLogger.Infof("execute sql:%s", statement)
		_, execErr := connector.Exec(background, statement, util.GetQidOwn())
		if execErr != nil {
			commonLogger.Errorf("execute sql: %s, error: %s", statement, execErr)
		}
	}
}
// closeConn closes the connection, logging (but not propagating) any error;
// used as the shared deferred cleanup for transient connections.
func closeConn(conn *db.Connector) {
	if err := conn.Close(); err != nil {
		commonLogger.Errorf("close connection error, msg:%s", err)
	}
}

View File

@ -0,0 +1,297 @@
package api
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/gin-gonic/gin"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"github.com/taosdata/taoskeeper/cmd"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/process"
"github.com/taosdata/taoskeeper/util"
)
// Shared fixtures for the api package tests: the gin router under test, the
// loaded configuration, and the scratch metrics database name.
var router *gin.Engine
var conf *config.Config
var dbName = "exporter_test"
// TestMain prepares the shared fixtures for the api package tests: it loads
// the config, (re)creates the scratch metrics database, wires the reporter
// and node-exporter routes onto a fresh gin router, runs the tests, and
// drops the database afterwards.
// NOTE(review): the result of m.Run() is discarded; since Go 1.15 TestMain
// may omit os.Exit, but confirm the intended toolchain.
func TestMain(m *testing.M) {
	conf = config.InitConfig()
	log.ConfigLog()

	conf.Metrics.Database.Name = dbName
	conn, err := db.NewConnector(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	ctx := context.Background()
	// Best-effort drop of a leftover database; the error is ignored.
	conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn())

	if _, err = conn.Exec(ctx, fmt.Sprintf("create database if not exists %s", dbName), util.GetQidOwn()); err != nil {
		logger.Errorf("execute sql: %s, error: %s", fmt.Sprintf("create database %s", dbName), err)
	}
	gin.SetMode(gin.ReleaseMode)
	router = gin.New()
	reporter := NewReporter(conf)
	reporter.Init(router)

	var createList = []string{
		CreateClusterInfoSql,
		CreateDnodeSql,
		CreateMnodeSql,
		CreateDnodeInfoSql,
		CreateDataDirSql,
		CreateLogDirSql,
		CreateTempDirSql,
		CreateVgroupsInfoSql,
		CreateVnodeRoleSql,
		CreateSummarySql,
		CreateGrantInfoSql,
		CreateKeeperSql,
	}
	CreatTables(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl, conf.Metrics.Database.Name, createList)

	processor := process.NewProcessor(conf)
	node := NewNodeExporter(processor)
	node.Init(router)

	m.Run()

	if _, err = conn.Exec(ctx, fmt.Sprintf("drop database if exists %s", dbName), util.GetQidOwn()); err != nil {
		logger.Errorf("execute sql: %s, error: %s", fmt.Sprintf("drop database %s", dbName), err)
	}
}
// TestGetMetrics verifies that GET /metrics answers with HTTP 200.
func TestGetMetrics(t *testing.T) {
	recorder := httptest.NewRecorder()
	request, _ := http.NewRequest(http.MethodGet, "/metrics", nil)
	router.ServeHTTP(recorder, request)
	assert.Equal(t, 200, recorder.Code)
}
// now/nowStr pin a single timestamp so later assertions can compare row times
// against exactly what was submitted.
var now = time.Now()
var nowStr = now.Format(time.RFC3339Nano)

// report is a fully-populated sample payload for POST /report, covering the
// cluster, vgroup, grant, dnode, disk and log sections of the legacy schema.
var report = Report{
	Ts:        nowStr,
	DnodeID:   1,
	DnodeEp:   "localhost:7100",
	ClusterID: "6980428120398645172",
	Protocol:  1,
	ClusterInfo: &ClusterInfo{
		FirstEp:          "localhost:7100",
		FirstEpDnodeID:   1,
		Version:          "3.0.0.0",
		MasterUptime:     2.3090276954462752e-05,
		MonitorInterval:  1,
		VgroupsTotal:     2,
		VgroupsAlive:     2,
		VnodesTotal:      2,
		VnodesAlive:      2,
		ConnectionsTotal: 1,
		Dnodes: []Dnode{
			{
				DnodeID: 1,
				DnodeEp: "localhost:7100",
				Status:  "ready",
			},
		},
		Mnodes: []Mnode{
			{
				MnodeID: 1,
				MnodeEp: "localhost:7100",
				Role:    "master",
			},
		},
	},
	VgroupInfos: []VgroupInfo{
		{
			VgroupID:     1,
			DatabaseName: "test",
			TablesNum:    1,
			Status:       "ready",
			Vnodes: []Vnode{
				{
					DnodeID:   1,
					VnodeRole: "LEADER",
				},
				{
					DnodeID:   2,
					VnodeRole: "FOLLOWER",
				},
			},
		},
	},
	GrantInfo: &GrantInfo{
		ExpireTime:      2147483647,
		TimeseriesUsed:  800,
		TimeseriesTotal: 2147483647,
	},
	DnodeInfo: DnodeInfo{
		Uptime:                0.000291412026854232,
		CPUEngine:             0.0828500414250207,
		CPUSystem:             0.4971002485501243,
		CPUCores:              12,
		MemEngine:             9268,
		MemSystem:             54279816,
		MemTotal:              65654816,
		DiskEngine:            0,
		DiskUsed:              39889702912,
		DiskTotal:             210304475136,
		NetIn:                 4727.45292368682,
		NetOut:                2194.251734390486,
		IoRead:                3789.8909811694753,
		IoWrite:               12311.19920713578,
		IoReadDisk:            0,
		IoWriteDisk:           12178.394449950447,
		ReqSelect:             2,
		ReqSelectRate:         0,
		ReqInsert:             6,
		ReqInsertSuccess:      4,
		ReqInsertRate:         0,
		ReqInsertBatch:        10,
		ReqInsertBatchSuccess: 8,
		ReqInsertBatchRate:    0,
		Errors:                2,
		VnodesNum:             2,
		Masters:               2,
		HasMnode:              1,
		HasQnode:              1,
		HasSnode:              1,
		HasBnode:              1,
	},
	DiskInfos: DiskInfo{
		Datadir: []DataDir{
			{
				Name:  "/root/TDengine/sim/dnode1/data",
				Level: 0,
				Avail: decimal.NewFromInt(171049893888),
				Used:  decimal.NewFromInt(39254581248),
				Total: decimal.NewFromInt(210304475136),
			},
			{
				Name:  "/root/TDengine/sim/dnode2/data",
				Level: 1,
				Avail: decimal.NewFromInt(171049893888),
				Used:  decimal.NewFromInt(39254581248),
				Total: decimal.NewFromInt(210304475136),
			},
		},
		Logdir: LogDir{
			Name:  "/root/TDengine/sim/dnode1/log",
			Avail: decimal.NewFromInt(171049771008),
			Used:  decimal.NewFromInt(39254704128),
			Total: decimal.NewFromInt(210304475136),
		},
		Tempdir: TempDir{
			Name:  "/tmp",
			Avail: decimal.NewFromInt(171049771008),
			Used:  decimal.NewFromInt(39254704128),
			Total: decimal.NewFromInt(210304475136),
		},
	},
	// The "info" count (114) is what TestPutMetrics asserts on after ingestion.
	LogInfos: LogInfo{
		Summary: []Summary{
			{
				Level: "error",
				Total: 0,
			}, {
				Level: "info",
				Total: 114,
			}, {
				Level: "debug",
				Total: 117,
			}, {
				Level: "trace",
				Total: 126,
			},
		},
	},
}
// TestPutMetrics posts the sample report, checks it landed in the legacy
// tables, then exercises the "transfer" command (migrating old taosd metrics
// into the new taosd_* stables) and the "drop" command (removing the old
// stables), asserting row counts and timestamps at each step.
func TestPutMetrics(t *testing.T) {
	w := httptest.NewRecorder()
	b, _ := json.Marshal(report)
	body := strings.NewReader(string(b))
	req, _ := http.NewRequest(http.MethodPost, "/report", body)
	router.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host,
		conf.TDengine.Port, dbName, conf.TDengine.Usessl)
	if err != nil {
		logger.Errorf("connect to database error, msg:%s", err)
		return
	}
	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn())
	}()
	ctx := context.Background()
	// The ingested report carried 114 "info" log lines (see the fixture).
	data, err := conn.Query(ctx, "select info from log_summary", util.GetQidOwn())
	if err != nil {
		logger.Errorf("execute sql:%s, error:%s", "select * from log_summary", err)
		t.Fatal(err)
	}
	for _, info := range data.Data {
		assert.Equal(t, int32(114), info[0])
	}
	// Transfer metrics written in the last 10 minutes to the new schema.
	var tenMinutesBefore = now.Add(-10 * time.Minute)
	var tenMinutesBeforeStr = tenMinutesBefore.Format(time.RFC3339Nano)
	conf.FromTime = tenMinutesBeforeStr
	conf.Transfer = "old_taosd_metric"
	var cmd = cmd.NewCommand(conf)
	cmd.Process(conf)
	type TableInfo struct {
		TsName string // timestamp column name in the destination table
		RowNum int    // expected number of migrated rows
	}
	tables := map[string]*TableInfo{
		"taosd_cluster_basic":    {"ts", 1},
		"taosd_cluster_info":     {"_ts", 1},
		"taosd_vgroups_info":     {"_ts", 1},
		"taosd_dnodes_info":      {"_ts", 1},
		"taosd_dnodes_status":    {"_ts", 1},
		"taosd_dnodes_data_dirs": {"_ts", 1},
		"taosd_dnodes_log_dirs":  {"_ts", 2},
		"taosd_mnodes_info":      {"_ts", 1},
		"taosd_vnodes_info":      {"_ts", 1},
	}
	for table, tableInfo := range tables {
		data, err = conn.Query(ctx, fmt.Sprintf("select %s from %s", tableInfo.TsName, table), util.GetQidOwn())
		if err != nil {
			logger.Errorf("execute sql:%s, error:%s", "select * from "+table, err)
			t.Fatal(err)
		}
		assert.Equal(t, tableInfo.RowNum, len(data.Data))
		// Migrated rows must keep the original report timestamp.
		assert.Equal(t, now.UnixMilli(), data.Data[0][0].(time.Time).UnixMilli())
	}
	// Dropping the old stables should remove e.g. m_info entirely.
	conf.Transfer = ""
	conf.Drop = "old_taosd_metric_stables"
	cmd.Process(conf)
	data, err = conn.Query(ctx, "select * from information_schema.ins_stables where stable_name = 'm_info'", util.GetQidOwn())
	if err != nil {
		logger.Errorf("execute sql:%s, error:%s", "m_info is not droped", err)
		t.Fatal(err)
	}
	assert.Equal(t, 0, len(data.Data))
	logger.Infof("ALL OK !!!")
}

View File

@ -0,0 +1,770 @@
package api
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"regexp"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/util"
)
// re collapses runs of single quotes when sanitizing slow-query SQL text.
var re = regexp.MustCompile("'+")
var gmLogger = log.GetLogger("GEN")

// MAX_SQL_LEN caps the accumulated batch-insert statement size in bytes
// (mutable so tests can shrink it to force batch splitting).
var MAX_SQL_LEN = 1000000

// STABLE_NAME_KEY is the influx line-protocol tag key that carries the
// explicit sub-table name for taosAdapter.
var STABLE_NAME_KEY = "priv_stn"
// ColumnSeq records the column ordering (tags, then metrics) of one super
// table, so line-protocol rows can be emitted in schema order.
type ColumnSeq struct {
	tagNames    []string // tag column names, in schema order
	metricNames []string // metric column names, in schema order
}

// gColumnSeqMap caches the column ordering per stable name; guarded by mu.
var (
	mu            sync.RWMutex
	gColumnSeqMap = make(map[string]ColumnSeq)
)
// GeneralMetric ingests generic metric payloads and forwards them to TDengine,
// either via the influxdb write endpoint (client/url) or via SQL (conn).
type GeneralMetric struct {
	client   *http.Client  // HTTP client for the influxdb/v1/write endpoint
	conn     *db.Connector // SQL connection for DDL and batch inserts
	username string
	password string
	host     string
	port     int
	usessl   bool
	database string   // target metrics database
	url      *url.URL // pre-built influxdb write URL (db/precision/table_name_key)
}
// Tag is a single name/value tag of a metric group.
type Tag struct {
	Name  string `json:"name"`
	Value string `json:"value"`
}

// Metric is a single named numeric sample.
type Metric struct {
	Name  string  `json:"name"`
	Value float64 `json:"value"`
}

// MetricGroup pairs one tag set with the metrics sampled under it.
type MetricGroup struct {
	Tags    []Tag    `json:"tags"`
	Metrics []Metric `json:"metrics"`
}

// StableInfo holds all metric groups destined for one super table.
type StableInfo struct {
	Name         string        `json:"name"`
	MetricGroups []MetricGroup `json:"metric_groups"`
}

// StableArrayInfo is one timestamped batch in the /general-metric payload.
type StableArrayInfo struct {
	Ts       string       `json:"ts"`
	Protocol int          `json:"protocol"`
	Tables   []StableInfo `json:"tables"`
}

// ClusterBasic is the /taosd-cluster-basic payload.
type ClusterBasic struct {
	ClusterId      string `json:"cluster_id"`
	Ts             string `json:"ts"`
	FirstEp        string `json:"first_ep"`
	FirstEpDnodeId int32  `json:"first_ep_dnode_id"`
	ClusterVersion string `json:"cluster_version"`
}

// SlowSqlDetailInfo is one record of the /slow-sql-detail-batch payload.
type SlowSqlDetailInfo struct {
	StartTs     string `json:"start_ts"`
	RequestId   string `json:"request_id"`
	QueryTime   int32  `json:"query_time"`
	Code        int32  `json:"code"`
	ErrorInfo   string `json:"error_info"`
	Type        int8   `json:"type"`
	RowsNum     int64  `json:"rows_num"`
	Sql         string `json:"sql"`
	ProcessName string `json:"process_name"`
	ProcessId   string `json:"process_id"`
	Db          string `json:"db"`
	User        string `json:"user"`
	Ip          string `json:"ip"`
	ClusterId   string `json:"cluster_id"`
}
// Init registers the three ingestion endpoints on the router, opens the SQL
// connection, ensures the super tables exist, and seeds the global
// column-sequence cache from the current schema.
func (gm *GeneralMetric) Init(c gin.IRouter) error {
	c.POST("/general-metric", gm.handleFunc())
	c.POST("/taosd-cluster-basic", gm.handleTaosdClusterBasic())
	c.POST("/slow-sql-detail-batch", gm.handleSlowSqlDetailBatch())
	conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl)
	if err != nil {
		gmLogger.Errorf("init db connect error, msg:%s", err)
		return err
	}
	gm.conn = conn
	err = gm.createSTables()
	if err != nil {
		gmLogger.Errorf("create stable error, msg:%s", err)
		return err
	}
	err = gm.initColumnSeqMap()
	if err != nil {
		gmLogger.Errorf("init gColumnSeqMap error, msg:%s", err)
		return err
	}
	return err
}
// NewGeneralMetric builds a GeneralMetric from the given configuration.
// The embedded HTTP client targets the influxdb/v1/write endpoint of
// taosAdapter and skips TLS certificate verification.
func NewGeneralMetric(conf *config.Config) *GeneralMetric {
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		DisableCompression:    true,
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}
	scheme := "http"
	if conf.TDengine.Usessl {
		scheme = "https"
	}
	// Pre-build the write URL; STABLE_NAME_KEY tells taosAdapter which tag
	// carries the sub-table name.
	writeURL := &url.URL{
		Scheme:   scheme,
		Host:     fmt.Sprintf("%s:%d", conf.TDengine.Host, conf.TDengine.Port),
		Path:     "/influxdb/v1/write",
		RawQuery: fmt.Sprintf("db=%s&precision=ms&table_name_key=%s", conf.Metrics.Database.Name, STABLE_NAME_KEY),
	}
	return &GeneralMetric{
		client:   &http.Client{Transport: transport},
		username: conf.TDengine.Username,
		password: conf.TDengine.Password,
		host:     conf.TDengine.Host,
		port:     conf.TDengine.Port,
		usessl:   conf.TDengine.Usessl,
		database: conf.Metrics.Database.Name,
		url:      writeURL,
	}
}
// handleFunc returns the POST /general-metric handler: it parses the batch
// payload and forwards it as influx line protocol via handleBatchMetrics.
func (gm *GeneralMetric) handleFunc() gin.HandlerFunc {
	return func(c *gin.Context) {
		qid := util.GetQid(c.GetHeader("X-QID"))
		gmLogger := gmLogger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)
		if gm.client == nil {
			gmLogger.Error("no connection")
			c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
			return
		}
		data, err := c.GetRawData()
		if err != nil {
			gmLogger.Errorf("get general metric data error, msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get general metric data error. %s", err)})
			return
		}
		var request []StableArrayInfo
		if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
			gmLogger.Tracef("data:%s", string(data))
		}
		if err := json.Unmarshal(data, &request); err != nil {
			gmLogger.Errorf("parse general metric data error, data:%s, error:%s", string(data), err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse general metric data error: %s", err)})
			return
		}
		// An empty batch is not an error.
		if len(request) == 0 {
			c.JSON(http.StatusOK, gin.H{})
			return
		}
		err = gm.handleBatchMetrics(request, qid)
		if err != nil {
			gmLogger.Errorf("process records error. msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("process records error. %s", err)})
			return
		}
		c.JSON(http.StatusOK, gin.H{})
	}
}
// handleBatchMetrics renders the parsed request as influx line protocol and
// ships it through lineWriteBody. Entries with an empty timestamp or stable
// name are skipped with an error log.
func (gm *GeneralMetric) handleBatchMetrics(request []StableArrayInfo, qid uint64) error {
	var payload bytes.Buffer
	for _, stableArrayInfo := range request {
		if stableArrayInfo.Ts == "" {
			gmLogger.Error("ts data is empty")
			continue
		}
		for _, table := range stableArrayInfo.Tables {
			if table.Name == "" {
				gmLogger.Error("stable name is empty")
				continue
			}
			table.Name = strings.ToLower(table.Name)
			// Make sure a column-order entry exists for this stable.
			if _, ok := Load(table.Name); !ok {
				Init(table.Name)
			}
			// One line per metric group: <name><tags> <metrics> <ts>
			for _, group := range table.MetricGroups {
				payload.WriteString(table.Name)
				writeTags(group.Tags, table.Name, &payload)
				payload.WriteString(" ")
				writeMetrics(group.Metrics, table.Name, &payload)
				payload.WriteString(" ")
				payload.WriteString(stableArrayInfo.Ts)
				payload.WriteString("\n")
			}
		}
	}
	if payload.Len() == 0 {
		return nil
	}
	return gm.lineWriteBody(&payload, qid)
}
// lineWriteBody POSTs the accumulated influx line-protocol payload to the
// pre-built write URL, propagating qid as a query parameter for tracing.
// Returns an error on transport failure or any status other than 204.
func (gm *GeneralMetric) lineWriteBody(buf *bytes.Buffer, qid uint64) error {
	gmLogger := gmLogger.WithFields(
		logrus.Fields{config.ReqIDKey: qid},
	)
	header := map[string][]string{
		"Connection": {"keep-alive"},
	}
	req_data := buf.String()
	// Build a per-request URL carrying the request id (qid).
	urlWithQid := *gm.url
	query := urlWithQid.Query()
	query.Set("qid", fmt.Sprintf("%d", qid))
	urlWithQid.RawQuery = query.Encode()
	req := &http.Request{
		Method:     http.MethodPost,
		URL:        &urlWithQid,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     header,
		Host:       gm.url.Host,
	}
	req.SetBasicAuth(gm.username, gm.password)
	req.Body = io.NopCloser(buf)
	startTime := time.Now()
	resp, err := gm.client.Do(req)
	latency := time.Since(startTime)
	if err != nil {
		// BUG FIX: when client.Do returns an error, resp is nil; the previous
		// code dereferenced resp.StatusCode here and panicked instead of
		// reporting the transport failure.
		gmLogger.Errorf("latency:%v, req_data:%v, url:%s, err:%s", latency, req_data, urlWithQid.String(), err)
		return err
	}
	defer resp.Body.Close()
	if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
		gmLogger.Tracef("latency:%v, req_data:%v, url:%s, resp:%d", latency, req_data, urlWithQid.String(), resp.StatusCode)
	}
	// The influxdb write endpoint answers 204 No Content on success.
	if resp.StatusCode != http.StatusNoContent {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("unexpected status code %d:body:%s", resp.StatusCode, string(body))
	}
	return nil
}
// handleTaosdClusterBasic returns the POST /taosd-cluster-basic handler: it
// inserts one row into taosd_cluster_basic, auto-creating the per-cluster
// sub-table via the "using ... tags" clause.
func (gm *GeneralMetric) handleTaosdClusterBasic() gin.HandlerFunc {
	return func(c *gin.Context) {
		qid := util.GetQid(c.GetHeader("X-QID"))
		gmLogger := gmLogger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)
		if gm.conn == nil {
			gmLogger.Error("no connection")
			c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
			return
		}
		data, err := c.GetRawData()
		if err != nil {
			gmLogger.Errorf("get taosd cluster basic data error, msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get general metric data error. %s", err)})
			return
		}
		if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
			gmLogger.Tracef("receive taosd cluster basic data:%s", string(data))
		}
		var request ClusterBasic
		if err := json.Unmarshal(data, &request); err != nil {
			gmLogger.Errorf("parse general metric data error, data:%s, msg:%s", string(data), err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse general metric data error: %s", err)})
			return
		}
		// NOTE(review): SQL is built with fmt.Sprintf from request fields;
		// the payload comes from taosd itself, but values are not escaped —
		// confirm upstream guarantees or switch to parameter binding.
		sql := fmt.Sprintf(
			"insert into %s.taosd_cluster_basic_%s using taosd_cluster_basic tags ('%s') values (%s, '%s', %d, '%s') ",
			gm.database, request.ClusterId, request.ClusterId, request.Ts, request.FirstEp, request.FirstEpDnodeId, request.ClusterVersion)
		if _, err = gm.conn.Exec(context.Background(), sql, qid); err != nil {
			gmLogger.Errorf("insert taosd_cluster_basic error, msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taosd_cluster_basic error. %s", err)})
			return
		}
		c.JSON(http.StatusOK, gin.H{})
	}
}
// processString normalizes a raw identifier for use in a table name: it
// strips any leading digits, maps "." to "_", and drops every character
// outside [a-zA-Z0-9_].
func processString(input string) string {
	leadingDigits := regexp.MustCompile(`^\d+`)
	out := leadingDigits.ReplaceAllString(input, "")
	out = strings.ReplaceAll(out, ".", "_")
	invalidChars := regexp.MustCompile(`[^a-zA-Z0-9_]`)
	return invalidChars.ReplaceAllString(out, "")
}
// handleSlowSqlDetailBatch returns the POST /slow-sql-detail-batch handler:
// it sanitizes and truncates each slow-query record, derives a sub-table name
// from user/db/ip/cluster, and batches the inserts into SQL statements no
// larger than MAX_SQL_LEN, flushing a partial batch at the end.
func (gm *GeneralMetric) handleSlowSqlDetailBatch() gin.HandlerFunc {
	return func(c *gin.Context) {
		qid := util.GetQid(c.GetHeader("X-QID"))
		gmLogger := gmLogger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)
		if gm.conn == nil {
			gmLogger.Error("no connection")
			c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
			return
		}
		data, err := c.GetRawData()
		if err != nil {
			gmLogger.Errorf("get taos slow sql detail data error, msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get taos slow sql detail data error. %s", err)})
			return
		}
		if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
			gmLogger.Tracef("receive taos slow sql detail data:%s", string(data))
		}
		var request []SlowSqlDetailInfo
		if err := json.Unmarshal(data, &request); err != nil {
			gmLogger.Errorf("parse taos slow sql detail error, msg:%s", string(data))
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse taos slow sql detail error: %s", err)})
			return
		}
		var sql_head = "INSERT INTO `taos_slow_sql_detail` (tbname, `db`, `user`, `ip`, `cluster_id`, `start_ts`, `request_id`, `query_time`, `code`, `error_info`, `type`, `rows_num`, `sql`, `process_name`, `process_id`) values "
		var buf bytes.Buffer
		buf.WriteString(sql_head)
		// qid_counter distinguishes the qid of each flushed sub-batch.
		var qid_counter uint8 = 0
		for _, slowSqlDetailInfo := range request {
			if slowSqlDetailInfo.StartTs == "" {
				gmLogger.Error("start_ts data is empty")
				continue
			}
			// cut string to max len
			slowSqlDetailInfo.Sql = re.ReplaceAllString(slowSqlDetailInfo.Sql, "'") // collapse runs of quotes into a single quote
			slowSqlDetailInfo.Sql = strings.ReplaceAll(slowSqlDetailInfo.Sql, "'", "''")
			slowSqlDetailInfo.Sql = util.SafeSubstring(slowSqlDetailInfo.Sql, 16384)
			slowSqlDetailInfo.ClusterId = util.SafeSubstring(slowSqlDetailInfo.ClusterId, 32)
			slowSqlDetailInfo.Db = util.SafeSubstring(slowSqlDetailInfo.Db, 1024)
			if slowSqlDetailInfo.Db == "" {
				slowSqlDetailInfo.Db = "unknown"
			}
			slowSqlDetailInfo.User = util.SafeSubstring(slowSqlDetailInfo.User, 32)
			slowSqlDetailInfo.Ip = util.SafeSubstring(slowSqlDetailInfo.Ip, 32)
			slowSqlDetailInfo.ProcessName = util.SafeSubstring(slowSqlDetailInfo.ProcessName, 32)
			slowSqlDetailInfo.ProcessId = util.SafeSubstring(slowSqlDetailInfo.ProcessId, 32)
			slowSqlDetailInfo.ErrorInfo = util.SafeSubstring(slowSqlDetailInfo.ErrorInfo, 128)
			// max len 192
			var sub_table_name = slowSqlDetailInfo.User + "_" + util.SafeSubstring(slowSqlDetailInfo.Db, 80) + "_" + slowSqlDetailInfo.Ip + "_clusterId_" + slowSqlDetailInfo.ClusterId
			sub_table_name = strings.ToLower(processString(sub_table_name))
			var sql = fmt.Sprintf(
				"('%s', '%s', '%s', '%s', '%s', %s, %s, %d, %d, '%s', %d, %d, '%s', '%s', '%s') ",
				sub_table_name,
				slowSqlDetailInfo.Db, slowSqlDetailInfo.User, slowSqlDetailInfo.Ip, slowSqlDetailInfo.ClusterId, slowSqlDetailInfo.StartTs, slowSqlDetailInfo.RequestId,
				slowSqlDetailInfo.QueryTime, slowSqlDetailInfo.Code, slowSqlDetailInfo.ErrorInfo, slowSqlDetailInfo.Type, slowSqlDetailInfo.RowsNum, slowSqlDetailInfo.Sql,
				slowSqlDetailInfo.ProcessName, slowSqlDetailInfo.ProcessId)
			// Flush the current batch before it would exceed MAX_SQL_LEN.
			if (buf.Len() + len(sql)) < MAX_SQL_LEN {
				buf.WriteString(sql)
			} else {
				if _, err = gm.conn.Exec(context.Background(), buf.String(), qid|uint64((qid_counter%255))); err != nil {
					gmLogger.Errorf("insert taos_slow_sql_detail error, sql:%s, error:%s", buf.String(), err)
					c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taos_slow_sql_detail error. %s", err)})
					return
				}
				buf.Reset()
				buf.WriteString(sql_head)
				buf.WriteString(sql)
				qid_counter++
			}
		}
		// Flush whatever remains after the loop.
		if buf.Len() > len(sql_head) {
			if _, err = gm.conn.Exec(context.Background(), buf.String(), qid|uint64((qid_counter%255))); err != nil {
				gmLogger.Errorf("insert taos_slow_sql_detail error, data:%s, msg:%s", buf.String(), err)
				c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taos_slow_sql_detail error. %s", err)})
				return
			}
		}
		c.JSON(http.StatusOK, gin.H{})
	}
}
// writeTags appends the tag section of one influx line to buf, in the cached
// schema order for stbName. Unknown/missing tags are written as "unknown";
// if no explicit sub-table-name tag is present, one is derived and appended.
func writeTags(tags []Tag, stbName string, buf *bytes.Buffer) {
	var nameArray []string
	if columnSeq, ok := Load(stbName); ok {
		if len(columnSeq.tagNames) < len(tags) {
			// add column, only schema change will hit here
			for _, tag := range tags {
				if !contains(columnSeq.tagNames, tag.Name) {
					columnSeq.tagNames = append(columnSeq.tagNames, tag.Name)
				}
			}
			Store(stbName, columnSeq)
		}
		nameArray = columnSeq.tagNames
	}
	// Convert the Tag slice into a map for schema-ordered lookup.
	tagMap := make(map[string]string)
	for _, tag := range tags {
		tagMap[tag.Name] = tag.Value
	}
	for _, name := range nameArray {
		if value, ok := tagMap[name]; ok {
			if value != "" {
				buf.WriteString(fmt.Sprintf(",%s=%s", name, util.EscapeInfluxProtocol(value)))
			} else {
				buf.WriteString(fmt.Sprintf(",%s=%s", name, "unknown"))
				gmLogger.Errorf("tag value is empty, tag name:%s", name)
			}
		} else {
			buf.WriteString(fmt.Sprintf(",%s=%s", name, "unknown"))
		}
	}
	// have sub table name
	if _, ok := tagMap[STABLE_NAME_KEY]; ok {
		return
	}
	subTableName := get_sub_table_name_valid(stbName, tagMap)
	if subTableName != "" {
		buf.WriteString(fmt.Sprintf(",%s=%s", STABLE_NAME_KEY, subTableName))
	} else {
		gmLogger.Errorf("get sub stable name error, stable name:%s, tag map:%v", stbName, tagMap)
	}
}
// checkKeysExist reports whether every key in keys is present in data.
// With no keys it vacuously returns true.
func checkKeysExist(data map[string]string, keys ...string) bool {
	for _, k := range keys {
		if _, found := data[k]; !found {
			return false
		}
	}
	return true
}
// get_sub_table_name_valid derives the sub-table name for stbName from its
// tags and sanitizes the result into a valid TDengine table name.
func get_sub_table_name_valid(stbName string, tagMap map[string]string) string {
	rawName := get_sub_table_name(stbName, tagMap)
	return util.ToValidTableName(rawName)
}
// get_sub_table_name maps a super-table name plus its tag values to a
// deterministic sub-table name. Returns "" when the stable is unknown or a
// required tag is missing. Over-long dir-based names fall back to an MD5 of
// the directory component so the result fits MAX_TABLE_NAME_LEN.
func get_sub_table_name(stbName string, tagMap map[string]string) string {
	// taosx_* stables have their own naming family.
	if strings.HasPrefix(stbName, "taosx") {
		switch stbName {
		case "taosx_sys":
			if checkKeysExist(tagMap, "taosx_id") {
				return fmt.Sprintf("sys_%s", tagMap["taosx_id"])
			}
		case "taosx_agent":
			if checkKeysExist(tagMap, "taosx_id", "agent_id") {
				return fmt.Sprintf("agent_%s_%s", tagMap["taosx_id"], tagMap["agent_id"])
			}
		case "taosx_connector":
			if checkKeysExist(tagMap, "taosx_id", "ds_name", "task_id") {
				return fmt.Sprintf("connector_%s_%s_%s", tagMap["taosx_id"], tagMap["ds_name"], tagMap["task_id"])
			}
		default:
			// Per-task stables embed the data-source name in the stable name.
			if strings.HasPrefix(stbName, "taosx_task_") {
				ds_name := stbName[len("taosx_task_"):]
				if checkKeysExist(tagMap, "taosx_id", "task_id") {
					return fmt.Sprintf("task_%s_%s_%s", tagMap["taosx_id"], ds_name, tagMap["task_id"])
				}
			}
			return ""
		}
	}
	switch stbName {
	case "taosd_cluster_info":
		if checkKeysExist(tagMap, "cluster_id") {
			return fmt.Sprintf("cluster_%s", tagMap["cluster_id"])
		}
	case "taosd_vgroups_info":
		if checkKeysExist(tagMap, "cluster_id", "vgroup_id", "database_name") {
			return fmt.Sprintf("vginfo_%s_vgroup_%s_cluster_%s", tagMap["database_name"], tagMap["vgroup_id"], tagMap["cluster_id"])
		}
	case "taosd_dnodes_info":
		if checkKeysExist(tagMap, "cluster_id", "dnode_id") {
			return fmt.Sprintf("dinfo_%s_cluster_%s", tagMap["dnode_id"], tagMap["cluster_id"])
		}
	case "taosd_dnodes_status":
		if checkKeysExist(tagMap, "cluster_id", "dnode_id") {
			return fmt.Sprintf("dstatus_%s_cluster_%s", tagMap["dnode_id"], tagMap["cluster_id"])
		}
	case "taosd_dnodes_log_dirs":
		if checkKeysExist(tagMap, "cluster_id", "dnode_id", "data_dir_name") {
			subTableName := fmt.Sprintf("dlog_%s_%s_cluster_%s", tagMap["dnode_id"], tagMap["data_dir_name"], tagMap["cluster_id"])
			if len(subTableName) <= util.MAX_TABLE_NAME_LEN {
				return subTableName
			}
			// Name too long: hash the directory component instead.
			return fmt.Sprintf("dlog_%s_%s_cluster_%s", tagMap["dnode_id"],
				util.GetMd5HexStr(tagMap["data_dir_name"]),
				tagMap["cluster_id"])
		}
	case "taosd_dnodes_data_dirs":
		if checkKeysExist(tagMap, "cluster_id", "dnode_id", "data_dir_name", "data_dir_level") {
			subTableName := fmt.Sprintf("ddata_%s_%s_level_%s_cluster_%s", tagMap["dnode_id"], tagMap["data_dir_name"], tagMap["data_dir_level"], tagMap["cluster_id"])
			if len(subTableName) <= util.MAX_TABLE_NAME_LEN {
				return subTableName
			}
			// Name too long: hash the directory component instead.
			return fmt.Sprintf("ddata_%s_%s_level_%s_cluster_%s", tagMap["dnode_id"],
				util.GetMd5HexStr(tagMap["data_dir_name"]),
				tagMap["data_dir_level"],
				tagMap["cluster_id"])
		}
	case "taosd_mnodes_info":
		if checkKeysExist(tagMap, "cluster_id", "mnode_id") {
			return fmt.Sprintf("minfo_%s_cluster_%s", tagMap["mnode_id"], tagMap["cluster_id"])
		}
	case "taosd_vnodes_info":
		if checkKeysExist(tagMap, "cluster_id", "database_name", "vgroup_id", "dnode_id") {
			return fmt.Sprintf("vninfo_%s_dnode_%s_vgroup_%s_cluster_%s", tagMap["database_name"], tagMap["dnode_id"], tagMap["vgroup_id"], tagMap["cluster_id"])
		}
	case "taosd_sql_req":
		if checkKeysExist(tagMap, "username", "sql_type", "result", "dnode_id", "vgroup_id", "cluster_id") {
			return fmt.Sprintf("taosdsql_%s_%s_%s_%s_vgroup_%s_cluster_%s", tagMap["username"],
				tagMap["sql_type"], tagMap["result"], tagMap["dnode_id"], tagMap["vgroup_id"], tagMap["cluster_id"])
		}
	case "taos_sql_req":
		if checkKeysExist(tagMap, "username", "sql_type", "result", "cluster_id") {
			return fmt.Sprintf("taossql_%s_%s_%s_cluster_%s", tagMap["username"],
				tagMap["sql_type"], tagMap["result"], tagMap["cluster_id"])
		}
	case "taos_slow_sql":
		if checkKeysExist(tagMap, "username", "duration", "result", "cluster_id") {
			return fmt.Sprintf("slowsql_%s_%s_%s_cluster_%s", tagMap["username"],
				tagMap["duration"], tagMap["result"], tagMap["cluster_id"])
		}
	default:
		return ""
	}
	return ""
}
// contains reports whether item occurs in array.
func contains(array []string, item string) bool {
	for i := range array {
		if array[i] == item {
			return true
		}
	}
	return false
}
// writeMetrics appends the metric section of one influx line to buf, in the
// cached schema order for stbName, as name=<value>f64 pairs.
func writeMetrics(metrics []Metric, stbName string, buf *bytes.Buffer) {
	var nameArray []string
	if columnSeq, ok := Load(stbName); ok {
		if len(columnSeq.metricNames) < len(metrics) {
			// add column, only schema change will hit here
			for _, metric := range metrics {
				if !contains(columnSeq.metricNames, metric.Name) {
					columnSeq.metricNames = append(columnSeq.metricNames, metric.Name)
				}
			}
			Store(stbName, columnSeq)
		}
		nameArray = columnSeq.metricNames
	}
	// Convert the Metric slice into a map for schema-ordered lookup.
	metricMap := make(map[string]float64)
	for _, metric := range metrics {
		metricMap[metric.Name] = metric.Value
	}
	// NOTE(review): the separator is keyed to position in nameArray, so if the
	// LAST schema column is absent from metricMap the line keeps a trailing
	// comma — confirm whether taosAdapter tolerates that.
	for i, name := range nameArray {
		if value, ok := metricMap[name]; ok {
			buf.WriteString(fmt.Sprintf("%s=%sf64", name, strconv.FormatFloat(value, 'f', -1, 64)))
			if i != len(nameArray)-1 {
				buf.WriteString(",")
			}
		}
	}
}
// Store saves the column sequence for key under the global write lock.
func Store(key string, seq ColumnSeq) {
	mu.Lock()
	defer mu.Unlock()
	gColumnSeqMap[key] = seq
}
// Load fetches the column sequence for key under a read lock; the second
// return value reports whether the key was present.
func Load(key string) (ColumnSeq, bool) {
	mu.RLock()
	defer mu.RUnlock()
	seq, found := gColumnSeqMap[key]
	return seq, found
}
// Init registers an empty column sequence for key, leaving any existing
// entry untouched.
func Init(key string) {
	mu.Lock()
	defer mu.Unlock()
	if _, exists := gColumnSeqMap[key]; exists {
		return
	}
	gColumnSeqMap[key] = ColumnSeq{
		tagNames:    []string{},
		metricNames: []string{},
	}
}
// initColumnSeqMap seeds gColumnSeqMap from the live schema: it lists every
// taos_/taosd_/taosx_ stable in the metrics database, then fills each entry's
// tag/metric column order from `desc`.
func (gm *GeneralMetric) initColumnSeqMap() error {
	query := fmt.Sprintf(`
	select stable_name
	from information_schema.ins_stables
	where db_name = '%s'
	and (
		stable_name like 'taos_%%'
		or stable_name like 'taosd_%%'
		or stable_name like 'taosx_%%'
	)
	order by stable_name asc;
	`, gm.database)
	data, err := gm.conn.Query(context.Background(), query, util.GetQidOwn())
	if err != nil {
		return err
	}
	//get all stables, then init gColumnSeqMap
	for _, row := range data.Data {
		stableName := row[0].(string)
		Init(stableName)
	}
	//set gColumnSeqMap with desc stables
	for tableName, columnSeq := range gColumnSeqMap {
		data, err := gm.conn.Query(context.Background(), fmt.Sprintf(`desc %s.%s;`, gm.database, tableName), util.GetQidOwn())
		if err != nil {
			return err
		}
		if len(data.Data) < 1 || len(data.Data[0]) < 4 {
			return fmt.Errorf("desc %s.%s error", gm.database, tableName)
		}
		// Row 0 is the timestamp column; column 3 says "TAG" for tag columns.
		for i, row := range data.Data {
			if i == 0 {
				continue
			}
			if row[3].(string) == "TAG" {
				columnSeq.tagNames = append(columnSeq.tagNames, row[0].(string))
			} else {
				columnSeq.metricNames = append(columnSeq.metricNames, row[0].(string))
			}
		}
		Store(tableName, columnSeq)
	}
	gmLogger.Infof("gColumnSeqMap:%v", gColumnSeqMap)
	return nil
}
// createSTables idempotently creates the two stables this module writes via
// SQL: taosd_cluster_basic and taos_slow_sql_detail.
func (gm *GeneralMetric) createSTables() error {
	var createTableSql = "create stable if not exists taosd_cluster_basic " +
		"(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " +
		"tags (cluster_id varchar(50))"
	if gm.conn == nil {
		return errNoConnection
	}
	_, err := gm.conn.Exec(context.Background(), createTableSql, util.GetQidOwn())
	if err != nil {
		return err
	}
	// Column widths here match the SafeSubstring limits applied on ingest.
	createTableSql = "create stable if not exists taos_slow_sql_detail" +
		" (start_ts TIMESTAMP, request_id BIGINT UNSIGNED PRIMARY KEY, query_time INT, code INT, error_info varchar(128), " +
		"type TINYINT, rows_num BIGINT, sql varchar(16384), process_name varchar(32), process_id varchar(32)) " +
		"tags (db varchar(1024), `user` varchar(32), ip varchar(32), cluster_id varchar(32))"
	_, err = gm.conn.Exec(context.Background(), createTableSql, util.GetQidOwn())
	return err
}

View File

@ -0,0 +1,358 @@
package api
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/util"
)
// router_inited guards against registering the GeneralMetric routes twice
// when more than one test in this file runs.
var router_inited bool = false
// TestClusterBasic exercises the /taosd-cluster-basic and
// /slow-sql-detail-batch endpoints end-to-end: it posts sample payloads and
// verifies the inserted rows by querying the target tables.
func TestClusterBasic(t *testing.T) {
	cfg := util.GetCfg()
	CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options)
	gm := NewGeneralMetric(cfg)
	// Register the routes only once across the tests in this file.
	if !router_inited {
		err := gm.Init(router)
		assert.NoError(t, err)
		router_inited = true
	}
	testcfg := struct {
		name   string
		ts     int64
		tbname string
		data   string
		expect string
	}{
		name:   "1",
		tbname: "taosd_cluster_basic",
		ts:     1705655770381,
		data:   `{"ts":"1705655770381","cluster_id":"7648966395564416484","protocol":2,"first_ep":"ssfood06:6130","first_ep_dnode_id":1,"cluster_version":"3.2.1.0.alp"}`,
		expect: "7648966395564416484",
	}
	conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl)
	assert.NoError(t, err)
	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn())
	}()
	t.Run(testcfg.name, func(t *testing.T) {
		w := httptest.NewRecorder()
		body := strings.NewReader(testcfg.data)
		req, _ := http.NewRequest(http.MethodPost, "/taosd-cluster-basic", body)
		router.ServeHTTP(w, req)
		assert.Equal(t, 200, w.Code)
		data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, cluster_id from %s.%s where ts=%d", gm.database, testcfg.tbname, testcfg.ts), util.GetQidOwn())
		assert.NoError(t, err)
		assert.Equal(t, 1, len(data.Data))
		assert.Equal(t, testcfg.expect, data.Data[0][1])
	})
	// Second case: a two-record slow-sql-detail batch.
	testcfg = struct {
		name   string
		ts     int64
		tbname string
		data   string
		expect string
	}{
		name:   "1",
		tbname: "taos_slow_sql_detail",
		ts:     1703226836762,
		data: `[{
			"start_ts": "1703226836762",
			"request_id": "1",
			"query_time": 100,
			"code": 0,
			"error_info": "",
			"type": 1,
			"rows_num": 5,
			"sql": "select * from abc;",
			"process_name": "abc",
			"process_id": "123",
			"db": "dbname",
			"user": "root",
			"ip": "127.0.0.1",
			"cluster_id": "1234567"
		},
		{
			"start_ts": "1703226836763",
			"request_id": "2",
			"query_time": 100,
			"code": 0,
			"error_info": "",
			"type": 1,
			"rows_num": 5,
			"sql": "insert into abc ('a', 'b') values ('aaa', 'bbb');",
			"process_name": "abc",
			"process_id": "123",
			"db": "dbname",
			"user": "root",
			"ip": "127.0.0.1",
			"cluster_id": "1234567"
		}]`,
		expect: "1234567",
	}
	conn, err = db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl)
	assert.NoError(t, err)
	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn())
	}()
	t.Run(testcfg.name, func(t *testing.T) {
		MAX_SQL_LEN = 1000000
		w := httptest.NewRecorder()
		body := strings.NewReader(testcfg.data)
		req, _ := http.NewRequest(http.MethodPost, "/slow-sql-detail-batch", body)
		router.ServeHTTP(w, req)
		assert.Equal(t, 200, w.Code)
		data, err := conn.Query(context.Background(), fmt.Sprintf("select start_ts, cluster_id from %s.%s where start_ts=%d", gm.database, testcfg.tbname, testcfg.ts), util.GetQidOwn())
		assert.NoError(t, err)
		assert.Equal(t, 1, len(data.Data))
		assert.Equal(t, testcfg.expect, data.Data[0][1])
	})
}
// TestGenMetric posts a two-timestamp /general-metric batch covering two
// stables (taosd_cluster_info, taosd_dnodes_info) and verifies one row per
// (table, timestamp) landed with the expected cluster_id tag. The second
// dnode_ep value deliberately contains influx special characters to exercise
// tag escaping.
func TestGenMetric(t *testing.T) {
	cfg := util.GetCfg()
	CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options)
	gm := NewGeneralMetric(cfg)
	// Register the routes only once across the tests in this file.
	if !router_inited {
		err := gm.Init(router)
		assert.NoError(t, err)
		router_inited = true
	}
	testcfg := struct {
		name   string
		ts     []int64
		tbname []string
		data   string
		expect string
	}{
		name:   "1",
		tbname: []string{"taosd_cluster_info", "taosd_dnodes_info"},
		ts:     []int64{1703226836761, 1703226836762},
		data: `[{
		"ts": "1703226836761",
		"protocol": 2,
		"tables": [{
			"name": "taosd_cluster_info",
			"metric_groups": [{
				"tags": [{
					"name": "cluster_id",
					"value": "1397715317673023180"
				}],
				"metrics": [{
					"name": "dbs_total",
					"value": 1
				}, {
					"name": "master_uptime",
					"value": 0
				}]
			}]
		}, {
			"name": "taosd_dnodes_info",
			"metric_groups": [{
				"tags": [{
					"name": "cluster_id",
					"value": "1397715317673023180"
				}, {
					"name": "dnode_id",
					"value": "1"
				}, {
					"name": "dnode_ep",
					"value": "ssfood06:6130"
				}],
				"metrics": [{
					"name": "uptime",
					"value": 0
				}, {
					"name": "cpu_engine",
					"value": 0
				}]
			}]
		}]
	}, {
		"ts": "1703226836762",
		"protocol": 2,
		"tables": [{
			"name": "taosd_cluster_info",
			"metric_groups": [{
				"tags": [{
					"name": "cluster_id",
					"value": "1397715317673023180"
				}],
				"metrics": [{
					"name": "dbs_total",
					"value": 1
				}, {
					"name": "master_uptime",
					"value": 0
				}]
			}]
		}, {
			"name": "taosd_dnodes_info",
			"metric_groups": [{
				"tags": [{
					"name": "cluster_id",
					"value": "1397715317673023180"
				}, {
					"name": "dnode_id",
					"value": "1"
				}, {
					"name": "dnode_ep",
					"value": ", =\"ssfood06:6130"
				}],
				"metrics": [{
					"name": "uptime",
					"value": 0
				}, {
					"name": "cpu_engine",
					"value": 0
				}]
			}]
		}]
	}]`,
		expect: "1397715317673023180",
	}
	conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl)
	assert.NoError(t, err)
	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn())
	}()
	t.Run(testcfg.name, func(t *testing.T) {
		w := httptest.NewRecorder()
		body := strings.NewReader(testcfg.data)
		req, _ := http.NewRequest(http.MethodPost, "/general-metric", body)
		router.ServeHTTP(w, req)
		assert.Equal(t, 200, w.Code)
		for _, tbname := range testcfg.tbname {
			for _, ts := range testcfg.ts {
				data, err := conn.Query(context.Background(), fmt.Sprintf("select _ts, cluster_id from %s.%s where _ts=%d", gm.database, tbname, ts), util.GetQidOwn())
				assert.NoError(t, err)
				assert.Equal(t, 1, len(data.Data))
				assert.Equal(t, testcfg.expect, data.Data[0][1])
			}
		}
	})
}
// TestGetSubTableName checks the sub-table naming scheme for every known
// super table, including the MD5 fallback used when a tag value would make
// the table name too long.
func TestGetSubTableName(t *testing.T) {
	tests := []struct {
		stbName string
		tagMap  map[string]string
		want    string
	}{
		{
			stbName: "taosx_sys",
			tagMap:  map[string]string{"taosx_id": "123"},
			want:    "sys_123",
		},
		{
			stbName: "taosx_agent",
			tagMap:  map[string]string{"taosx_id": "123", "agent_id": "456"},
			want:    "agent_123_456",
		},
		{
			stbName: "taosx_connector",
			tagMap:  map[string]string{"taosx_id": "123", "ds_name": "ds", "task_id": "789"},
			want:    "connector_123_ds_789",
		},
		{
			stbName: "taosx_task_example",
			tagMap:  map[string]string{"taosx_id": "123", "task_id": "789"},
			want:    "task_123_example_789",
		},
		{
			stbName: "taosd_cluster_info",
			tagMap:  map[string]string{"cluster_id": "123"},
			want:    "cluster_123",
		},
		{
			stbName: "taosd_vgroups_info",
			tagMap:  map[string]string{"cluster_id": "123", "vgroup_id": "456", "database_name": "db"},
			want:    "vginfo_db_vgroup_456_cluster_123",
		},
		{
			stbName: "taosd_dnodes_info",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123"},
			want:    "dinfo_123_cluster_123",
		},
		{
			stbName: "taosd_dnodes_status",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123"},
			want:    "dstatus_123_cluster_123",
		},
		{
			stbName: "taosd_dnodes_log_dirs",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "log"},
			want:    "dlog_123_log_cluster_123",
		},
		{
			// Oversized dir name: the name component is replaced by its MD5 digest.
			stbName: "taosd_dnodes_log_dirs",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "loglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglog"},
			want:    "dlog_123_9cdc719961a632a27603cd5ed9f1aee2_cluster_123",
		},
		{
			stbName: "taosd_dnodes_data_dirs",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "data", "data_dir_level": "5"},
			want:    "ddata_123_data_level_5_cluster_123",
		},
		{
			// Oversized dir name: the name component is replaced by its MD5 digest.
			stbName: "taosd_dnodes_data_dirs",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "datadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadata", "data_dir_level": "5"},
			want:    "ddata_123_03bf8dffdf6b97e08f347c6ae795998b_level_5_cluster_123",
		},
		{
			stbName: "taosd_mnodes_info",
			tagMap:  map[string]string{"cluster_id": "123", "mnode_id": "12"},
			want:    "minfo_12_cluster_123",
		},
		{
			stbName: "taosd_vnodes_info",
			tagMap:  map[string]string{"cluster_id": "123", "database_name": "db", "vgroup_id": "456", "dnode_id": "789"},
			want:    "vninfo_db_dnode_789_vgroup_456_cluster_123",
		},
		{
			stbName: "taosd_sql_req",
			tagMap:  map[string]string{"username": "user", "sql_type": "select", "result": "success", "dnode_id": "123", "vgroup_id": "456", "cluster_id": "123"},
			want:    "taosdsql_user_select_success_123_vgroup_456_cluster_123",
		},
		{
			stbName: "taos_sql_req",
			tagMap:  map[string]string{"username": "user", "sql_type": "select", "result": "success", "cluster_id": "123"},
			want:    "taossql_user_select_success_cluster_123",
		},
		{
			stbName: "taos_slow_sql",
			tagMap:  map[string]string{"username": "user", "duration": "100ms", "result": "success", "cluster_id": "123"},
			want:    "slowsql_user_100ms_success_cluster_123",
		},
	}
	for _, tt := range tests {
		t.Run(tt.stbName, func(t *testing.T) {
			// Report mismatches through the testing framework instead of
			// panicking: a panic would abort the entire test binary and hide
			// the remaining cases.
			if got := get_sub_table_name_valid(tt.stbName, tt.tagMap); got != tt.want {
				t.Errorf("get_sub_table_name_valid() = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@ -0,0 +1,127 @@
package api
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
crand "crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"log"
"math/big"
"net/http"
"net/http/httputil"
"net/url"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/util"
)
// TestHttps routes TDengine traffic through a local TLS reverse proxy so the
// connector is exercised with usessl enabled end to end.
func TestHttps(t *testing.T) {
	proxy := startProxy()
	defer proxy.Shutdown(context.Background())

	conf := util.GetCfg()
	conf.TDengine.Usessl = true
	conf.TDengine.Port = 34443

	// Make sure the metrics database exists before connecting to it.
	CreateDatabase(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl, conf.Metrics.Database.Name, conf.Metrics.Database.Options)

	conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl)
	assert.NoError(t, err)
	defer func() {
		// Best-effort cleanup; errors are deliberately ignored.
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn())
	}()

	res, err := conn.Query(context.Background(), "select server_version()", util.GetQidOwn())
	assert.NoError(t, err)
	assert.Equal(t, 1, len(res.Data))
}
func generateSelfSignedCert() (tls.Certificate, error) {
priv, err := ecdsa.GenerateKey(elliptic.P384(), crand.Reader)
if err != nil {
return tls.Certificate{}, err
}
notBefore := time.Now()
notAfter := notBefore.Add(365 * 24 * time.Hour)
serialNumber, err := crand.Int(crand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
if err != nil {
return tls.Certificate{}, err
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{"Your Company"},
},
NotBefore: notBefore,
NotAfter: notAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
certDER, err := x509.CreateCertificate(crand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil {
return tls.Certificate{}, err
}
certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
keyPEM, err := x509.MarshalECPrivateKey(priv)
if err != nil {
return tls.Certificate{}, err
}
keyPEMBlock := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyPEM})
return tls.X509KeyPair(certPEM, keyPEMBlock)
}
// startProxy starts an HTTPS reverse proxy on :34443 that forwards to the
// local taosadapter at http://127.0.0.1:6041, using a freshly generated
// self-signed certificate. The returned server is already serving; the
// caller is responsible for Shutdown.
func startProxy() *http.Server {
	// Generate self-signed certificate
	cert, err := generateSelfSignedCert()
	if err != nil {
		log.Fatalf("Failed to generate self-signed certificate: %v", err)
	}
	target := "http://127.0.0.1:6041"
	proxyURL, err := url.Parse(target)
	if err != nil {
		log.Fatalf("Failed to parse target URL: %v", err)
	}
	proxy := httputil.NewSingleHostReverseProxy(proxyURL)
	proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, e error) {
		http.Error(w, "Proxy error", http.StatusBadGateway)
	}
	mux := http.NewServeMux()
	mux.Handle("/", proxy)
	server := &http.Server{
		Addr:      ":34443",
		Handler:   mux,
		TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
		// Setup server timeouts for better handling of idle connections and slowloris attacks
		WriteTimeout: 10 * time.Second,
		ReadTimeout:  10 * time.Second,
		IdleTimeout:  30 * time.Second,
	}
	log.Println("Starting server on :34443")
	go func() {
		// Use a goroutine-local error variable: the original assigned to the
		// captured outer `err`, racing with the caller's use of that variable.
		if serveErr := server.ListenAndServeTLS("", ""); serveErr != nil && serveErr != http.ErrServerClosed {
			log.Fatalf("Failed to start HTTPS server: %v", serveErr)
		}
	}()
	return server
}

View File

@ -0,0 +1,32 @@
package api
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/taosdata/taoskeeper/process"
)
// NodeExporter serves the Prometheus /metrics endpoint backed by the
// taoskeeper metric processor.
type NodeExporter struct {
	processor *process.Processor // source of the metrics registered below
}

// NewNodeExporter wraps a processor in a NodeExporter handler.
func NewNodeExporter(processor *process.Processor) *NodeExporter {
	return &NodeExporter{processor: processor}
}

// Init registers the processor as a Prometheus collector and mounts
// GET /metrics on the given router.
func (z *NodeExporter) Init(c gin.IRouter) {
	reg := prometheus.NewPedanticRegistry()
	reg.MustRegister(z.processor)
	c.GET("metrics", z.myMiddleware(promhttp.HandlerFor(reg, promhttp.HandlerOpts{})))
}

// myMiddleware refreshes the processor's data before each scrape, then
// delegates to the standard promhttp handler.
func (z *NodeExporter) myMiddleware(next http.Handler) gin.HandlerFunc {
	return func(c *gin.Context) {
		z.processor.Process()
		// call Prometheus handler
		next.ServeHTTP(c.Writer, c.Request)
	}
}

478
tools/keeper/api/report.go Normal file
View File

@ -0,0 +1,478 @@
package api
import (
"bytes"
"context"
"fmt"
"strconv"
"strings"
"sync/atomic"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"github.com/taosdata/go-utils/json"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/util"
)
// logger is the shared logger for the report endpoint ("REP" subsystem).
var logger = log.GetLogger("REP")

// createList holds the DDL statements executed at startup. Only the
// keeper_monitor table is still created here; the remaining entries are
// kept commented out — presumably superseded by another ingestion path.
// NOTE(review): confirm before deleting the commented entries.
var createList = []string{
	// CreateClusterInfoSql,
	// CreateDnodeSql,
	// CreateMnodeSql,
	// CreateDnodeInfoSql,
	// CreateDataDirSql,
	// CreateLogDirSql,
	// CreateTempDirSql,
	// CreateVgroupsInfoSql,
	// CreateVnodeRoleSql,
	// CreateSummarySql,
	// CreateGrantInfoSql,
	CreateKeeperSql,
}
// Reporter ingests taosd monitor reports posted to the /report route and
// writes the flattened metrics into a TDengine database.
type Reporter struct {
	username        string                 // TDengine user for all connections
	password        string                 // password for username
	host            string                 // TDengine host
	port            int                    // TDengine port
	usessl          bool                   // connect over HTTPS when true
	dbname          string                 // target metrics database
	databaseOptions map[string]interface{} // extra "create database" options
	totalRep        atomic.Value           // holds an int: count of received reports
}
// NewReporter builds a Reporter from the TDengine connection settings and
// metrics-database settings in conf. The report counter is seeded with 0 so
// that Load never observes a nil value.
func NewReporter(conf *config.Config) *Reporter {
	reporter := &Reporter{
		username:        conf.TDengine.Username,
		password:        conf.TDengine.Password,
		host:            conf.TDengine.Host,
		port:            conf.TDengine.Port,
		usessl:          conf.TDengine.Usessl,
		dbname:          conf.Metrics.Database.Name,
		databaseOptions: conf.Metrics.Database.Options,
	}
	reporter.totalRep.Store(0)
	return reporter
}
// Init mounts POST /report and prepares the metrics database and tables.
// The field-type detection migrates schemas created by servers older than
// v3.0.3.0.
func (r *Reporter) Init(c gin.IRouter) {
	c.POST("report", r.handlerFunc())
	r.createDatabase()
	r.creatTables()
	// TODO: legacy schema migration — can be deleted once pre-3.0.3.0
	// servers no longer need to be supported.
	if r.shouldDetectFields() {
		r.detectGrantInfoFieldType()
		r.detectClusterInfoFieldType()
		r.detectVgroupsInfoType()
	}
}
// getConn opens a new connection using the reporter's credentials.
// It panics on failure: callers run during startup/migration, where a
// missing database connection is fatal.
func (r *Reporter) getConn() *db.Connector {
	conn, err := db.NewConnector(r.username, r.password, r.host, r.port, r.usessl)
	if err != nil {
		qid := util.GetQidOwn()
		logger := logger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)
		logger.Errorf("connect to database error, msg:%s", err)
		panic(err)
	}
	return conn
}
// detectGrantInfoFieldType widens legacy INT columns of grants_info to
// bigint and drops tags that no longer exist in the schema.
func (r *Reporter) detectGrantInfoFieldType() {
	// `expire_time` `timeseries_used` `timeseries_total` in table `grant_info` changed to bigint from TS-3003.
	ctx := context.Background()
	conn := r.getConn()
	defer r.closeConn(conn)
	r.detectFieldType(ctx, conn, "grants_info", "expire_time", "bigint")
	r.detectFieldType(ctx, conn, "grants_info", "timeseries_used", "bigint")
	r.detectFieldType(ctx, conn, "grants_info", "timeseries_total", "bigint")
	if r.tagExist(ctx, conn, "grants_info", "dnode_id") {
		r.dropTag(ctx, conn, "grants_info", "dnode_id")
	}
	if r.tagExist(ctx, conn, "grants_info", "dnode_ep") {
		r.dropTag(ctx, conn, "grants_info", "dnode_ep")
	}
}

// detectClusterInfoFieldType widens the legacy INT tbs_total column of
// cluster_info to bigint.
func (r *Reporter) detectClusterInfoFieldType() {
	// `tbs_total` in table `cluster_info` changed to bigint from TS-3003.
	ctx := context.Background()
	conn := r.getConn()
	defer r.closeConn(conn)
	r.detectFieldType(ctx, conn, "cluster_info", "tbs_total", "bigint")
	// add column `topics_total` and `streams_total` from TD-22032
	// if exists, _ := r.columnInfo(ctx, conn, "cluster_info", "topics_total"); !exists {
	// logger.Warningf("## %s.cluster_info.topics_total not exists, will add it", r.dbname)
	// r.addColumn(ctx, conn, "cluster_info", "topics_total", "int")
	// }
	// if exists, _ := r.columnInfo(ctx, conn, "cluster_info", "streams_total"); !exists {
	// logger.Warningf("## %s.cluster_info.streams_total not exists, will add it", r.dbname)
	// r.addColumn(ctx, conn, "cluster_info", "streams_total", "int")
	// }
}

// detectVgroupsInfoType widens the legacy INT tables_num column of
// vgroups_info to bigint.
func (r *Reporter) detectVgroupsInfoType() {
	// `tables_num` in table `vgroups_info` changed to bigint from TS-3003.
	ctx := context.Background()
	conn := r.getConn()
	defer r.closeConn(conn)
	r.detectFieldType(ctx, conn, "vgroups_info", "tables_num", "bigint")
}
// detectFieldType re-creates table.field with fieldType when the column is
// still the legacy INT type; otherwise it is left untouched. TDengine has no
// "alter column type", so the column is dropped and re-added.
func (r *Reporter) detectFieldType(ctx context.Context, conn *db.Connector, table, field, fieldType string) {
	_, currentType := r.columnInfo(ctx, conn, table, field)
	if currentType != "INT" {
		return
	}
	logger.Warningf("%s.%s.%s type is %s, will change to %s", r.dbname, table, field, currentType, fieldType)
	// drop the old column, then re-add it with the target type
	r.dropColumn(ctx, conn, table, field)
	r.addColumn(ctx, conn, table, field, fieldType)
}
// shouldDetectFields reports whether the legacy-schema migration should run,
// i.e. whether the server version is at least v3.0.3.0. On any error the
// migration is skipped (returns false).
func (r *Reporter) shouldDetectFields() bool {
	ctx := context.Background()
	conn := r.getConn()
	defer r.closeConn(conn)
	version, err := r.serverVersion(ctx, conn)
	if err != nil {
		logger.Errorf("get server version error:%s", err)
		return false
	}
	// if server version is less than v3.0.3.0, should not detect fields.
	versions := strings.Split(version, ".")
	if len(versions) < 4 {
		logger.Errorf("get server version error. version:%s", version)
		return false
	}
	v1, _ := strconv.Atoi(versions[0])
	v2, _ := strconv.Atoi(versions[1])
	v3, _ := strconv.Atoi(versions[2])
	// Want version >= 3.0.3. The original `v1 > 3 || v2 > 0 || v3 >= 3`
	// wrongly accepted 2.x servers (e.g. 2.6.0); gate on the major version.
	if v1 != 3 {
		return v1 > 3
	}
	if v2 > 0 {
		return true
	}
	return v3 >= 3
}
// serverVersion returns the TDengine server version string, or the empty
// string (with a logged error) when the query result is not a 1x1 table.
func (r *Reporter) serverVersion(ctx context.Context, conn *db.Connector) (version string, err error) {
	res, err := conn.Query(ctx, "select server_version()", util.GetQidOwn())
	if err != nil {
		logger.Errorf("get server version error, msg:%s", err)
		return
	}
	if len(res.Data) == 0 {
		logger.Errorf("get server version error. response:%+v", res)
		return
	}
	// Reject anything but an exact 1x1 result. The original used `&&`,
	// which only fired when BOTH dimensions were wrong.
	if len(res.Data) != 1 || len(res.Data[0]) != 1 {
		logger.Errorf("get server version error. response:%+v", res)
		return
	}
	version = res.Data[0][0].(string)
	return
}
// columnInfo looks up field's column type (upper-cased) in table within the
// metrics database. exists is false when the column is absent. Panics on
// query failure or a malformed result (startup-time use only).
func (r *Reporter) columnInfo(ctx context.Context, conn *db.Connector, table string, field string) (exists bool, colType string) {
	res, err := conn.Query(ctx, fmt.Sprintf("select col_type from information_schema.ins_columns where table_name='%s' and db_name='%s' and col_name='%s'", table, r.dbname, field), util.GetQidOwn())
	if err != nil {
		logger.Errorf("get %s field type error, msg:%s", r.dbname, err)
		panic(err)
	}
	if len(res.Data) == 0 {
		return
	}
	// Expect exactly one 1-column row. The original used `&&`, which only
	// rejected the result when BOTH dimensions were wrong.
	if len(res.Data) != 1 || len(res.Data[0]) != 1 {
		logger.Errorf("get field type for %s error. response:%+v", table, res)
		panic(fmt.Sprintf("get field type for %s error. response:%+v", table, res))
	}
	exists = true
	colType = res.Data[0][0].(string)
	colType = strings.ToUpper(colType)
	return
}
// tagExist reports whether super table stable has a tag named tag in the
// metrics database. Panics on query failure or a malformed result
// (startup-time use only).
func (r *Reporter) tagExist(ctx context.Context, conn *db.Connector, stable string, tag string) (exists bool) {
	res, err := conn.Query(ctx, fmt.Sprintf("select tag_name from information_schema.ins_tags where stable_name='%s' and db_name='%s' and tag_name='%s'", stable, r.dbname, tag), util.GetQidOwn())
	if err != nil {
		logger.Errorf("get %s tag_name error, msg:%s", r.dbname, err)
		panic(err)
	}
	if len(res.Data) == 0 {
		exists = false
		return
	}
	// Expect exactly one 1-column row. The original used `&&`, which only
	// rejected the result when BOTH dimensions were wrong.
	if len(res.Data) != 1 || len(res.Data[0]) != 1 {
		logger.Errorf("get tag_name for %s error. response:%+v", stable, res)
		panic(fmt.Sprintf("get tag_name for %s error. response:%+v", stable, res))
	}
	exists = true
	return
}
// dropColumn removes a column from a normal table in the metrics database.
// Panics on failure (startup-time migration only).
func (r *Reporter) dropColumn(ctx context.Context, conn *db.Connector, table string, field string) {
	if _, err := conn.Exec(ctx, fmt.Sprintf("alter table %s.%s drop column %s", r.dbname, table, field), util.GetQidOwn()); err != nil {
		logger.Errorf("drop column %s from table %s error, msg:%s", field, table, err)
		panic(err)
	}
}

// dropTag removes a tag from a super table in the metrics database.
// Panics on failure (startup-time migration only).
func (r *Reporter) dropTag(ctx context.Context, conn *db.Connector, stable string, tag string) {
	if _, err := conn.Exec(ctx, fmt.Sprintf("alter stable %s.%s drop tag %s", r.dbname, stable, tag), util.GetQidOwn()); err != nil {
		logger.Errorf("drop tag %s from stable %s error, msg:%s", tag, stable, err)
		panic(err)
	}
}

// addColumn adds a column of the given type to a normal table in the
// metrics database. Panics on failure (startup-time migration only).
func (r *Reporter) addColumn(ctx context.Context, conn *db.Connector, table string, field string, fieldType string) {
	if _, err := conn.Exec(ctx, fmt.Sprintf("alter table %s.%s add column %s %s", r.dbname, table, field, fieldType), util.GetQidOwn()); err != nil {
		logger.Errorf("add column %s to table %s error, msg:%s", field, table, err)
		panic(err)
	}
}
// createDatabase creates the metrics database (if absent) with the
// configured options. Panics on failure: nothing can be stored without it.
func (r *Reporter) createDatabase() {
	ctx := context.Background()
	conn := r.getConn()
	defer r.closeConn(conn)
	createDBSql := r.generateCreateDBSql()
	logger.Warningf("create database sql: %s", createDBSql)
	if _, err := conn.Exec(ctx, createDBSql, util.GetQidOwn()); err != nil {
		logger.Errorf("create database %s error, msg:%v", r.dbname, err)
		panic(err)
	}
}
// generateCreateDBSql renders "create database if not exists <db> [<opt>
// <value> ]..." from the configured options. String option values are
// single-quoted; all other types are formatted with %v. Option order follows
// Go's map iteration order, as in the original.
func (r *Reporter) generateCreateDBSql() string {
	var sb strings.Builder
	sb.WriteString("create database if not exists ")
	sb.WriteString(r.dbname)
	for key, value := range r.databaseOptions {
		sb.WriteString(" ")
		sb.WriteString(key)
		if s, ok := value.(string); ok {
			sb.WriteString(fmt.Sprintf(" '%s'", s))
		} else {
			sb.WriteString(fmt.Sprintf(" %v", value))
		}
		sb.WriteString(" ")
	}
	return sb.String()
}
// creatTables executes every DDL statement in createList against the
// metrics database. Individual failures are logged and skipped so one bad
// statement does not abort startup. (Name keeps its historical typo.)
func (r *Reporter) creatTables() {
	ctx := context.Background()
	conn, err := db.NewConnectorWithDb(r.username, r.password, r.host, r.port, r.dbname, r.usessl)
	if err != nil {
		logger.Errorf("connect to database error, msg:%s", err)
		return
	}
	defer r.closeConn(conn)
	for _, createSql := range createList {
		logger.Infof("execute sql:%s", createSql)
		if _, err = conn.Exec(ctx, createSql, util.GetQidOwn()); err != nil {
			logger.Errorf("execute sql:%s, error:%s", createSql, err)
		}
	}
}

// closeConn closes a connector, logging (but not propagating) any error.
func (r *Reporter) closeConn(conn *db.Connector) {
	if err := conn.Close(); err != nil {
		logger.Errorf("close connection error, msg:%s", err)
	}
}
// handlerFunc returns the gin handler for POST /report. It parses a taosd
// monitor Report, converts it into insert statements for the legacy monitor
// tables, and executes them. Errors are logged and the request completes
// silently (the response status is left untouched, as taosd ignores it).
func (r *Reporter) handlerFunc() gin.HandlerFunc {
	return func(c *gin.Context) {
		qid := util.GetQid(c.GetHeader("X-QID"))
		logger := logger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)
		r.recordTotalRep()
		// data parse
		data, err := c.GetRawData()
		if err != nil {
			logger.Errorf("receiving taosd data error, msg:%s", err)
			return
		}
		var report Report
		logger.Tracef("report data:%s", string(data))
		if e := json.Unmarshal(data, &report); e != nil {
			// Log the unmarshal error `e` — the original logged `err`,
			// which is nil on this path.
			logger.Errorf("error occurred while unmarshal request, data:%s, error:%s", data, e)
			return
		}
		// Build all insert statements before opening the connection.
		var sqls []string
		if report.ClusterInfo != nil {
			sqls = append(sqls, insertClusterInfoSql(*report.ClusterInfo, report.ClusterID, report.Protocol, report.Ts)...)
		}
		sqls = append(sqls, insertDnodeSql(report.DnodeInfo, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts))
		if report.GrantInfo != nil {
			sqls = append(sqls, insertGrantSql(*report.GrantInfo, report.DnodeID, report.ClusterID, report.Ts))
		}
		sqls = append(sqls, insertDataDirSql(report.DiskInfos, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)...)
		for _, group := range report.VgroupInfos {
			sqls = append(sqls, insertVgroupSql(group, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)...)
		}
		sqls = append(sqls, insertLogSummary(report.LogInfos, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts))
		conn, err := db.NewConnectorWithDb(r.username, r.password, r.host, r.port, r.dbname, r.usessl)
		if err != nil {
			logger.Errorf("connect to database error, msg:%s", err)
			return
		}
		defer r.closeConn(conn)
		ctx := context.Background()
		// Execute each statement independently; a failure does not stop the rest.
		for _, sql := range sqls {
			logger.Tracef("execute sql:%s", sql)
			if _, err := conn.Exec(ctx, sql, util.GetQidOwn()); err != nil {
				logger.Errorf("execute sql error, sql:%s, error:%s", sql, err)
			}
		}
	}
}
// recordTotalRep atomically increments the received-report counter, retrying
// the compare-and-swap up to 3 times under contention. The current value is
// reloaded on every attempt — the original loaded it once before the loop,
// so after any concurrent update every remaining CAS compared against a
// stale value and the increment was lost.
func (r *Reporter) recordTotalRep() {
	for i := 0; i < 3; i++ {
		old := r.totalRep.Load().(int)
		if r.totalRep.CompareAndSwap(old, old+1) {
			return
		}
	}
}

// GetTotalRep exposes the report counter (holds an int) for metrics export.
func (r *Reporter) GetTotalRep() *atomic.Value {
	return &r.totalRep
}
// insertClusterInfoSql builds the insert statements for one cluster report:
// one d_info row per dnode, one m_info row per mnode, and a single
// cluster_info row with the aggregated totals/alive counts. Sub-table names
// are "<prefix>_<ClusterID><node id>".
func insertClusterInfoSql(info ClusterInfo, ClusterID string, protocol int, ts string) []string {
	var sqls []string
	var dtotal, dalive, mtotal, malive int
	for _, dnode := range info.Dnodes {
		sqls = append(sqls, fmt.Sprintf("insert into d_info_%s using d_info tags (%d, '%s', '%s') values ('%s', '%s')",
			ClusterID+strconv.Itoa(dnode.DnodeID), dnode.DnodeID, dnode.DnodeEp, ClusterID, ts, dnode.Status))
		dtotal++
		// a dnode counts as alive only in "ready" status
		if "ready" == dnode.Status {
			dalive++
		}
	}
	for _, mnode := range info.Mnodes {
		sqls = append(sqls, fmt.Sprintf("insert into m_info_%s using m_info tags (%d, '%s', '%s') values ('%s', '%s')",
			ClusterID+strconv.Itoa(mnode.MnodeID), mnode.MnodeID, mnode.MnodeEp, ClusterID, ts, mnode.Role))
		mtotal++
		//LEADER FOLLOWER CANDIDATE ERROR
		if "ERROR" != mnode.Role {
			malive++
		}
	}
	// Column order must match CreateClusterInfoSql in tables.go.
	sqls = append(sqls, fmt.Sprintf(
		"insert into cluster_info_%s using cluster_info tags('%s') (ts, first_ep, first_ep_dnode_id, version, "+
			"master_uptime, monitor_interval, dbs_total, tbs_total, stbs_total, dnodes_total, dnodes_alive, "+
			"mnodes_total, mnodes_alive, vgroups_total, vgroups_alive, vnodes_total, vnodes_alive, connections_total, "+
			"topics_total, streams_total, protocol) values ('%s', '%s', %d, '%s', %f, %d, %d, %d, %d, %d, %d, %d, %d, "+
			"%d, %d, %d, %d, %d, %d, %d, %d)",
		ClusterID, ClusterID, ts, info.FirstEp, info.FirstEpDnodeID, info.Version, info.MasterUptime, info.MonitorInterval,
		info.DbsTotal, info.TbsTotal, info.StbsTotal, dtotal, dalive, mtotal, malive, info.VgroupsTotal, info.VgroupsAlive,
		info.VnodesTotal, info.VnodesAlive, info.ConnectionsTotal, info.TopicsTotal, info.StreamsTotal, protocol))
	return sqls
}
// insertDnodeSql builds the dnodes_info insert for one dnode report.
// The value order must match the column order of CreateDnodeInfoSql in
// tables.go exactly (no explicit column list is used).
func insertDnodeSql(info DnodeInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) string {
	return fmt.Sprintf("insert into dnode_info_%s using dnodes_info tags (%d, '%s', '%s') values ('%s', %f, %f, %f, %f, %d, %d, %d, %d, %d, %d, %f, %f, %f, %f, %f, %f, %d, %f, %d, %d, %f, %d, %d, %f, %d, %d, %d, %d, %d, %d, %d)",
		ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID,
		ts, info.Uptime, info.CPUEngine, info.CPUSystem, info.CPUCores, info.MemEngine, info.MemSystem, info.MemTotal,
		info.DiskEngine, info.DiskUsed, info.DiskTotal, info.NetIn, info.NetOut, info.IoRead, info.IoWrite,
		info.IoReadDisk, info.IoWriteDisk, info.ReqSelect, info.ReqSelectRate, info.ReqInsert, info.ReqInsertSuccess,
		info.ReqInsertRate, info.ReqInsertBatch, info.ReqInsertBatchSuccess, info.ReqInsertBatchRate, info.Errors,
		info.VnodesNum, info.Masters, info.HasMnode, info.HasQnode, info.HasSnode, info.HasBnode)
}
// insertDataDirSql builds one data_dir insert per data directory plus a
// single log_dir and temp_dir insert for the dnode. Sizes are truncated to
// their integer part (bytes are stored as bigint).
func insertDataDirSql(disk DiskInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) []string {
	var sqls []string
	for _, data := range disk.Datadir {
		sqls = append(sqls,
			fmt.Sprintf("insert into data_dir_%s using data_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d, %d)",
				ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID,
				ts, data.Name, data.Level, data.Avail.IntPart(), data.Used.IntPart(), data.Total.IntPart()),
		)
	}
	sqls = append(sqls,
		fmt.Sprintf("insert into log_dir_%s using log_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d)",
			ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID,
			ts, disk.Logdir.Name, disk.Logdir.Avail.IntPart(), disk.Logdir.Used.IntPart(), disk.Logdir.Total.IntPart()),
		fmt.Sprintf("insert into temp_dir_%s using temp_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d)",
			ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID,
			ts, disk.Tempdir.Name, disk.Tempdir.Avail.IntPart(), disk.Tempdir.Used.IntPart(), disk.Tempdir.Total.IntPart()),
	)
	return sqls
}
// insertVgroupSql builds a vgroups_info insert for one vgroup plus one
// vnodes_role insert per member vnode. Fixes two defects in the generated
// SQL: the original column list had a trailing comma ("..., status, )",
// which is invalid SQL, and vgroup_id (an int column) was quoted as '%d'.
func insertVgroupSql(g VgroupInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) []string {
	var sqls []string
	sqls = append(sqls, fmt.Sprintf("insert into vgroups_info_%s using vgroups_info tags (%d, '%s', '%s') "+
		"(ts, vgroup_id, database_name, tables_num, status) values ('%s', %d, '%s', %d, '%s')",
		ClusterID+strconv.Itoa(DnodeID)+strconv.Itoa(g.VgroupID), DnodeID, DnodeEp, ClusterID,
		ts, g.VgroupID, g.DatabaseName, g.TablesNum, g.Status))
	for _, v := range g.Vnodes {
		sqls = append(sqls, fmt.Sprintf("insert into vnodes_role_%s using vnodes_role tags (%d, '%s', '%s') values ('%s', '%s')",
			ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, ts, v.VnodeRole))
	}
	return sqls
}
// insertLogSummary builds the log_summary insert for one dnode, picking the
// per-level totals out of the report. Levels other than error/info/debug/
// trace are ignored; missing levels default to 0.
func insertLogSummary(log LogInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) string {
	var errCount, infoCount, debugCount, traceCount int
	for _, summary := range log.Summary {
		switch summary.Level {
		case "error":
			errCount = summary.Total
		case "info":
			infoCount = summary.Total
		case "debug":
			debugCount = summary.Total
		case "trace":
			traceCount = summary.Total
		}
	}
	return fmt.Sprintf("insert into log_summary_%s using log_summary tags (%d, '%s', '%s') values ('%s', %d, %d, %d, %d)",
		ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, ts, errCount, infoCount, debugCount, traceCount)
}
// insertGrantSql builds the grants_info insert for one license report.
// The sub-table name includes DnodeID even though only cluster_id is a tag.
func insertGrantSql(g GrantInfo, DnodeID int, ClusterID string, ts string) string {
	return fmt.Sprintf("insert into grants_info_%s using grants_info tags ('%s') (ts, expire_time, "+
		"timeseries_used, timeseries_total) values ('%s', %d, %d, %d)", ClusterID+strconv.Itoa(DnodeID), ClusterID, ts, g.ExpireTime, g.TimeseriesUsed, g.TimeseriesTotal)
}

286
tools/keeper/api/tables.go Normal file
View File

@ -0,0 +1,286 @@
package api
import (
"strconv"
"github.com/shopspring/decimal"
)
// Report is the JSON payload that taosd posts to /report. Fields marked
// "only reported by master" are nil/empty in reports from other dnodes.
type Report struct {
	Ts          string       `json:"ts"`
	DnodeID     int          `json:"dnode_id"`
	DnodeEp     string       `json:"dnode_ep"`
	ClusterID   string       `json:"cluster_id"`
	Protocol    int          `json:"protocol"`
	ClusterInfo *ClusterInfo `json:"cluster_info"` // only reported by master
	StbInfos    []StbInfo    `json:"stb_infos"`
	VgroupInfos []VgroupInfo `json:"vgroup_infos"` // only reported by master
	GrantInfo   *GrantInfo   `json:"grant_info"`   // only reported by master
	DnodeInfo   DnodeInfo    `json:"dnode_info"`
	DiskInfos   DiskInfo     `json:"disk_infos"`
	LogInfos    LogInfo      `json:"log_infos"`
}
// ClusterInfo is the cluster-wide section of a report (master dnode only).
type ClusterInfo struct {
	FirstEp          string  `json:"first_ep"`
	FirstEpDnodeID   int     `json:"first_ep_dnode_id"`
	Version          string  `json:"version"`
	MasterUptime     float32 `json:"master_uptime"`
	MonitorInterval  int     `json:"monitor_interval"`
	DbsTotal         int     `json:"dbs_total"`
	TbsTotal         int64   `json:"tbs_total"` // change to bigint since TS-3003
	StbsTotal        int     `json:"stbs_total"`
	VgroupsTotal     int     `json:"vgroups_total"`
	VgroupsAlive     int     `json:"vgroups_alive"`
	VnodesTotal      int     `json:"vnodes_total"`
	VnodesAlive      int     `json:"vnodes_alive"`
	ConnectionsTotal int     `json:"connections_total"`
	TopicsTotal      int     `json:"topics_total"`
	StreamsTotal     int     `json:"streams_total"`
	Dnodes           []Dnode `json:"dnodes"`
	Mnodes           []Mnode `json:"mnodes"`
}

// dnodeEpLen is the nchar width of the dnode/mnode endpoint tag columns.
var dnodeEpLen = strconv.Itoa(255)

// CreateClusterInfoSql defines the cluster_info super table; the column
// order is relied on by insertClusterInfoSql in report.go.
var CreateClusterInfoSql = "create table if not exists cluster_info (" +
	"ts timestamp, " +
	"first_ep binary(134), " +
	"first_ep_dnode_id int, " +
	"version binary(12), " +
	"master_uptime float, " +
	"monitor_interval int, " +
	"dbs_total int, " +
	"tbs_total bigint, " + // change to bigint since TS-3003
	"stbs_total int, " +
	"dnodes_total int, " +
	"dnodes_alive int, " +
	"mnodes_total int, " +
	"mnodes_alive int, " +
	"vgroups_total int, " +
	"vgroups_alive int, " +
	"vnodes_total int, " +
	"vnodes_alive int, " +
	"connections_total int, " +
	"topics_total int, " +
	"streams_total int, " +
	"protocol int " +
	") tags (cluster_id nchar(32))"
// Dnode is one data node entry in ClusterInfo.
type Dnode struct {
	DnodeID int    `json:"dnode_id"`
	DnodeEp string `json:"dnode_ep"`
	Status  string `json:"status"` // "ready" means alive
}

// CreateDnodeSql defines the per-dnode status super table.
var CreateDnodeSql = "create table if not exists d_info (" +
	"ts timestamp, " +
	"status binary(10)" +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

// Mnode is one management node entry in ClusterInfo.
type Mnode struct {
	MnodeID int    `json:"mnode_id"`
	MnodeEp string `json:"mnode_ep"`
	Role    string `json:"role"` // LEADER/FOLLOWER/CANDIDATE/ERROR
}

// CreateMnodeSql defines the per-mnode role super table.
var CreateMnodeSql = "create table if not exists m_info (" +
	"ts timestamp, " +
	"role binary(10)" +
	") tags (mnode_id int, mnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"
// DnodeInfo is the per-dnode resource/request section of a report.
// Field order mirrors the column order of CreateDnodeInfoSql below, which
// insertDnodeSql in report.go depends on.
type DnodeInfo struct {
	Uptime                float32 `json:"uptime"`
	CPUEngine             float32 `json:"cpu_engine"`
	CPUSystem             float32 `json:"cpu_system"`
	CPUCores              float32 `json:"cpu_cores"`
	MemEngine             int     `json:"mem_engine"`
	MemSystem             int     `json:"mem_system"`
	MemTotal              int     `json:"mem_total"`
	DiskEngine            int64   `json:"disk_engine"`
	DiskUsed              int64   `json:"disk_used"`
	DiskTotal             int64   `json:"disk_total"`
	NetIn                 float32 `json:"net_in"`
	NetOut                float32 `json:"net_out"`
	IoRead                float32 `json:"io_read"`
	IoWrite               float32 `json:"io_write"`
	IoReadDisk            float32 `json:"io_read_disk"`
	IoWriteDisk           float32 `json:"io_write_disk"`
	ReqSelect             int     `json:"req_select"`
	ReqSelectRate         float32 `json:"req_select_rate"`
	ReqInsert             int     `json:"req_insert"`
	ReqInsertSuccess      int     `json:"req_insert_success"`
	ReqInsertRate         float32 `json:"req_insert_rate"`
	ReqInsertBatch        int     `json:"req_insert_batch"`
	ReqInsertBatchSuccess int     `json:"req_insert_batch_success"`
	ReqInsertBatchRate    float32 `json:"req_insert_batch_rate"`
	Errors                int     `json:"errors"`
	VnodesNum             int     `json:"vnodes_num"`
	Masters               int     `json:"masters"`
	HasMnode              int8    `json:"has_mnode"`
	HasQnode              int8    `json:"has_qnode"`
	HasSnode              int8    `json:"has_snode"`
	HasBnode              int8    `json:"has_bnode"`
}

// CreateDnodeInfoSql defines the dnodes_info super table; keep column order
// in sync with DnodeInfo above and insertDnodeSql in report.go.
var CreateDnodeInfoSql = "create table if not exists dnodes_info (" +
	"ts timestamp, " +
	"uptime float, " +
	"cpu_engine float, " +
	"cpu_system float, " +
	"cpu_cores float, " +
	"mem_engine int, " +
	"mem_system int, " +
	"mem_total int, " +
	"disk_engine bigint, " +
	"disk_used bigint, " +
	"disk_total bigint, " +
	"net_in float, " +
	"net_out float, " +
	"io_read float, " +
	"io_write float, " +
	"io_read_disk float, " +
	"io_write_disk float, " +
	"req_select int, " +
	"req_select_rate float, " +
	"req_insert int, " +
	"req_insert_success int, " +
	"req_insert_rate float, " +
	"req_insert_batch int, " +
	"req_insert_batch_success int, " +
	"req_insert_batch_rate float, " +
	"errors int, " +
	"vnodes_num int, " +
	"masters int, " +
	"has_mnode int, " +
	"has_qnode int, " +
	"has_snode int, " +
	"has_bnode int " +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"
// DiskInfo groups the disk sections of a report: N data dirs plus one log
// and one temp dir.
type DiskInfo struct {
	Datadir []DataDir `json:"datadir"`
	Logdir  LogDir    `json:"logdir"`
	Tempdir TempDir   `json:"tempdir"`
}

// DataDir is one tiered data directory; sizes are decimals (bytes) and are
// truncated to integers when inserted.
type DataDir struct {
	Name  string          `json:"name"`
	Level int             `json:"level"` // storage tier level
	Avail decimal.Decimal `json:"avail"`
	Used  decimal.Decimal `json:"used"`
	Total decimal.Decimal `json:"total"`
}

// CreateDataDirSql defines the data_dir super table. `level` is escaped
// because it is a reserved word.
var CreateDataDirSql = "create table if not exists data_dir (" +
	"ts timestamp, " +
	"name nchar(200), " +
	"`level` int, " +
	"avail bigint, " +
	"used bigint, " +
	"total bigint" +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

// LogDir is the log directory usage section of a report.
type LogDir struct {
	Name  string          `json:"name"`
	Avail decimal.Decimal `json:"avail"`
	Used  decimal.Decimal `json:"used"`
	Total decimal.Decimal `json:"total"`
}

// CreateLogDirSql defines the log_dir super table.
var CreateLogDirSql = "create table if not exists log_dir (" +
	"ts timestamp, " +
	"name nchar(200), " +
	"avail bigint, " +
	"used bigint, " +
	"total bigint" +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

// TempDir is the temp directory usage section of a report.
type TempDir struct {
	Name  string          `json:"name"`
	Avail decimal.Decimal `json:"avail"`
	Used  decimal.Decimal `json:"used"`
	Total decimal.Decimal `json:"total"`
}

// CreateTempDirSql defines the temp_dir super table.
var CreateTempDirSql = "create table if not exists temp_dir(" +
	"ts timestamp, " +
	"name nchar(200), " +
	"avail bigint, " +
	"used bigint, " +
	"total bigint " +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"
// StbInfo is one super table entry in a report.
type StbInfo struct {
	StbName      string `json:"stb_name"`
	DataBaseName string `json:"database_name"`
}

// VgroupInfo is one vgroup entry in a report (master dnode only).
type VgroupInfo struct {
	VgroupID     int     `json:"vgroup_id"`
	DatabaseName string  `json:"database_name"`
	TablesNum    int64   `json:"tables_num"`
	Status       string  `json:"status"`
	Vnodes       []Vnode `json:"vnodes"`
}

// CreateVgroupsInfoSql defines the vgroups_info super table.
var CreateVgroupsInfoSql = "create table if not exists vgroups_info (" +
	"ts timestamp, " +
	"vgroup_id int, " +
	"database_name binary(33), " +
	"tables_num bigint, " + // change to bigint since TS-3003
	"status binary(512) " +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

// Vnode is one vnode member of a vgroup.
type Vnode struct {
	DnodeID   int    `json:"dnode_id"`
	VnodeRole string `json:"vnode_role"`
}

// CreateVnodeRoleSql defines the vnodes_role super table.
var CreateVnodeRoleSql = "create table if not exists vnodes_role (" +
	"ts timestamp, " +
	"vnode_role binary(10) " +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"
// LogInfo is the log section of a report: per-level totals.
type LogInfo struct {
	Summary []Summary `json:"summary"`
}

// Log is a single log line entry.
// NOTE(review): not referenced by the visible insert code — confirm it is
// still used before removing.
type Log struct {
	Ts      string `json:"ts"`
	Level   string `json:"level"`
	Content string `json:"content"`
}

// Summary is the count of log lines at one level.
type Summary struct {
	Level string `json:"level"` // error/info/debug/trace
	Total int    `json:"total"`
}

// CreateSummarySql defines the log_summary super table.
var CreateSummarySql = "create table if not exists log_summary(" +
	"ts timestamp, " +
	"error int, " +
	"info int, " +
	"debug int, " +
	"trace int " +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

// GrantInfo is the license section of a report (master dnode only);
// all fields are bigint since TS-3003.
type GrantInfo struct {
	ExpireTime      int64 `json:"expire_time"`
	TimeseriesUsed  int64 `json:"timeseries_used"`
	TimeseriesTotal int64 `json:"timeseries_total"`
}

// CreateGrantInfoSql defines the grants_info super table.
var CreateGrantInfoSql = "create table if not exists grants_info(" +
	"ts timestamp, " +
	"expire_time bigint, " +
	"timeseries_used bigint, " +
	"timeseries_total bigint " +
	") tags (cluster_id nchar(32))"

// CreateKeeperSql defines keeper_monitor, taoskeeper's self-monitoring
// table — the only entry still created via createList in report.go.
var CreateKeeperSql = "create table if not exists keeper_monitor (" +
	"ts timestamp, " +
	"cpu float, " +
	"mem float, " +
	"total_reports int " +
	") tags (identify nchar(50))"

113
tools/keeper/api/zabbix.go Normal file
View File

@ -0,0 +1,113 @@
package api
import (
"net/http"
"sort"
"strings"
"github.com/gin-gonic/gin"
"github.com/taosdata/taoskeeper/process"
"github.com/taosdata/taoskeeper/util/pool"
)
// Zabbix serves processor metrics in a Zabbix-consumable JSON format,
// pre-partitioned into a numeric (gauge/counter) group and a string (info) group.
type Zabbix struct {
	processor  *process.Processor
	floatGroup []*process.Metric
	strGroup   []*process.Metric
}
// NewZabbix builds a Zabbix endpoint around the given processor and
// pre-sorts its metrics into the float/string groups.
func NewZabbix(processor *process.Processor) *Zabbix {
	zbx := Zabbix{processor: processor}
	zbx.processorMetrics()
	return &zbx
}
// zabbixMetric is the JSON envelope returned to Zabbix.
type zabbixMetric struct {
	Data []*ZMetric `json:"data"`
}

// ZMetric is one metric sample; the "{#METRIC}" key follows the Zabbix
// low-level-discovery macro naming convention.
type ZMetric struct {
	Metric string      `json:"{#METRIC}"`
	Key    string      `json:"key"`
	Value  interface{} `json:"value"`
}

// Metric value categories, each served on its own endpoint.
const (
	FloatType = iota + 1
	StringType
)
// Init registers the /zabbix/float and /zabbix/string routes on the router.
func (z *Zabbix) Init(c gin.IRouter) {
	group := c.Group("zabbix")
	group.GET("float", z.getFloat)
	group.GET("string", z.getString)
}
// getFloat serves the numeric (gauge/counter) metric group.
func (z *Zabbix) getFloat(c *gin.Context) {
	z.returnData(c, FloatType)
}

// getString serves the string (info) metric group.
func (z *Zabbix) getString(c *gin.Context) {
	z.returnData(c, StringType)
}
// returnData renders all metrics of the requested group (float or string)
// as the Zabbix JSON payload; metric name and key are both
// "FQName[,sorted_label_pairs]".
func (z *Zabbix) returnData(c *gin.Context, valueType int) {
	var group []*process.Metric
	if valueType == FloatType {
		group = z.floatGroup
	} else if valueType == StringType {
		group = z.strGroup
	}
	var payload zabbixMetric
	buf := pool.BytesPoolGet()
	defer pool.BytesPoolPut(buf)
	for _, m := range group {
		for _, v := range m.GetValue() {
			buf.Reset()
			buf.WriteString(m.FQName)
			if tags := z.sortLabel(v.Label); len(tags) > 0 {
				buf.WriteByte(',')
				buf.WriteString(tags)
			}
			name := buf.String()
			payload.Data = append(payload.Data, &ZMetric{
				Metric: name,
				Key:    name,
				Value:  v.Value,
			})
		}
	}
	c.JSON(http.StatusOK, payload)
}
// sortLabel renders a label map as "k=v" pairs, sorted lexicographically
// and joined with underscores; empty maps yield "".
func (z *Zabbix) sortLabel(labels map[string]string) string {
	if len(labels) == 0 {
		return ""
	}
	pairs := make([]string, 0, len(labels))
	for key, val := range labels {
		pairs = append(pairs, key+"="+val)
	}
	sort.Strings(pairs)
	return strings.Join(pairs, "_")
}
// processorMetrics splits the processor's metrics into the float group
// (gauges and counters) and the string group (info metrics); any other
// metric type is ignored.
func (z *Zabbix) processorMetrics() {
	metrics := z.processor.GetMetric()
	for _, metric := range metrics {
		if metric.Type == process.Gauge || metric.Type == process.Counter {
			z.floatGroup = append(z.floatGroup, metric)
		} else if metric.Type == process.Info {
			z.strGroup = append(z.strGroup, metric)
		}
	}
}

461
tools/keeper/cmd/command.go Normal file
View File

@ -0,0 +1,461 @@
package cmd
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/url"
"strconv"
"sync"
"time"
"github.com/sirupsen/logrus"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/util"
"github.com/taosdata/taoskeeper/util/pool"
)
// logger is the command-package logger.
var logger = log.GetLogger("CMD")

// MAX_SQL_LEN is the flush threshold (bytes) for batched writes.
var MAX_SQL_LEN = 1000000

// Command performs one-shot migration/cleanup of legacy taosd metrics:
// it reads from TDengine via conn and writes new-format data through the
// adapter's influxdb line-protocol endpoint at url.
type Command struct {
	fromTime time.Time // lower bound of the data window to migrate
	client   *http.Client
	conn     *db.Connector
	username string
	password string
	url      *url.URL // taosAdapter /influxdb/v1/write endpoint
}
// NewCommand builds a Command from conf: an HTTP client for line-protocol
// writes (TLS verification disabled), a connection to the metrics database,
// and the influxdb/v1/write URL with millisecond precision.
// Panics if the database connection cannot be established.
func NewCommand(conf *config.Config) *Command {
	client := &http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).DialContext,
			IdleConnTimeout:       90 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
			DisableCompression:    true,
			// NOTE(review): server certificates are not verified.
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	}
	conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl)
	if err != nil {
		logger.Errorf("init db connect error, msg:%s", err)
		panic(err)
	}
	imp := &Command{
		client:   client,
		conn:     conn,
		username: conf.TDengine.Username,
		password: conf.TDengine.Password,
		url: &url.URL{
			// NOTE(review): scheme is always http here even when usessl is set
			// for the DB connection — confirm whether https is needed.
			Scheme:   "http",
			Host:     fmt.Sprintf("%s:%d", conf.TDengine.Host, conf.TDengine.Port),
			Path:     "/influxdb/v1/write",
			RawQuery: fmt.Sprintf("db=%s&precision=ms", conf.Metrics.Database.Name),
		},
	}
	return imp
}
// Process dispatches the one-shot maintenance action selected in conf:
// transferring the legacy taosd metrics or dropping their stables.
// The two actions are mutually exclusive; invalid values are rejected.
func (cmd *Command) Process(conf *config.Config) {
	// Reject conflicting flags first.
	if conf.Transfer != "" && conf.Drop != "" {
		logger.Errorf("transfer and drop can't be set at the same time")
		return
	}
	if conf.Transfer != "" && conf.Transfer != "old_taosd_metric" {
		logger.Errorf("transfer only support old_taosd_metric")
		return
	}
	if conf.Transfer == "old_taosd_metric" {
		cmd.ProcessTransfer(conf)
		return
	}
	if conf.Drop != "" && conf.Drop != "old_taosd_metric_stables" {
		logger.Errorf("drop only support old_taosd_metric_stables")
		return
	}
	if conf.Drop == "old_taosd_metric_stables" {
		cmd.ProcessDrop(conf)
		return
	}
}
// ProcessTransfer migrates all legacy taosd metric tables concurrently,
// starting from conf.FromTime (RFC3339). Each table is handled by one
// goroutine from the shared pool; the call blocks until all finish.
// Fix: the error returned by each transfer function was silently discarded;
// it is now logged so partial failures are visible.
func (cmd *Command) ProcessTransfer(conf *config.Config) {
	fromTime, err := time.Parse("2006-01-02T15:04:05Z07:00", conf.FromTime)
	if err != nil {
		logger.Errorf("parse fromTime error, msg:%s", err)
		return
	}
	cmd.fromTime = fromTime

	// One transfer function per legacy table.
	funcs := []func() error{
		cmd.TransferTaosdClusterBasicInfo,
		cmd.TransferTaosdClusterInfo,
		cmd.TransferTaosdVgroupsInfo,
		cmd.TransferTaosdDnodesInfo,
		cmd.TransferTaosdDnodesStatus,
		cmd.TransferTaosdDnodesLogDirs1,
		cmd.TransferTaosdDnodesLogDirs2,
		cmd.TransferTaosdDnodesDataDirs,
		cmd.TransferTaosdMnodesInfo,
		cmd.TransferTaosdVnodesInfo,
	}
	wg := sync.WaitGroup{}
	wg.Add(len(funcs))
	for i := range funcs {
		index := i
		err := pool.GoroutinePool.Submit(func() {
			defer wg.Done()
			// Surface failures instead of dropping the returned error.
			if err := funcs[index](); err != nil {
				logger.Errorf("transfer old taosd metric error, msg:%s", err)
			}
		})
		if err != nil {
			panic(err)
		}
	}
	wg.Wait()
	logger.Info("transfer all old taosd metric success!!")
}
// TransferTaosdClusterInfo migrates cluster_info joined with grants_info
// into taosd_cluster_info (1 tag column: cluster_id).
func (cmd *Command) TransferTaosdClusterInfo() error {
	sql := "select a.cluster_id, master_uptime * 3600 * 24 as cluster_uptime, dbs_total, tbs_total, stbs_total, dnodes_total, dnodes_alive, mnodes_total, mnodes_alive, vgroups_total, vgroups_alive, vnodes_total, vnodes_alive, connections_total, topics_total, streams_total, b.expire_time as grants_expire_time, b.timeseries_used as grants_timeseries_used, b.timeseries_total as grants_timeseries_total, a.ts from cluster_info a, grants_info b where a.ts = b.ts and a.cluster_id = b.cluster_id and"
	dstTable := "taosd_cluster_info"
	return cmd.TransferTableToDst(sql, dstTable, 1)
}

// TransferTaosdVgroupsInfo migrates vgroups_info into taosd_vgroups_info
// (3 tag columns); status 'ready' maps to 1, everything else to 0.
func (cmd *Command) TransferTaosdVgroupsInfo() error {
	sql := "select cluster_id, vgroup_id, database_name, tables_num, CASE status WHEN 'ready' THEN 1 ELSE 0 END as status, ts from vgroups_info a where "
	dstTable := "taosd_vgroups_info"
	return cmd.TransferTableToDst(sql, dstTable, 3)
}

// TransferTaosdDnodesInfo migrates dnodes_info joined with log_summary into
// taosd_dnodes_info (3 tag columns).
// NOTE(review): the join condition contains "a. dnode_ep" with a stray space —
// verify the SQL parser accepts it.
func (cmd *Command) TransferTaosdDnodesInfo() error {
	sql := "select a.cluster_id, a.dnode_id, a.dnode_ep, uptime * 3600 * 24 as uptime, cpu_engine, cpu_system, cpu_cores, mem_engine, mem_system as mem_free, mem_total, disk_used, disk_total, disk_engine, net_in as system_net_in, net_out as system_net_out, io_read, io_write, io_read_disk, io_write_disk, vnodes_num, masters, has_mnode, has_qnode, has_snode, has_bnode, errors, b.error as error_log_count, b.info as info_log_count, b.debug as debug_log_count, b.trace as trace_log_count, a.ts as ts from dnodes_info a, log_summary b where a.ts = b.ts and a.dnode_id = b.dnode_id and a. dnode_ep = b.dnode_ep and "
	dstTable := "taosd_dnodes_info"
	return cmd.TransferTableToDst(sql, dstTable, 3)
}

// TransferTaosdDnodesStatus migrates d_info into taosd_dnodes_status
// (3 tag columns); status 'ready' maps to 1, everything else to 0.
func (cmd *Command) TransferTaosdDnodesStatus() error {
	sql := "select cluster_id, dnode_id, dnode_ep, CASE status WHEN 'ready' THEN 1 ELSE 0 END as status, ts from d_info a where "
	dstTable := "taosd_dnodes_status"
	return cmd.TransferTableToDst(sql, dstTable, 3)
}

// TransferTaosdDnodesLogDirs1 migrates log_dir into taosd_dnodes_log_dirs
// (4 tag columns).
func (cmd *Command) TransferTaosdDnodesLogDirs1() error {
	sql := "select cluster_id, dnode_id, dnode_ep, name as log_dir_name, avail, used, total, ts from log_dir a where "
	dstTable := "taosd_dnodes_log_dirs"
	return cmd.TransferTableToDst(sql, dstTable, 4)
}

// TransferTaosdDnodesLogDirs2 migrates temp_dir into the same
// taosd_dnodes_log_dirs destination as TransferTaosdDnodesLogDirs1.
func (cmd *Command) TransferTaosdDnodesLogDirs2() error {
	sql := "select cluster_id, dnode_id, dnode_ep, name as log_dir_name, avail, used, total, ts from temp_dir a where "
	dstTable := "taosd_dnodes_log_dirs"
	return cmd.TransferTableToDst(sql, dstTable, 4)
}

// TransferTaosdDnodesDataDirs migrates data_dir into taosd_dnodes_data_dirs
// (5 tag columns).
func (cmd *Command) TransferTaosdDnodesDataDirs() error {
	sql := "select cluster_id, dnode_id, dnode_ep, name as data_dir_name, `level` as data_dir_level, avail, used, total, ts from data_dir a where "
	dstTable := "taosd_dnodes_data_dirs"
	return cmd.TransferTableToDst(sql, dstTable, 5)
}

// TransferTaosdMnodesInfo migrates m_info into taosd_mnodes_info
// (3 tag columns); roles are encoded as offline=0, follower=100,
// candidate=101, leader=102, learner=104, other=103.
func (cmd *Command) TransferTaosdMnodesInfo() error {
	sql := "select cluster_id, mnode_id, mnode_ep, CASE role WHEN 'offline' THEN 0 WHEN 'follower' THEN 100 WHEN 'candidate' THEN 101 WHEN 'leader' THEN 102 WHEN 'learner' THEN 104 ELSE 103 END as role, ts from m_info a where "
	dstTable := "taosd_mnodes_info"
	return cmd.TransferTableToDst(sql, dstTable, 3)
}

// TransferTaosdVnodesInfo migrates vnodes_role into taosd_vnodes_info
// (4 tag columns); vgroup_id/database_name are not recorded in the legacy
// table and are filled with 0/'UNKNOWN'. Role encoding matches mnodes.
func (cmd *Command) TransferTaosdVnodesInfo() error {
	sql := "select cluster_id, 0 as vgroup_id, 'UNKNOWN' as database_name, dnode_id, CASE vnode_role WHEN 'offline' THEN 0 WHEN 'follower' THEN 100 WHEN 'candidate' THEN 101 WHEN 'leader' THEN 102 WHEN 'learner' THEN 104 ELSE 103 END as role, ts from vnodes_role a where "
	dstTable := "taosd_vnodes_info"
	return cmd.TransferTableToDst(sql, dstTable, 4)
}
// ProcessDrop removes every legacy metric stable from the metrics database.
// Panics on the first DROP failure.
func (cmd *Command) ProcessDrop(conf *config.Config) {
	legacyStables := []string{
		"log_dir",
		"dnodes_info",
		"data_dir",
		"log_summary",
		"m_info",
		"vnodes_role",
		"cluster_info",
		"temp_dir",
		"grants_info",
		"vgroups_info",
		"d_info",
		"taosadapter_system_cpu_percent",
		"taosadapter_restful_http_request_in_flight",
		"taosadapter_restful_http_request_summary_milliseconds",
		"taosadapter_restful_http_request_fail",
		"taosadapter_system_mem_percent",
		"taosadapter_restful_http_request_total",
	}
	ctx := context.Background()
	logger.Infof("use database:%s", conf.Metrics.Database.Name)
	for _, stable := range legacyStables {
		if _, err := cmd.conn.Exec(ctx, "DROP STABLE IF EXISTS "+stable, util.GetQidOwn()); err != nil {
			logger.Errorf("drop stable %s, error:%s", stable, err)
			panic(err)
		}
	}
	logger.Info("drop old taosd metric stables success!!")
}
// TransferDataToDest converts the rows of data into influxdb line protocol
// and posts them to the adapter in batches of at most MAX_SQL_LEN bytes.
// Row layout assumption: the first tagNum columns are tags, the following
// columns are metric values, and the last column is a time.Time timestamp.
// Panics on unexpected column types or on write failure.
func (cmd *Command) TransferDataToDest(data *db.Data, dstTable string, tagNum int) {
	var buf bytes.Buffer

	if len(data.Data) < 1 {
		return
	}

	for _, row := range data.Data {
		// get one row here
		buf.WriteString(dstTable)

		// write tags
		var tag string
		for j := 0; j < tagNum; j++ {
			switch v := row[j].(type) {
			case int:
				tag = fmt.Sprint(v)
			case int32:
				tag = fmt.Sprint(v)
			case int64:
				tag = fmt.Sprint(v)
			case string:
				tag = v
			default:
				panic(fmt.Sprintf("Unexpected type for row[%d]: %T", j, row[j]))
			}

			if tag != "" {
				buf.WriteString(fmt.Sprintf(",%s=%s", data.Head[j], util.EscapeInfluxProtocol(tag)))
			} else {
				// Empty tag values are replaced with "unknown" so the line stays parseable.
				buf.WriteString(fmt.Sprintf(",%s=%s", data.Head[j], "unknown"))
				logger.Errorf("tag value is empty, tag_name:%s", data.Head[j])
			}
		}
		buf.WriteString(" ")

		// write metrics; integer columns are widened to f64 fields
		for j := tagNum; j < len(row)-1; j++ {
			switch v := row[j].(type) {
			case int:
				buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v)))
			case int32:
				buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v)))
			case int64:
				buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v)))
			case float32:
				buf.WriteString(fmt.Sprintf("%s=%sf64", data.Head[j], strconv.FormatFloat(float64(v), 'f', -1, 64)))
			case float64:
				buf.WriteString(fmt.Sprintf("%s=%sf64", data.Head[j], strconv.FormatFloat(v, 'f', -1, 64)))
			default:
				panic(fmt.Sprintf("Unexpected type for row[%d]: %T", j, row[j]))
			}

			if j != len(row)-2 {
				buf.WriteString(",")
			}
		}

		// write timestamp (milliseconds; the URL query requested precision=ms)
		buf.WriteString(" ")
		buf.WriteString(fmt.Sprintf("%v", row[len(row)-1].(time.Time).UnixMilli()))
		buf.WriteString("\n")

		// Flush once the batch reaches the size threshold.
		if buf.Len() >= MAX_SQL_LEN {
			if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
				logger.Tracef("buf:%v", buf.String())
			}
			err := cmd.lineWriteBody(&buf)
			if err != nil {
				logger.Errorf("insert data error, msg:%s", err)
				panic(err)
			}
			buf.Reset()
		}
	}

	// Flush the trailing partial batch.
	if buf.Len() > 0 {
		if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
			logger.Tracef("buf:%v", buf.String())
		}
		err := cmd.lineWriteBody(&buf)
		if err != nil {
			logger.Errorf("insert data error, msg:%s", err)
			panic(err)
		}
	}
}
// TransferTaosdClusterBasicInfo migrates cluster_info rows into the new
// taosd_cluster_basic stable, scanning in 10-day windows from cmd.fromTime
// to now. Unlike the other migrations, this one writes via SQL INSERT into
// per-cluster subtables instead of the line protocol.
func (cmd *Command) TransferTaosdClusterBasicInfo() error {

	ctx := context.Background()

	endTime := time.Now()
	delta := time.Hour * 24 * 10

	var createTableSql = "create stable if not exists taosd_cluster_basic " +
		"(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " +
		"tags (cluster_id varchar(50))"

	if _, err := cmd.conn.Exec(ctx, createTableSql, util.GetQidOwn()); err != nil {
		logger.Errorf("create taosd_cluster_basic error, msg:%s", err)
		return err
	}

	logger.Tracef("fromeTime:%d", cmd.fromTime.UnixMilli())

	for current := cmd.fromTime; current.Before(endTime); current = current.Add(time.Duration(delta)) {
		querySql := fmt.Sprintf("select cluster_id, first_ep, first_ep_dnode_id, `version` as cluster_version, ts from cluster_info where ts > %d and ts <= %d",
			current.UnixMilli(), current.Add(time.Duration(delta)).UnixMilli())
		logger.Tracef("query sql:%s", querySql)
		data, err := cmd.conn.Query(ctx, querySql, util.GetQidOwn())
		if err != nil {
			logger.Errorf("query cluster_info error, msg:%s", err)
			return err
		}

		// transfer data to new table, only this table need use insert statement
		var buf bytes.Buffer

		// Group the rows by cluster_id so each INSERT targets one subtable.
		result := make(map[string][][]interface{})

		for _, row := range data.Data {
			key := row[0].(string) // first column (cluster_id) is the grouping key
			result[key] = append(result[key], row)
		}

		// Migrate each cluster's (tag's) rows separately.
		for _, dataByCluster := range result {
			buf.Reset()

			for _, row := range dataByCluster {
				// Start a new INSERT statement for an empty buffer.
				if len(buf.Bytes()) == 0 {
					sql := fmt.Sprintf(
						"insert into taosd_cluster_basic_%s using taosd_cluster_basic tags ('%s') values ",
						row[0].(string), row[0].(string))

					buf.WriteString(sql)
				}

				sql := fmt.Sprintf(
					"(%d, '%s', %d, '%s')",
					row[4].(time.Time).UnixMilli(), row[1].(string), row[2].(int32), row[3].(string))
				buf.WriteString(sql)

				// Flush once the statement reaches the size threshold.
				if buf.Len() >= MAX_SQL_LEN {
					rowsAffected, err := cmd.conn.Exec(context.Background(), buf.String(), util.GetQidOwn())
					if err != nil {
						logger.Errorf("insert taosd_cluster_basic error, msg:%s", err)
						return err
					}
					if rowsAffected <= 0 {
						logger.Errorf("insert taosd_cluster_basic failed, rowsAffected:%d", rowsAffected)
					}
					buf.Reset()
				}
			}

			// Flush the trailing partial statement.
			if buf.Len() > 0 {
				rowsAffected, err := cmd.conn.Exec(context.Background(), buf.String(), util.GetQidOwn())
				if err != nil {
					logger.Errorf("insert taosd_cluster_basic error, msg:%s", err)
					return err
				}
				if rowsAffected <= 0 {
					logger.Errorf("insert taosd_cluster_basic failed, rowsAffected:%d", rowsAffected)
				}
			}
		}
	}

	logger.Info("transfer stable taosd_cluster_basic success!!")
	return nil
}
// TransferTableToDst appends a 10-day-window time predicate to sql, queries
// the legacy table window by window, and forwards each window's rows to
// dstTable via the line protocol. sql must end with "where "/"and " so the
// "a.ts > … and a.ts <= …" predicate can be concatenated directly.
func (cmd *Command) TransferTableToDst(sql string, dstTable string, tagNum int) error {

	ctx := context.Background()

	endTime := time.Now()
	delta := time.Hour * 24 * 10

	logger.Tracef("fromTime:%d", cmd.fromTime.UnixMilli())

	for current := cmd.fromTime; current.Before(endTime); current = current.Add(time.Duration(delta)) {
		querySql := fmt.Sprintf(sql+" a.ts > %d and a.ts <= %d",
			current.UnixMilli(), current.Add(time.Duration(delta)).UnixMilli())
		logger.Tracef("query sql:%s", querySql)
		data, err := cmd.conn.Query(ctx, querySql, util.GetQidOwn())
		if err != nil {
			logger.Errorf("query cluster_info error, msg:%s", err)
			return err
		}

		// Forward this window's rows via the line protocol.
		cmd.TransferDataToDest(data, dstTable, tagNum)
	}

	logger.Info("transfer stable " + dstTable + " success!!")
	return nil
}
// lineWriteBody POSTs buf (influxdb line protocol) to cmd.url with basic
// auth. Any status other than 204 No Content is turned into an error that
// includes the response body.
// NOTE(review): the request is constructed by hand and ContentLength is left
// unset — confirm the intended transfer encoding with the adapter.
func (cmd *Command) lineWriteBody(buf *bytes.Buffer) error {
	header := map[string][]string{
		"Connection": {"keep-alive"},
	}
	req := &http.Request{
		Method:     http.MethodPost,
		URL:        cmd.url,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     header,
		Host:       cmd.url.Host,
	}
	req.SetBasicAuth(cmd.username, cmd.password)

	req.Body = io.NopCloser(buf)
	resp, err := cmd.client.Do(req)
	if err != nil {
		logger.Errorf("writing metrics exception, msg:%s", err)
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("unexpected status code %d:body:%s", resp.StatusCode, string(body))
	}
	return nil
}

View File

@ -0,0 +1,8 @@
package cmd
import (
"testing"
)
// TestEmpty is a placeholder so `go test` has at least one test in this package.
func TestEmpty(t *testing.T) {
}

View File

@ -0,0 +1,38 @@
prefix = "taos"
cluster = "localhost"
database = "log"
explicit = false
[tables.restful_info]
[tables.slowquery]
[tables.cluster_info]
[tables.grants_info]
[tables.disks_info]
[tables.logs]
explicit = true
[tables.logs.metrics.content]
type = "info"
help = "log types or messages"
[tables.logs.metrics.level]
type = "gauge"
help = "log level"
[tables.dnodes_info]
[tables.dnodes_info.metrics.has_mnode]
type = "gauge"
help = "check if the node has mnode"
[tables.vgroups_info]
column_as_variables = ["database_name", "dnode_roles", "dnode_ids"]
explicit = false
[tables.vgroups_info.metrics.tables_num]
type = "gauge"
help = "Tables count of the vgroup"
[tables.vgroups_info.metrics.online_vnodes]
type = "gauge"
help = "Online v-nodes of the v-group"
[tables.vgroups_info.metrics.status]
type = "info"
help = "Status of the v-group"

View File

@ -0,0 +1,53 @@
instanceId = 64
# Listen port, default is 6043
port = 6043
# go pool size
gopoolsize = 50000
# interval for metrics
RotationInterval = "15s"
[tdengine]
host = "127.0.0.1"
port = 6041
username = "root"
password = "taosdata"
usessl = false
[metrics]
# metrics prefix in metrics names.
prefix = "taos"
# export some tables that are not super table
tables = []
# database for storing metrics data
[metrics.database]
name = "log"
# database options for db storing metrics data
[metrics.database.options]
vgroups = 1
buffer = 64
keep = 90
cachemodel = "both"
[environment]
# Whether running in cgroup.
incgroup = false
[log]
# The directory where log files are stored.
# path = "/var/log/taos"
level = "info"
# Number of log file rotations before deletion.
rotationCount = 30
# The number of days to retain log files.
keepDays = 30
# The maximum size of a log file before rotation.
rotationSize = "1GB"
# If set to true, log files will be compressed.
compress = false
# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit.
reservedDiskSize = "1GB"

View File

@ -0,0 +1,65 @@
instanceId = 64
# Listen port, default is 6043
port = 6043
# go pool size
gopoolsize = 50000
# interval for TDengine metrics
RotationInterval = "15s"
[tdengine]
host = "127.0.0.1"
port = 6041
username = "root"
password = "taosdata"
usessl = false
[metrics]
# metrics prefix in metrics names.
prefix = "taos"
# cluster identifier for multiple TDengine clusters
cluster = ""
# export some tables that are not super table
tables = []
# database for storing metrics data
[metrics.database]
name = "log"
# database options for db storing metrics data
[metrics.database.options]
vgroups = 1
buffer = 64
keep = 90
cachemodel = "both"
[environment]
# Whether running in cgroup.
incgroup = false
[audit]
enable = true
[audit.database]
name = "audit"
[audit.database.options]
vgroups = 1
buffer = 16
cachemodel = "both"
[log]
# The directory where log files are stored.
# path = "/var/log/taos"
level = "info"
# Number of log file rotations before deletion.
rotationCount = 30
# The number of days to retain log files.
keepDays = 30
# The maximum size of a log file before rotation.
rotationSize = "1GB"
# If set to true, log files will be compressed.
compress = false
# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit.
reservedDiskSize = "1GB"

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 228 KiB

View File

@ -0,0 +1,177 @@
package db
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"os"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/taosdata/driver-go/v3/common"
_ "github.com/taosdata/driver-go/v3/taosRestful"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/util"
)
// Connector wraps a database/sql handle opened with the taosRestful driver.
type Connector struct {
	db *sql.DB
}

// Data is a generic query result: column names plus row-major values.
type Data struct {
	Head []string        `json:"head"`
	Data [][]interface{} `json:"data"`
}

// dbLogger is the package-scoped logger (name padded for aligned output).
var dbLogger = log.GetLogger("DB ")
// NewConnector opens a REST connection to the adapter without selecting a
// database; usessl toggles https.
func NewConnector(username, password, host string, port int, usessl bool) (*Connector, error) {
	protocol := "http"
	if usessl {
		protocol = "https"
	}
	connLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: util.GetQidOwn()})
	connLogger.Tracef("connect to adapter, host:%s, port:%d, usessl:%v", host, port, usessl)
	dsn := fmt.Sprintf("%s:%s@%s(%s:%d)/?skipVerify=true", username, password, protocol, host, port)
	db, err := sql.Open("taosRestful", dsn)
	if err != nil {
		connLogger.Errorf("connect to adapter failed, host:%s, port:%d, usessl:%v, error:%s", host, port, usessl, err)
		return nil, err
	}
	connLogger.Tracef("connect to adapter success, host:%s, port:%d, usessl:%v", host, port, usessl)
	return &Connector{db: db}, nil
}
// NewConnectorWithDb opens a REST connection to the adapter with dbname
// pre-selected; usessl toggles https.
func NewConnectorWithDb(username, password, host string, port int, dbname string, usessl bool) (*Connector, error) {
	protocol := "http"
	if usessl {
		protocol = "https"
	}
	connLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: util.GetQidOwn()})
	connLogger.Tracef("connect to adapter, host:%s, port:%d, usessl:%v", host, port, usessl)
	dsn := fmt.Sprintf("%s:%s@%s(%s:%d)/%s?skipVerify=true", username, password, protocol, host, port, dbname)
	db, err := sql.Open("taosRestful", dsn)
	if err != nil {
		connLogger.Errorf("connect to adapter failed, host:%s, port:%d, db:%s, usessl:%v, error:%s", host, port, dbname, usessl, err)
		return nil, err
	}
	connLogger.Tracef("connect to adapter success, host:%s, port:%d, db:%s, usessl:%v", host, port, dbname, usessl)
	return &Connector{db: db}, nil
}
// Exec runs a statement through the adapter and returns the affected row
// count. qid is attached to both the log entries and the driver context as
// the request id. On an "Authentication failure" error the process flushes
// logs and exits(1) — callers never observe that error.
func (c *Connector) Exec(ctx context.Context, sql string, qid uint64) (int64, error) {
	dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: qid})
	ctx = context.WithValue(ctx, common.ReqIDKey, int64(qid))

	dbLogger.Tracef("call adapter to execute sql:%s", sql)
	startTime := time.Now()
	res, err := c.db.ExecContext(ctx, sql)

	endTime := time.Now()
	latency := endTime.Sub(startTime)

	if err != nil {
		if strings.Contains(err.Error(), "Authentication failure") {
			dbLogger.Error("Authentication failure")
			ctxLog, cancelLog := context.WithTimeout(context.Background(), 3*time.Second)
			defer cancelLog()
			log.Close(ctxLog)
			os.Exit(1)
		}
		dbLogger.Errorf("latency:%v, sql:%s, err:%s", latency, sql, err)
		return 0, err
	}

	rowsAffected, err := res.RowsAffected()
	if err != nil {
		dbLogger.Errorf("latency:%v, err:%s", latency, err)
		return rowsAffected, err
	}

	dbLogger.Tracef("response ok, rowsAffected:%v, latency:%v", rowsAffected, latency)

	return rowsAffected, err
}
// logData trace-logs a query result as JSON; nil data is a logged no-op.
func logData(data *Data, logger *logrus.Entry) {
	if data == nil {
		logger.Tracef("No data to display")
		return
	}

	if encoded, err := json.Marshal(data); err != nil {
		logger.Errorf("Failed to marshal data to JSON: %v", err)
	} else {
		logger.Tracef("query result data:%s", encoded)
	}
}
// Query runs sql through the adapter and materializes the entire result set
// into a Data value (column names + row-major values). qid is attached to
// both the log entries and the driver context as the request id. Like Exec,
// an "Authentication failure" error terminates the process.
// Fixes: the Columns() error was checked only after its result was used;
// rows was closed on just one error path (now deferred); and rows.Err() was
// never consulted, so an iteration error could silently truncate results.
func (c *Connector) Query(ctx context.Context, sql string, qid uint64) (*Data, error) {
	dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: qid})
	ctx = context.WithValue(ctx, common.ReqIDKey, int64(qid))

	dbLogger.Tracef("call adapter to execute query, sql:%s", sql)
	startTime := time.Now()
	rows, err := c.db.QueryContext(ctx, sql)
	endTime := time.Now()
	latency := endTime.Sub(startTime)
	if err != nil {
		if strings.Contains(err.Error(), "Authentication failure") {
			dbLogger.Error("Authentication failure")
			ctxLog, cancelLog := context.WithTimeout(context.Background(), 3*time.Second)
			defer cancelLog()
			log.Close(ctxLog)
			os.Exit(1)
		}
		dbLogger.Errorf("latency:%v, sql:%s, err:%s", latency, sql, err)
		return nil, err
	}
	// Always release the result set, on every return path.
	defer rows.Close()
	dbLogger.Tracef("response ok, latency:%v, sql:%s", latency, sql)

	data := &Data{}
	data.Head, err = rows.Columns()
	if err != nil {
		dbLogger.Errorf("get columns error, msg:%s", err)
		return nil, err
	}
	columnCount := len(data.Head)

	scanData := make([]interface{}, columnCount)
	for rows.Next() {
		tmp := make([]interface{}, columnCount)
		for i := 0; i < columnCount; i++ {
			scanData[i] = &tmp[i]
		}
		err = rows.Scan(scanData...)
		if err != nil {
			dbLogger.Errorf("rows scan error, msg:%s", err)
			return nil, err
		}
		data.Data = append(data.Data, tmp)
	}
	// Iteration can stop because of an error; surface it instead of returning
	// a silently truncated result set.
	if err = rows.Err(); err != nil {
		dbLogger.Errorf("rows iteration error, msg:%s", err)
		return nil, err
	}

	if dbLogger.Logger.IsLevelEnabled(logrus.TraceLevel) {
		logData(data, dbLogger)
	}
	return data, nil
}
// Close releases the underlying sql.DB connection pool.
func (c *Connector) Close() error {
	return c.db.Close()
}

View File

@ -0,0 +1,8 @@
package db
import (
"testing"
)
// TestEmpty is a placeholder so `go test` has at least one test in this package.
func TestEmpty(t *testing.T) {
}

View File

@ -0,0 +1,29 @@
version: "3.7"
services:
tdengine:
image: tdengine/tdengine:3.0.1.6
environment:
TZ: Asia/Shanghai
TAOS_FQDN: tdengine
volumes:
- taosdata:/var/lib/taos
taoskeeper:
build: ./
depends_on:
- tdengine
environment:
TDENGINE_HOST: tdengine
TDENGINE_PORT: 6041
volumes:
- ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml
ports:
- 6043:6043
prometheus:
image: prom/prometheus
volumes:
- ./prometheus/:/etc/prometheus/
ports:
- 9090:9090
volumes:
taosdata:

View File

@ -0,0 +1,38 @@
prefix = "taos"
cluster = "localhost"
database = "log"
explicit = false
[tables.restful_info]
[tables.slowquery]
[tables.cluster_info]
[tables.grants_info]
[tables.disks_info]
[tables.logs]
explicit = true
[tables.logs.metrics.content]
type = "info"
help = "log types or messages"
[tables.logs.metrics.level]
type = "gauge"
help = "log level"
[tables.dnodes_info]
[tables.dnodes_info.metrics.has_mnode]
type = "gauge"
help = "check if the node has mnode"
[tables.vgroups_info]
column_as_variables = ["database_name", "dnode_roles", "dnode_ids"]
explicit = false
[tables.vgroups_info.metrics.tables_num]
type = "gauge"
help = "Tables count of the vgroup"
[tables.vgroups_info.metrics.online_vnodes]
type = "gauge"
help = "Online v-nodes of the v-group"
[tables.vgroups_info.metrics.status]
type = "info"
help = "Status of the v-group"

80
tools/keeper/go.mod Normal file
View File

@ -0,0 +1,80 @@
module github.com/taosdata/taoskeeper
go 1.18
require (
github.com/BurntSushi/toml v0.4.1
github.com/gin-gonic/gin v1.9.1
github.com/kardianos/service v1.2.1
github.com/panjf2000/ants/v2 v2.4.6
github.com/prometheus/client_golang v1.12.2
github.com/shirou/gopsutil/v3 v3.22.4
github.com/shopspring/decimal v1.3.1
github.com/sirupsen/logrus v1.8.1
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.12.0
github.com/stretchr/testify v1.9.0
github.com/taosdata/driver-go/v3 v3.5.8
github.com/taosdata/file-rotatelogs/v2 v2.5.2
github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/bytedance/sonic v1.9.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/cors v1.3.1 // indirect
github.com/gin-contrib/gzip v0.0.3 // indirect
github.com/gin-contrib/pprof v1.3.0 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.0 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/lestrrat-go/strftime v1.0.6 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/spf13/afero v1.8.2 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.3.0 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sys v0.24.0 // indirect
golang.org/x/text v0.9.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

764
tools/keeper/go.sum Normal file
View File

@ -0,0 +1,764 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deathowl/go-metrics-prometheus v0.0.0-20200518174047-74482eab5bfb/go.mod h1:kZ9Xvhj+PTMJ415unU/sutrnWDVqG0PDS/Sl4Rt3xkE=
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gin-contrib/cors v1.3.1 h1:doAsuITavI4IOcd0Y19U4B+O0dNWihRyX//nn4sEmgA=
github.com/gin-contrib/cors v1.3.1/go.mod h1:jjEJ4268OPZUcU7k9Pm653S7lXUGcqMADzFA61xsmDk=
github.com/gin-contrib/gzip v0.0.3 h1:etUaeesHhEORpZMp18zoOhepboiWnFtXrBZxszWUn4k=
github.com/gin-contrib/gzip v0.0.3/go.mod h1:YxxswVZIqOvcHEQpsSn+QF5guQtO1dCfy0shBPy4jFc=
github.com/gin-contrib/pprof v1.3.0 h1:G9eK6HnbkSqDZBYbzG4wrjCsA4e+cvYAHUZw6W+W9K0=
github.com/gin-contrib/pprof v1.3.0/go.mod h1:waMjT1H9b179t3CxuG1cV3DHpga6ybizwfBaM5OXaB0=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/gin-gonic/gin v1.6.2/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kardianos/service v1.2.1 h1:AYndMsehS+ywIS6RB9KOlcXzteWUzxgMgBymJD7+BYk=
github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8=
github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is=
github.com/lestrrat-go/strftime v1.0.6 h1:CFGsDEt1pOpFNU+TJB0nhz9jl+K0hZSLE205AhTIGQQ=
github.com/lestrrat-go/strftime v1.0.6/go.mod h1:f7jQKgV5nnJpYgdEasS+/y7EsTb8ykN2z68n3TtcTaw=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g=
github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/panjf2000/ants/v2 v2.4.6 h1:drmj9mcygn2gawZ155dRbo+NfXEfAssjZNU1qoIb4gQ=
github.com/panjf2000/ants/v2 v2.4.6/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/shirou/gopsutil/v3 v3.22.4 h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI=
github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI=
github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs=
github.com/taosdata/driver-go/v2 v2.0.1-0.20211018081904-0a2a3ef6c829/go.mod h1:W7pu74rSvDmGjJPO6fzp+GCtwOelrMgXEhPD0aQJ1xw=
github.com/taosdata/driver-go/v3 v3.5.8 h1:JT5lNFUCOHD9Hs4Phjg8RBkGOWlePRnpGqq8kIRHT98=
github.com/taosdata/driver-go/v3 v3.5.8/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU=
github.com/taosdata/file-rotatelogs/v2 v2.5.2 h1:6ryjwDdKqQtWrkVq9OKj4gvMING/f+fDluMAAe2DIXQ=
github.com/taosdata/file-rotatelogs/v2 v2.5.2/go.mod h1:Qm99Lh0iMZouGgyy++JgTqKvP5FQw1ruR5jkWF7e1n0=
github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a h1:WGFREiuYBrTXTS9GVQQpDvVgGRyByfo0V5//o7tv/ho=
github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a/go.mod h1:hlvGgM/HN3AqWMajvMQe80qoLNJ4KIxs8YOVqEqnxUo=
github.com/tidwall/gjson v1.9.1/go.mod h1:jydLKE7s8J0+1/5jC4eXcuFlzKizGrCKvLmBVX/5oXc=
github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

View File

@ -0,0 +1,6 @@
package config
// AuditConfig holds the audit-data settings loaded from the "[audit]"
// section of the keeper configuration (see initAudit for the defaults).
type AuditConfig struct {
	// Enable toggles collection of audit data.
	Enable bool `toml:"enable"`
	// Database is the TDengine database that stores audit records.
	Database Database `toml:"database"`
}

View File

@ -0,0 +1,294 @@
package config
import (
"fmt"
"io/fs"
"os"
"runtime"
"time"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/taosdata/go-utils/web"
"github.com/taosdata/taoskeeper/util/pool"
"github.com/taosdata/taoskeeper/version"
)
var Name = fmt.Sprintf("%skeeper", version.CUS_PROMPT)
const ReqIDKey = "QID"
const ModelKey = "model"
// Config is the full runtime configuration of taoskeeper, populated by
// InitConfig from the TOML config file, environment variables and flags.
type Config struct {
	// InstanceID distinguishes multiple keeper instances (default 64).
	InstanceID uint8
	Cors       web.CorsConfig `toml:"cors"`
	// Port is the HTTP listen port (default 6043).
	Port       int    `toml:"port"`
	LogLevel   string `toml:"loglevel"`
	GoPoolSize int    `toml:"gopoolsize"`
	// RotationInterval is the metrics refresh interval, e.g. "15s".
	RotationInterval string          `toml:"RotationInterval"`
	TDengine         TDengineRestful `toml:"tdengine"`
	Metrics          MetricsConfig   `toml:"metrics"`
	Env              Environment     `toml:"environment"`
	Audit            AuditConfig     `toml:"audit"`
	// Log is not unmarshalled by viper (mapstructure:"-"); it is filled
	// from the "log.*" keys by Log.SetValue inside InitConfig.
	Log Log `mapstructure:"-"`
	// Transfer, FromTime and Drop mirror the corresponding command-line
	// flags and are assigned directly in InitConfig.
	Transfer string
	FromTime string
	Drop     string
}
// TDengineRestful describes how to reach the TDengine REST endpoint
// (taosAdapter) that the keeper reads from and writes to.
type TDengineRestful struct {
	Host     string `toml:"host"`
	Port     int    `toml:"port"`
	Username string `toml:"username"`
	Password string `toml:"password"`
	// Usessl selects https instead of http when true.
	Usessl bool `toml:"usessl"`
}
var Conf *Config
// InitConfig parses command-line flags, binds environment variables and
// reads the TOML configuration file, returning the resulting Config.
// It also stores the result in the package-level Conf. Side effects:
// it may call os.Exit (for -h / -V) or panic on unreadable config.
//
// Config file lookup order: explicit -c path, then the platform config
// directory; if "<Name>.toml" is missing it retries once with the legacy
// "keeper.toml" name (via the ReadConfig label below).
func InitConfig() *Config {
	viper.SetConfigType("toml")
	viper.SetConfigName(Name)
	viper.AddConfigPath("/etc/taos")
	var cp *string
	// Platform-specific default config directory and -c flag help text.
	switch runtime.GOOS {
	case "windows":
		viper.AddConfigPath(fmt.Sprintf("C:\\%s\\cfg", version.CUS_NAME))
		cp = pflag.StringP("config", "c", "", fmt.Sprintf("config path default C:\\%s\\cfg\\%s.toml", version.CUS_NAME, Name))
	default:
		viper.AddConfigPath(fmt.Sprintf("/etc/%s", version.CUS_PROMPT))
		cp = pflag.StringP("config", "c", "", fmt.Sprintf("config path default /etc/%s/%s.toml", version.CUS_PROMPT, Name))
	}
	// Command-mode flags: when set, the process performs a one-shot task
	// (data transfer / table drop) instead of serving metrics.
	transfer := pflag.StringP("transfer", "", "", "run "+Name+" in command mode, only support old_taosd_metric. transfer old metrics data to new tables and exit")
	fromTime := pflag.StringP("fromTime", "", "2020-01-01T00:00:00+08:00", "parameter of transfer, example: 2020-01-01T00:00:00+08:00")
	drop := pflag.StringP("drop", "", "", "run "+Name+" in command mode, only support old_taosd_metric_stables. ")
	v := pflag.BoolP("version", "V", false, "Print the version and exit")
	help := pflag.BoolP("help", "h", false, "Print this help message and exit")
	pflag.Parse()
	if *help {
		fmt.Fprintf(os.Stderr, "Usage of %s v%s:\n", Name, version.Version)
		pflag.PrintDefaults()
		os.Exit(0)
	}
	if *v {
		fmt.Printf("%s version: %s\n", Name, version.Version)
		fmt.Printf("git: %s\n", version.Gitinfo)
		fmt.Printf("build: %s\n", version.BuildInfo)
		os.Exit(0)
	}
	if *cp != "" {
		viper.SetConfigFile(*cp)
	}
	viper.SetEnvPrefix(Name)
	err := viper.BindPFlags(pflag.CommandLine)
	if err != nil {
		panic(err)
	}
	viper.AutomaticEnv()
	// gotoStep guards the single retry with the legacy "keeper" name.
	gotoStep := false
ReadConfig:
	if err := viper.ReadInConfig(); err != nil {
		_, isConfigNotFoundError := err.(viper.ConfigFileNotFoundError)
		_, isPathError := err.(*fs.PathError)
		if isConfigNotFoundError || isPathError {
			fmt.Println("config file not found")
			if !gotoStep {
				fmt.Println("use keeper.toml instead")
				viper.SetConfigName("keeper")
				gotoStep = true
				goto ReadConfig
			}
		} else {
			// Any other read error (e.g. malformed TOML) is fatal.
			panic(err)
		}
	}
	// if old format, change to new format
	if !viper.IsSet("metrics.database.name") {
		databaseName := viper.GetString("metrics.database")
		viper.Set("metrics.database.name", databaseName)
		viper.Set("metrics.database.options", viper.Get("metrics.databaseoptions"))
	}
	var conf Config
	if err = viper.Unmarshal(&conf); err != nil {
		panic(err)
	}
	// Command-mode flags are carried on the Config directly.
	conf.Transfer = *transfer
	conf.FromTime = *fromTime
	conf.Drop = *drop
	conf.Cors.Init()
	pool.Init(conf.GoPoolSize)
	conf.Log.SetValue()
	// set log level default value: info
	if conf.LogLevel == "" {
		conf.LogLevel = "info"
	}
	// "log.level" (new key) wins over the top-level "loglevel" when set.
	if viper.IsSet("log.level") {
		conf.LogLevel = conf.Log.Level
	} else {
		viper.Set("log.level", "")
	}
	if !viper.IsSet("logLevel") {
		viper.Set("logLevel", "")
	}
	Conf = &conf
	return &conf
}
// init registers every keeper configuration key: its default value, the
// environment-variable binding, and the command-line flag, then wires up
// the log section and (enterprise builds only) the audit section.
//
// Fix: the help texts of the metrics.database.options.* flags were
// copy-pasted from the audit section — they said "for audit database"
// (and the keep flag's text said "buffer"); corrected to describe the
// metrics database, matching the TAOS_KEEPER_METRICS_* env names.
func init() {
	viper.SetDefault("instanceId", 64)
	_ = viper.BindEnv("instanceId", "TAOS_KEEPER_INSTANCE_ID")
	pflag.Int("instanceId", 64, `instance ID. Env "TAOS_KEEPER_INSTANCE_ID"`)
	viper.SetDefault("port", 6043)
	_ = viper.BindEnv("port", "TAOS_KEEPER_PORT")
	pflag.IntP("port", "P", 6043, `http port. Env "TAOS_KEEPER_PORT"`)
	_ = viper.BindEnv("logLevel", "TAOS_KEEPER_LOG_LEVEL")
	pflag.String("logLevel", "info", `log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL"`)
	viper.SetDefault("gopoolsize", 50000)
	_ = viper.BindEnv("gopoolsize", "TAOS_KEEPER_POOL_SIZE")
	pflag.Int("gopoolsize", 50000, `coroutine size. Env "TAOS_KEEPER_POOL_SIZE"`)
	viper.SetDefault("RotationInterval", "15s")
	_ = viper.BindEnv("RotationInterval", "TAOS_KEEPER_ROTATION_INTERVAL")
	pflag.StringP("RotationInterval", "R", "15s", `interval for refresh metrics, such as "300ms", Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Env "TAOS_KEEPER_ROTATION_INTERVAL"`)
	viper.SetDefault("tdengine.host", "127.0.0.1")
	_ = viper.BindEnv("tdengine.host", "TAOS_KEEPER_TDENGINE_HOST")
	pflag.String("tdengine.host", "127.0.0.1", `TDengine server's ip. Env "TAOS_KEEPER_TDENGINE_HOST"`)
	viper.SetDefault("tdengine.port", 6041)
	_ = viper.BindEnv("tdengine.port", "TAOS_KEEPER_TDENGINE_PORT")
	pflag.Int("tdengine.port", 6041, `TDengine REST server(taosAdapter)'s port. Env "TAOS_KEEPER_TDENGINE_PORT"`)
	viper.SetDefault("tdengine.username", "root")
	_ = viper.BindEnv("tdengine.username", "TAOS_KEEPER_TDENGINE_USERNAME")
	pflag.String("tdengine.username", "root", `TDengine server's username. Env "TAOS_KEEPER_TDENGINE_USERNAME"`)
	viper.SetDefault("tdengine.password", "taosdata")
	_ = viper.BindEnv("tdengine.password", "TAOS_KEEPER_TDENGINE_PASSWORD")
	pflag.String("tdengine.password", "taosdata", `TDengine server's password. Env "TAOS_KEEPER_TDENGINE_PASSWORD"`)
	viper.SetDefault("tdengine.usessl", false)
	_ = viper.BindEnv("tdengine.usessl", "TAOS_KEEPER_TDENGINE_USESSL")
	pflag.Bool("tdengine.usessl", false, `TDengine server use ssl or not. Env "TAOS_KEEPER_TDENGINE_USESSL"`)
	viper.SetDefault("metrics.prefix", "")
	_ = viper.BindEnv("metrics.prefix", "TAOS_KEEPER_METRICS_PREFIX")
	pflag.String("metrics.prefix", "", `prefix in metrics names. Env "TAOS_KEEPER_METRICS_PREFIX"`)
	viper.SetDefault("metrics.database.name", "log")
	_ = viper.BindEnv("metrics.database.name", "TAOS_KEEPER_METRICS_DATABASE")
	pflag.String("metrics.database.name", "log", `database for storing metrics data. Env "TAOS_KEEPER_METRICS_DATABASE"`)
	viper.SetDefault("metrics.database.options.vgroups", 1)
	_ = viper.BindEnv("metrics.database.options.vgroups", "TAOS_KEEPER_METRICS_VGROUPS")
	pflag.Int("metrics.database.options.vgroups", 1, `database option vgroups for metrics database. Env "TAOS_KEEPER_METRICS_VGROUPS"`)
	viper.SetDefault("metrics.database.options.buffer", 64)
	_ = viper.BindEnv("metrics.database.options.buffer", "TAOS_KEEPER_METRICS_BUFFER")
	pflag.Int("metrics.database.options.buffer", 64, `database option buffer for metrics database. Env "TAOS_KEEPER_METRICS_BUFFER"`)
	viper.SetDefault("metrics.database.options.keep", 90)
	_ = viper.BindEnv("metrics.database.options.keep", "TAOS_KEEPER_METRICS_KEEP")
	pflag.Int("metrics.database.options.keep", 90, `database option keep for metrics database. Env "TAOS_KEEPER_METRICS_KEEP"`)
	viper.SetDefault("metrics.database.options.cachemodel", "both")
	_ = viper.BindEnv("metrics.database.options.cachemodel", "TAOS_KEEPER_METRICS_CACHEMODEL")
	pflag.String("metrics.database.options.cachemodel", "both", `database option cachemodel for metrics database. Env "TAOS_KEEPER_METRICS_CACHEMODEL"`)
	viper.SetDefault("metrics.tables", []string{})
	_ = viper.BindEnv("metrics.tables", "TAOS_KEEPER_METRICS_TABLES")
	pflag.StringArray("metrics.tables", []string{}, `export some tables that are not super table, multiple values split with white space. Env "TAOS_KEEPER_METRICS_TABLES"`)
	viper.SetDefault("environment.incgroup", false)
	_ = viper.BindEnv("environment.incgroup", "TAOS_KEEPER_ENVIRONMENT_INCGROUP")
	pflag.Bool("environment.incgroup", false, `whether running in cgroup. Env "TAOS_KEEPER_ENVIRONMENT_INCGROUP"`)
	initLog()
	// Audit collection only exists in the enterprise edition.
	if version.IsEnterprise == "true" {
		initAudit()
	}
}
// initLog registers the "log.*" configuration keys (path, level, rotation
// and retention settings) with their defaults, env bindings and flags.
func initLog() {
	// Platform-specific default log directory.
	switch runtime.GOOS {
	case "windows":
		viper.SetDefault("log.path", fmt.Sprintf("C:\\%s\\log", version.CUS_NAME))
		_ = viper.BindEnv("log.path", "TAOS_KEEPER_LOG_PATH")
		pflag.String("log.path", fmt.Sprintf("C:\\%s\\log", version.CUS_NAME), `log path. Env "TAOS_KEEPER_LOG_PATH"`)
	default:
		viper.SetDefault("log.path", fmt.Sprintf("/var/log/%s", version.CUS_PROMPT))
		_ = viper.BindEnv("log.path", "TAOS_KEEPER_LOG_PATH")
		pflag.String("log.path", fmt.Sprintf("/var/log/%s", version.CUS_PROMPT), `log path. Env "TAOS_KEEPER_LOG_PATH"`)
	}
	_ = viper.BindEnv("log.level", "TAOS_KEEPER_LOG_LEVEL")
	pflag.String("log.level", "info", `log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL"`)
	viper.SetDefault("log.rotationCount", 5)
	_ = viper.BindEnv("log.rotationCount", "TAOS_KEEPER_LOG_ROTATION_COUNT")
	pflag.Uint("log.rotationCount", 5, `log rotation count. Env "TAOS_KEEPER_LOG_ROTATION_COUNT"`)
	viper.SetDefault("log.keepDays", 30)
	_ = viper.BindEnv("log.keepDays", "TAOS_KEEPER_LOG_KEEP_DAYS")
	pflag.Uint("log.keepDays", 30, `log retention days, must be a positive integer. Env "TAOS_KEEPER_LOG_KEEP_DAYS"`)
	// rotationTime is kept only for backward compatibility; rotation is
	// fixed at 24h (see ConfigLog in the log package).
	viper.SetDefault("log.rotationTime", time.Hour*24)
	_ = viper.BindEnv("log.rotationTime", "TAOS_KEEPER_LOG_ROTATION_TIME")
	pflag.Duration("log.rotationTime", time.Hour*24, `deprecated: log rotation time always 24 hours. Env "TAOS_KEEPER_LOG_ROTATION_TIME"`)
	viper.SetDefault("log.rotationSize", "1GB")
	_ = viper.BindEnv("log.rotationSize", "TAOS_KEEPER_LOG_ROTATION_SIZE")
	pflag.String("log.rotationSize", "1GB", `log rotation size(KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_ROTATION_SIZE"`)
	viper.SetDefault("log.compress", false)
	_ = viper.BindEnv("log.compress", "TAOS_KEEPER_LOG_COMPRESS")
	pflag.Bool("log.compress", false, `whether to compress old log. Env "TAOS_KEEPER_LOG_COMPRESS"`)
	viper.SetDefault("log.reservedDiskSize", "1GB")
	_ = viper.BindEnv("log.reservedDiskSize", "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE")
	pflag.String("log.reservedDiskSize", "1GB", `reserved disk size for log dir (KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE"`)
}
// initAudit registers the "audit.*" configuration keys (enterprise
// edition only — see the guard in init).
// NOTE(review): audit.enable is declared as a string flag with default
// "true" rather than a bool flag — presumably for config compatibility;
// confirm before changing the flag type.
func initAudit() {
	viper.SetDefault("audit.enable", "true")
	_ = viper.BindEnv("audit.enable", "TAOS_KEEPER_AUDIT_ENABLE")
	pflag.String("audit.enable", "true", `database for enable audit data. Env "TAOS_KEEPER_AUDIT_ENABLE"`)
	viper.SetDefault("audit.database.name", "audit")
	_ = viper.BindEnv("audit.database.name", "TAOS_KEEPER_AUDIT_DATABASE")
	pflag.String("audit.database.name", "audit", `database for storing audit data. Env "TAOS_KEEPER_AUDIT_DATABASE"`)
	viper.SetDefault("audit.database.options.vgroups", 1)
	_ = viper.BindEnv("audit.database.options.vgroups", "TAOS_KEEPER_AUDIT_VGROUPS")
	pflag.Int("audit.database.options.vgroups", 1, `database option vgroups for audit database. Env "TAOS_KEEPER_AUDIT_VGROUPS"`)
	viper.SetDefault("audit.database.options.buffer", 16)
	_ = viper.BindEnv("audit.database.options.buffer", "TAOS_KEEPER_AUDIT_BUFFER")
	pflag.Int("audit.database.options.buffer", 16, `database option buffer for audit database. Env "TAOS_KEEPER_AUDIT_BUFFER"`)
	viper.SetDefault("audit.database.options.cachemodel", "both")
	_ = viper.BindEnv("audit.database.options.cachemodel", "TAOS_KEEPER_AUDIT_CACHEMODEL")
	pflag.String("audit.database.options.cachemodel", "both", `database option cachemodel for audit database. Env "TAOS_KEEPER_AUDIT_CACHEMODEL"`)
}

View File

@ -0,0 +1,29 @@
package config
import (
"time"
"github.com/spf13/viper"
)
// Log holds the logging configuration. It is not unmarshalled by viper
// (Config declares it mapstructure:"-"); SetValue copies the "log.*"
// keys out of viper explicitly.
type Log struct {
	Level         string
	Path          string
	RotationCount uint
	RotationTime  time.Duration
	// RotationSize and ReservedDiskSize are byte counts parsed from
	// human-readable sizes such as "1GB" (viper.GetSizeInBytes).
	RotationSize     uint
	KeepDays         uint
	Compress         bool
	ReservedDiskSize uint
}

// SetValue populates l from the currently loaded viper configuration.
// It must be called after viper has read the config file.
func (l *Log) SetValue() {
	l.Level = viper.GetString("log.level")
	l.Path = viper.GetString("log.path")
	l.RotationCount = viper.GetUint("log.rotationCount")
	l.RotationTime = viper.GetDuration("log.rotationTime")
	l.RotationSize = viper.GetSizeInBytes("log.rotationSize")
	l.KeepDays = viper.GetUint("log.keepDays")
	l.Compress = viper.GetBool("log.compress")
	l.ReservedDiskSize = viper.GetSizeInBytes("log.reservedDiskSize")
}

View File

@ -0,0 +1,85 @@
package config_test
import (
"fmt"
"io"
"os"
"runtime"
"testing"
"github.com/BurntSushi/toml"
"github.com/stretchr/testify/assert"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/version"
)
// TestConfig verifies that a TOML document decodes into config.Config
// with the expected field values.
//
// Fix: the original asserted `assert.EqualValues(t, c, c)` — comparing
// the value to itself, which can never fail — and printed the struct.
// Replaced with assertions on the actually decoded fields.
func TestConfig(t *testing.T) {
	data := `
# Start with debug middleware for gin
debug = true
# Listen port, default is 6043
port = 9000
# log level
loglevel = "error"
# go pool size
gopoolsize = 5000
# interval for TDengine metrics
RotationInterval = "10s"
[tdengine]
address = "http://localhost:6041"
authtype = "Basic"
username = "root"
password = "taosdata"
`
	var c config.Config
	_, err := toml.Decode(data, &c)
	if err != nil {
		t.Error(err)
		return
	}
	// Top-level keys mapped via the struct's toml tags.
	assert.Equal(t, 9000, c.Port)
	assert.Equal(t, "error", c.LogLevel)
	assert.Equal(t, 5000, c.GoPoolSize)
	assert.Equal(t, "10s", c.RotationInterval)
	// [tdengine] section; unknown keys (address, authtype) are ignored
	// by toml.Decode.
	assert.Equal(t, "root", c.TDengine.Username)
	assert.Equal(t, "taosdata", c.TDengine.Password)
}
// TestBakConfig exercises the fallback path in config.InitConfig: when
// the configured name ("aaa") cannot be found, InitConfig retries with
// the legacy "keeper.toml". The test only runs when a taoskeeper.toml
// exists to copy; otherwise it silently does nothing.
func TestBakConfig(t *testing.T) {
	isOk := copyConfigFile()
	if isOk {
		config.Name = "aaa"
		config.InitConfig()
		// Restore the package-level name for subsequent tests.
		config.Name = "taoskeeper"
	}
}
// copyConfigFile duplicates the installed taoskeeper config file to the
// legacy "keeper.toml" name in the same directory, so TestBakConfig can
// exercise the fallback lookup. Returns false when the source file is
// unavailable; panics on copy failures (acceptable in a test helper).
//
// Fixes: a stat error other than "not exist" previously fell through to
// os.Open and panicked — now any stat failure returns false; and the
// destination's Close error (which can hide a failed flush) is now
// checked instead of being discarded by defer.
func copyConfigFile() bool {
	var sourceFile string
	var destinationFile string
	// Config directory differs per platform (matches config.InitConfig).
	switch runtime.GOOS {
	case "windows":
		sourceFile = fmt.Sprintf("C:\\%s\\cfg\\%s.toml", version.CUS_NAME, "taoskeeper")
		destinationFile = fmt.Sprintf("C:\\%s\\cfg\\%s.toml", version.CUS_NAME, "keeper")
	default:
		sourceFile = fmt.Sprintf("/etc/%s/%s.toml", version.CUS_PROMPT, "taoskeeper")
		destinationFile = fmt.Sprintf("/etc/%s/%s.toml", version.CUS_PROMPT, "keeper")
	}
	// Treat any stat failure (missing file, permission, ...) as "nothing
	// to copy" rather than proceeding and panicking in os.Open.
	if _, err := os.Stat(sourceFile); err != nil {
		return false
	}
	source, err := os.Open(sourceFile) // open the source file
	if err != nil {
		panic(err)
	}
	defer source.Close()
	destination, err := os.Create(destinationFile) // create the destination file
	if err != nil {
		panic(err)
	}
	// Surface Close errors on the written file instead of ignoring them.
	defer func() {
		if cerr := destination.Close(); cerr != nil {
			panic(cerr)
		}
	}()
	if _, err = io.Copy(destination, source); err != nil { // copy the contents
		panic(err)
	}
	return true
}

View File

@ -0,0 +1,29 @@
package config
// MetricsConfig holds the "[metrics]" section of the configuration.
type MetricsConfig struct {
	Cluster string `toml:"cluster"`
	// Prefix is prepended to exported metric names.
	Prefix   string   `toml:"prefix"`
	Database Database `toml:"database"`
	// Tables lists plain (non-super) tables to export as well.
	Tables []string `toml:"tables"`
}

// TaosAdapter lists taosAdapter endpoint addresses.
type TaosAdapter struct {
	Address []string `toml:"address"`
}

// Metric describes how a single collected metric is exported.
type Metric struct {
	Alias  string            `toml:"alias"`
	Help   string            `toml:"help"`
	Unit   string            `toml:"unit"`
	Type   string            `toml:"type"`
	Labels map[string]string `toml:"labels"`
}

// Environment holds host-environment options.
type Environment struct {
	// InCGroup indicates the keeper runs inside a cgroup (container).
	InCGroup bool `toml:"incgroup"`
}

// Database names a TDengine database plus its creation options
// (vgroups, buffer, keep, cachemodel, ...).
type Database struct {
	Name    string                 `toml:"name"`
	Options map[string]interface{} `toml:"options"`
}

View File

@ -0,0 +1,8 @@
package log
import (
"testing"
)
// TestEmpty is a placeholder that keeps `go test` runnable for this
// package until real tests are added.
func TestEmpty(t *testing.T) {
}

View File

@ -0,0 +1,278 @@
package log
import (
	"bytes"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sort"
	"sync"
	"time"

	"github.com/sirupsen/logrus"
	rotatelogs "github.com/taosdata/file-rotatelogs/v2"
	"github.com/taosdata/taoskeeper/infrastructure/config"
	"github.com/taosdata/taoskeeper/version"
)
// logger is the package-wide logrus instance shared by all components.
var logger = logrus.New()

// ServerID tags every log line; derived from the process ID (randomID).
var ServerID = randomID()

var globalLogFormatter = &TaosLogFormatter{}

// finish is closed by the file-hook goroutine once it has flushed and
// closed its writer; exist is closed by Close() to request shutdown.
// NOTE(review): "exist" looks like a typo for "exit" — confirm before renaming.
var finish = make(chan struct{})
var exist = make(chan struct{})

// bufferPool recycles bytes.Buffers used while formatting log entries.
var bufferPool = &defaultPool{
	pool: &sync.Pool{
		New: func() interface{} {
			return new(bytes.Buffer)
		},
	},
}
// defaultPool adapts sync.Pool to logrus's buffer-pool interface,
// resetting buffers before returning them to the pool.
type defaultPool struct {
	pool *sync.Pool
}

// Put resets buf and returns it to the pool for reuse.
func (p *defaultPool) Put(buf *bytes.Buffer) {
	buf.Reset()
	p.pool.Put(buf)
}

// Get returns a (possibly recycled) empty buffer.
func (p *defaultPool) Get() *bytes.Buffer {
	return p.pool.Get().(*bytes.Buffer)
}
// FileHook is a logrus hook that batches formatted entries in an
// in-memory buffer and writes them to a file writer, flushing either on
// size/severity (see Fire) or on a timer (see NewFileHook). The embedded
// mutex guards buf against the background flush goroutine.
type FileHook struct {
	formatter logrus.Formatter
	writer    io.Writer
	buf       *bytes.Buffer
	sync.Mutex
}
// NewFileHook builds a FileHook around writer and starts a background
// goroutine that flushes the buffer every 5 seconds. When the package's
// "exist" channel is closed (by Close), the goroutine performs a final
// flush, closes the writer, stops the ticker and closes "finish" so
// Close can return.
func NewFileHook(formatter logrus.Formatter, writer io.WriteCloser) *FileHook {
	fh := &FileHook{formatter: formatter, writer: writer, buf: &bytes.Buffer{}}
	ticker := time.NewTicker(time.Second * 5)
	go func() {
		for {
			select {
			case <-ticker.C:
				//can be optimized by tryLock
				fh.Lock()
				if fh.buf.Len() > 0 {
					fh.flush()
				}
				fh.Unlock()
			case <-exist:
				// Shutdown: final flush, release resources, signal done.
				fh.Lock()
				fh.flush()
				fh.Unlock()
				writer.Close()
				ticker.Stop()
				close(finish)
				return
			}
		}
	}()
	return fh
}
// Levels reports that this hook receives entries of every log level.
func (f *FileHook) Levels() []logrus.Level {
	return logrus.AllLevels
}

// Fire formats entry and appends it to the shared buffer. The buffer is
// flushed inline when it exceeds 1024 bytes, or for fatal/panic entries
// (which terminate the process before the ticker could flush them);
// otherwise the background goroutine started in NewFileHook flushes it.
func (f *FileHook) Fire(entry *logrus.Entry) error {
	if entry.Buffer == nil {
		// Borrow a pooled buffer for formatting and return it afterwards.
		entry.Buffer = bufferPool.Get()
		defer func() {
			bufferPool.Put(entry.Buffer)
			entry.Buffer = nil
		}()
	}
	data, err := f.formatter.Format(entry)
	if err != nil {
		return err
	}
	f.Lock()
	f.buf.Write(data)
	if f.buf.Len() > 1024 || entry.Level == logrus.FatalLevel || entry.Level == logrus.PanicLevel {
		err = f.flush()
	}
	f.Unlock()
	return err
}

// flush writes the buffered bytes to the underlying writer and resets
// the buffer. The caller must hold f's mutex.
func (f *FileHook) flush() error {
	_, err := f.writer.Write(f.buf.Bytes())
	f.buf.Reset()
	return err
}
// once guards ConfigLog so the file hook is installed at most once.
var once sync.Once

// ConfigLog applies config.Conf to the package logger: sets the level,
// opens a size/time-rotated log file named
// "<prompt>keeper_<instanceID>_YYYYMMDD.log" under the configured path,
// writes a startup banner with version info, and attaches a FileHook.
// It panics when the level is invalid or the log file cannot be opened.
// Safe to call multiple times; only the first call has any effect.
func ConfigLog() {
	once.Do(func() {
		err := SetLevel(config.Conf.LogLevel)
		if err != nil {
			panic(err)
		}
		writer, err := rotatelogs.New(
			filepath.Join(config.Conf.Log.Path, fmt.Sprintf("%skeeper_%d_%%Y%%m%%d.log", version.CUS_PROMPT, config.Conf.InstanceID)),
			rotatelogs.WithRotationCount(config.Conf.Log.RotationCount),
			// Rotation time is fixed at 24h; log.rotationTime is deprecated.
			rotatelogs.WithRotationTime(time.Hour*24),
			rotatelogs.WithRotationSize(int64(config.Conf.Log.RotationSize)),
			rotatelogs.WithReservedDiskSize(int64(config.Conf.Log.ReservedDiskSize)),
			rotatelogs.WithRotateGlobPattern(filepath.Join(config.Conf.Log.Path, fmt.Sprintf("%skeeper_%d_*.log*", version.CUS_PROMPT, config.Conf.InstanceID))),
			rotatelogs.WithCompress(config.Conf.Log.Compress),
			rotatelogs.WithCleanLockFile(filepath.Join(config.Conf.Log.Path, fmt.Sprintf(".%skeeper_%d_rotate_lock", version.CUS_PROMPT, config.Conf.InstanceID))),
			rotatelogs.ForceNewFile(),
			rotatelogs.WithMaxAge(time.Hour*24*time.Duration(config.Conf.Log.KeepDays)),
		)
		if err != nil {
			panic(err)
		}
		// Startup banner so each new log file records build/config state.
		fmt.Fprintln(writer, "==================================================")
		fmt.Fprintln(writer, "                new log file")
		fmt.Fprintln(writer, "==================================================")
		fmt.Fprintf(writer, "config:%+v\n", config.Conf)
		fmt.Fprintf(writer, "%-45s%v\n", "version", version.Version)
		fmt.Fprintf(writer, "%-45s%v\n", "gitinfo", version.CommitID)
		fmt.Fprintf(writer, "%-45s%v\n", "buildinfo", version.BuildInfo)
		hook := NewFileHook(globalLogFormatter, writer)
		logger.AddHook(hook)
	})
}
// SetLevel parses a textual logrus level ("trace", "debug", "info",
// "warning", "error", ...) and applies it to the package logger.
// The parse error is returned unchanged when the name is not recognized.
func SetLevel(level string) error {
	parsed, err := logrus.ParseLevel(level)
	if err != nil {
		return err
	}
	logger.SetLevel(parsed)
	return nil
}
// GetLogger returns an entry bound to the given component ("model") name,
// which the formatter prints in the per-line model column.
func GetLogger(model string) *logrus.Entry {
	return logger.WithFields(logrus.Fields{config.ModelKey: model})
}
// init wires the package logger: shared buffer pool, the TDengine-style
// formatter, and stdout as the default output (a file hook is added
// later by ConfigLog).
func init() {
	logrus.SetBufferPool(bufferPool)
	logger.SetFormatter(globalLogFormatter)
	logger.SetOutput(os.Stdout)
}
// randomID builds the server identity tag used on every log line: the
// current process ID rendered as a zero-padded 8-digit decimal string.
func randomID() string {
	pid := os.Getpid()
	return fmt.Sprintf("%08d", pid)
}
// TaosLogFormatter renders entries in the TDengine text log layout; see
// its Format method for the exact line structure.
type TaosLogFormatter struct {
}
// Format renders a log entry in the TDengine text log layout:
//
//	MM/DD HH:MM:SS.micros SERVER_ID MODEL LEVEL [QID:0x... ]message[, k:v ...]
//
// The entry's buffer is reused when present. Extra fields are emitted
// after the message in sorted key order.
//
// Fixes: the keys slice was collected under a "sort the keys" comment
// but never sorted, so field order (Go map iteration) was random per
// line; sort.Strings now makes output deterministic. Also removed the
// unreachable ReqIDKey nil-check inside the loop — ReqIDKey is already
// skipped when collecting keys.
func (t *TaosLogFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	var b *bytes.Buffer
	if entry.Buffer != nil {
		b = entry.Buffer
	} else {
		b = &bytes.Buffer{}
	}
	b.Reset()
	// Timestamp, server id and component (model) columns.
	b.WriteString(entry.Time.Format("01/02 15:04:05.000000"))
	b.WriteByte(' ')
	b.WriteString(ServerID)
	b.WriteByte(' ')
	v, exist := entry.Data[config.ModelKey]
	if exist && v != nil {
		b.WriteString(v.(string))
		b.WriteByte(' ')
	} else {
		b.WriteString("CLI ")
	}
	switch entry.Level {
	case logrus.PanicLevel:
		b.WriteString("PANIC ")
	case logrus.FatalLevel:
		b.WriteString("FATAL ")
	case logrus.ErrorLevel:
		b.WriteString("ERROR ")
	case logrus.WarnLevel:
		b.WriteString("WARN ")
	case logrus.InfoLevel:
		b.WriteString("INFO ")
	case logrus.DebugLevel:
		b.WriteString("DEBUG ")
	case logrus.TraceLevel:
		b.WriteString("TRACE ")
	}
	// request id
	v, exist = entry.Data[config.ReqIDKey]
	if exist && v != nil {
		b.WriteString(config.ReqIDKey)
		b.WriteByte(':')
		fmt.Fprintf(b, "0x%x ", v)
	}
	// Message with a single trailing newline stripped (one is re-added
	// at the end of the line).
	if len(entry.Message) > 0 && entry.Message[len(entry.Message)-1] == '\n' {
		b.WriteString(entry.Message[:len(entry.Message)-1])
	} else {
		b.WriteString(entry.Message)
	}
	// Collect the remaining fields (model and request id already shown).
	keys := make([]string, 0, len(entry.Data))
	for k := range entry.Data {
		if k == config.ModelKey || k == config.ReqIDKey {
			continue
		}
		keys = append(keys, k)
	}
	// Sort so field order is stable across lines and runs.
	sort.Strings(keys)
	for _, k := range keys {
		v := entry.Data[k]
		b.WriteString(", ")
		b.WriteString(k)
		b.WriteByte(':')
		fmt.Fprintf(b, "%v", v)
	}
	b.WriteByte('\n')
	return b.Bytes(), nil
}
// IsDebug reports whether the package logger emits debug-level entries.
func IsDebug() bool {
	return logger.IsLevelEnabled(logrus.DebugLevel)
}

// GetLogLevel returns the package logger's current level.
func GetLogLevel() logrus.Level {
	return logger.Level
}
// Sentinels returned when debug timing is disabled, so callers avoid the
// cost of time.Now()/time.Since on hot paths.
var zeroTime = time.Time{}
var zeroDuration = time.Duration(0)

// GetLogNow returns the current time when isDebug is true, otherwise the
// zero time. Pair it with GetLogDuration to time debug-only sections.
func GetLogNow(isDebug bool) time.Time {
	if !isDebug {
		return zeroTime
	}
	return time.Now()
}

// GetLogDuration returns the elapsed time since s when isDebug is true,
// otherwise a zero duration.
func GetLogDuration(isDebug bool, s time.Time) time.Duration {
	if !isDebug {
		return zeroDuration
	}
	return time.Since(s)
}
// Close asks the file-hook goroutine to flush and stop (by closing the
// package "exist" channel), then waits until flushing completes or ctx
// expires — whichever comes first.
// NOTE(review): a second call would panic on the double close of
// "exist"; confirm callers invoke Close at most once.
func Close(ctx context.Context) {
	close(exist)
	select {
	case <-finish:
		return
	case <-ctx.Done():
		return
	}
}

View File

@ -0,0 +1,23 @@
package log
import (
"context"
"fmt"
"testing"
"time"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/taosdata/taoskeeper/infrastructure/config"
)
// TestConfigLog checks that ConfigLog honors the configured log level:
// with LogLevel "debug" the logger must be at debug level and IsDebug
// must report true. It also exercises the debug-timing helpers and the
// shutdown path.
func TestConfigLog(t *testing.T) {
	config.InitConfig()
	config.Conf.LogLevel = "debug"
	ConfigLog()
	debug, _ := logrus.ParseLevel("debug")
	assert.Equal(t, logger.Level, debug)
	assert.Equal(t, true, IsDebug())
	fmt.Print(GetLogNow(true), GetLogDuration(true, time.Now()))
	Close(context.Background())
}

View File

@ -0,0 +1,55 @@
package log
import (
"time"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/util"
)
// GinLog returns a gin middleware that logs one line per finished request,
// tagged with the request id taken from the X-QID header. Non-200 responses
// are logged at error level, everything else at info level.
func GinLog() gin.HandlerFunc {
	logger := GetLogger("WEB")

	return func(c *gin.Context) {
		qid := util.GetQid(c.GetHeader("X-QID"))

		logger := logger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)

		startTime := time.Now()
		c.Next()
		endTime := time.Now()
		latencyTime := endTime.Sub(startTime)

		// BUG FIX: the status code must be read AFTER c.Next(); before the
		// handler chain runs, Writer.Status() is always the default 200, so
		// the previous code logged 200 for every request and the error
		// branch below was unreachable.
		statusCode := c.Writer.Status()
		reqMethod := c.Request.Method
		reqUri := c.Request.RequestURI
		clientIP := c.ClientIP()

		if statusCode != 200 {
			logger.Errorf("finish request, status_code:%3d, latency:%v, client_ip:%s, method:%s, uri:%s", statusCode, latencyTime, clientIP, reqMethod, reqUri)
			return
		}

		logger.Infof("finish request, status_code:%3d, latency:%v, client_ip:%s, method:%s, uri:%s", statusCode, latencyTime, clientIP, reqMethod, reqUri)
	}
}
// recoverLog adapts a logrus logger to io.Writer so gin's recovery
// middleware can write panic traces through the structured logger.
type recoverLog struct {
	logger logrus.FieldLogger
}
// Write forwards recovered panic output to the underlying logger as a
// single error line; it always reports the full payload as written.
func (r *recoverLog) Write(p []byte) (n int, err error) {
	msg := string(p)
	r.logger.Errorln(msg)
	return len(p), nil
}
// GinRecoverLog returns a gin middleware that recovers from handler panics
// and routes the stack trace into the "WEB" logger.
func GinRecoverLog() gin.HandlerFunc {
	logger := GetLogger("WEB")
	return func(c *gin.Context) {
		w := &recoverLog{logger: logger}
		gin.RecoveryWithWriter(w)(c)
	}
}

11
tools/keeper/main.go Normal file
View File

@ -0,0 +1,11 @@
package main
import (
"github.com/taosdata/taoskeeper/system"
)
// main boots taoskeeper: Init builds the configured HTTP server and Start
// runs it under the service-manager wrapper until shutdown.
func main() {
	r := system.Init()
	system.Start(r)
	// config.IsEnterprise
}

View File

@ -0,0 +1,99 @@
package monitor
import (
"math"
"os"
"runtime"
"github.com/shirou/gopsutil/v3/mem"
"github.com/shirou/gopsutil/v3/process"
"github.com/taosdata/taoskeeper/util"
)
// SysCollector reports this process's CPU and memory usage as percentages.
// Implementations differ in whether limits come from the host (Normal) or
// from cgroup quotas (CGroup).
type SysCollector interface {
	CpuPercent() (float64, error)
	MemPercent() (float64, error)
}
// NormalCollector samples usage for the current process against the whole
// machine's CPU count and memory.
type NormalCollector struct {
	p *process.Process
}
// NewNormalCollector builds a collector bound to the current process.
func NewNormalCollector() (*NormalCollector, error) {
	proc, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		return nil, err
	}
	collector := &NormalCollector{p: proc}
	return collector, nil
}
// CpuPercent returns the process CPU usage normalized by the machine's
// logical core count (0 interval = since the last call).
func (n *NormalCollector) CpuPercent() (float64, error) {
	pct, err := n.p.Percent(0)
	if err != nil {
		return 0, err
	}
	normalized := pct / float64(runtime.NumCPU())
	return normalized, nil
}
// MemPercent returns the process's share of total machine memory.
func (n *NormalCollector) MemPercent() (float64, error) {
	pct, err := n.p.MemoryPercent()
	if err != nil {
		return 0, err
	}
	return float64(pct), nil
}
// cgroup v1 pseudo-files holding the container's CPU quota/period and
// memory limit; used by CGroupCollector to normalize usage.
const (
	CGroupCpuQuotaPath  = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
	CGroupCpuPeriodPath = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
	CGroupMemLimitPath  = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
)
// CGroupCollector samples usage for the current process against the
// container's cgroup CPU quota and memory limit rather than host totals.
type CGroupCollector struct {
	p           *process.Process
	cpuCore     float64 // effective cores = quota / period
	totalMemory uint64  // min(cgroup limit, machine total)
}
// NewCGroupCollector builds a collector that normalizes CPU by the cgroup
// quota/period ratio and memory by min(cgroup limit, machine total).
// Any unreadable cgroup file aborts construction with the read error.
func NewCGroupCollector() (*CGroupCollector, error) {
	p, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		return nil, err
	}
	cpuPeriod, err := util.ReadUint(CGroupCpuPeriodPath)
	if err != nil {
		return nil, err
	}
	cpuQuota, err := util.ReadUint(CGroupCpuQuotaPath)
	if err != nil {
		return nil, err
	}
	// Effective core budget for this container.
	cpuCore := float64(cpuQuota) / float64(cpuPeriod)
	limitMemory, err := util.ReadUint(CGroupMemLimitPath)
	if err != nil {
		return nil, err
	}
	machineMemory, err := mem.VirtualMemory()
	if err != nil {
		return nil, err
	}
	// An unset cgroup limit is a huge number; clamp to physical memory.
	totalMemory := uint64(math.Min(float64(limitMemory), float64(machineMemory.Total)))
	return &CGroupCollector{p: p, cpuCore: cpuCore, totalMemory: totalMemory}, nil
}
// CpuPercent returns process CPU usage normalized by the cgroup core budget.
func (c *CGroupCollector) CpuPercent() (float64, error) {
	raw, err := c.p.Percent(0)
	if err != nil {
		return 0, err
	}
	return raw / c.cpuCore, nil
}
// MemPercent returns the process RSS as a percentage of the effective
// memory limit.
func (c *CGroupCollector) MemPercent() (float64, error) {
	info, err := c.p.MemoryInfo()
	if err != nil {
		return 0, err
	}
	pct := 100 * float64(info.RSS) / float64(c.totalMemory)
	return pct, nil
}

View File

@ -0,0 +1,8 @@
package monitor
import (
"testing"
)
// TestEmpty is a placeholder so the package has at least one test in CI.
func TestEmpty(t *testing.T) {
}

View File

@ -0,0 +1,89 @@
package monitor
import (
"context"
"fmt"
"os"
"time"
"github.com/taosdata/taoskeeper/api"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/util"
"github.com/taosdata/taoskeeper/util/pool"
)
// Package-level logger for the monitor subsystem.
var logger = log.GetLogger("MON")

// StartMonitor begins self-monitoring for this keeper instance: it
// registers a consumer of SysMonitor status updates that persists CPU%,
// mem% and the request counter into the `keeper_monitor` super table, then
// kicks off periodic collection at conf.RotationInterval.
// If identity is empty, "<hostname>:<port>" (hostname capped at 40 chars)
// is used instead.
func StartMonitor(identity string, conf *config.Config, reporter *api.Reporter) {
	if len(identity) == 0 {
		hostname, err := os.Hostname()
		if err != nil {
			logger.Errorf("can not get hostname, error:%s", err)
		}
		if len(hostname) > 40 {
			hostname = hostname[:40]
		}
		identity = fmt.Sprintf("%s:%d", hostname, conf.Port)
	}
	systemStatus := make(chan SysStatus)
	_ = pool.GoroutinePool.Submit(func() {
		var (
			cpuPercent  float64
			memPercent  float64
			totalReport int
		)
		for status := range systemStatus {
			// Keep the last good sample when a collector errors.
			if status.CpuError == nil {
				cpuPercent = status.CpuPercent
			}
			if status.MemError == nil {
				memPercent = status.MemPercent
			}
			totalResp := reporter.GetTotalRep()
			// Read-then-reset the counter via CAS; retry a few times under
			// contention, then force-store 0 as a last resort.
			for i := 0; i < 3; i++ {
				totalReport = totalResp.Load().(int)
				if totalResp.CompareAndSwap(totalReport, 0) {
					break
				}
				logger.Warn("Reset keeper_monitor total resp via cas fail! Maybe to many concurrent ")
				reporter.GetTotalRep().Store(0)
			}
			// Child-table name: identity itself if short enough, else its md5.
			var kn string
			if len(identity) <= util.MAX_TABLE_NAME_LEN {
				kn = util.ToValidTableName(identity)
			} else {
				kn = util.GetMd5HexStr(identity)
			}
			sql := fmt.Sprintf("insert into `km_%s` using keeper_monitor tags ('%s') values ( now, "+
				" %f, %f, %d)", kn, identity, cpuPercent, memPercent, totalReport)
			// NOTE(review): a fresh connection is opened per sample and a
			// connect failure exits this goroutine — confirm intended.
			conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host,
				conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl)
			if err != nil {
				logger.Errorf("connect to database error, msg:%s", err)
				return
			}
			ctx := context.Background()
			if _, err = conn.Exec(ctx, sql, util.GetQidOwn()); err != nil {
				logger.Errorf("execute sql:%s, error:%s", sql, err)
			}
			if err := conn.Close(); err != nil {
				logger.Errorf("close connection error, msg:%s", err)
			}
		}
	})
	SysMonitor.Register(systemStatus)
	interval, err := time.ParseDuration(conf.RotationInterval)
	if err != nil {
		panic(err)
	}
	Start(interval, conf.Env.InCGroup)
}

View File

@ -0,0 +1,58 @@
package monitor
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/taosdata/go-utils/web"
"github.com/taosdata/taoskeeper/api"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/util"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
)
// TestStart runs the monitor end-to-end against a local taosd: it starts
// collection at a 1s interval, lets one cycle complete, then deregisters
// all consumers and drops the scratch database.
func TestStart(t *testing.T) {
	conf := config.InitConfig()
	if conf == nil {
		panic("config error")
	}
	conf.Env.InCGroup = true
	// Fall back to the host collector when no cgroup hierarchy exists.
	cpuCgroupDir := "/sys/fs/cgroup/cpu"
	if _, err := os.Stat(cpuCgroupDir); os.IsNotExist(err) {
		conf.Env.InCGroup = false
	}
	log.ConfigLog()
	router := web.CreateRouter(false, &conf.Cors, false)
	conf.Metrics.Database.Name = "monitor"
	reporter := api.NewReporter(conf)
	reporter.Init(router)
	conf.RotationInterval = "1s"
	StartMonitor("", conf, reporter)
	time.Sleep(2 * time.Second)
	for k, _ := range SysMonitor.outputs {
		SysMonitor.Deregister(k)
	}
	conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl)
	assert.NoError(t, err)
	conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn())
}
// TestParseUint pins util.ParseUint's contract: negatives clamp to 0 with
// no error, valid values parse, and overflow surfaces strconv's range error.
func TestParseUint(t *testing.T) {
	num, err := util.ParseUint("-1", 10, 8)
	assert.Equal(t, nil, err)
	assert.Equal(t, uint64(0), num)
	num, err = util.ParseUint("0", 10, 8)
	assert.Equal(t, nil, err)
	assert.Equal(t, uint64(0), num)
	num, err = util.ParseUint("257", 10, 8)
	assert.Equal(t, "strconv.ParseUint: parsing \"257\": value out of range", err.Error())
	assert.Equal(t, uint64(0), num)
}

View File

@ -0,0 +1,97 @@
package monitor
import (
"math"
"runtime"
"sync"
"time"
"github.com/taosdata/taoskeeper/util/pool"
)
// SysStatus is one snapshot of process health broadcast to registered
// consumers; per-metric errors are carried alongside the values.
type SysStatus struct {
	CollectTime     time.Time
	CpuPercent      float64
	CpuError        error
	MemPercent      float64
	MemError        error
	GoroutineCounts int
	ThreadCounts    int
}
// sysMonitor periodically samples a SysCollector and fans the snapshot out
// to subscriber channels; the embedded mutex guards the outputs map.
type sysMonitor struct {
	sync.Mutex
	collectDuration time.Duration
	collector       SysCollector
	status          *SysStatus
	outputs         map[chan<- SysStatus]struct{}
	ticker          *time.Ticker
}
// collect takes one sample and broadcasts it. Sends are non-blocking: a
// subscriber that is not ready simply misses this snapshot. Samples with
// Inf/NaN percentages are dropped entirely.
func (s *sysMonitor) collect() {
	s.status.CollectTime = time.Now()
	s.status.CpuPercent, s.status.CpuError = s.collector.CpuPercent()
	s.status.MemPercent, s.status.MemError = s.collector.MemPercent()
	s.status.GoroutineCounts = runtime.NumGoroutine()
	s.status.ThreadCounts, _ = runtime.ThreadCreateProfile(nil)
	// skip when inf or nan
	if math.IsInf(s.status.CpuPercent, 0) || math.IsNaN(s.status.CpuPercent) ||
		math.IsInf(s.status.MemPercent, 0) || math.IsNaN(s.status.MemPercent) {
		return
	}
	s.Lock()
	for output := range s.outputs {
		select {
		case output <- *s.status: // value copy — receivers cannot mutate shared state
		default: // slow consumer: drop this snapshot
		}
	}
	s.Unlock()
}
// Register subscribes c to future status broadcasts, lazily allocating
// the subscriber set on first use.
func (s *sysMonitor) Register(c chan<- SysStatus) {
	s.Lock()
	defer s.Unlock()
	if s.outputs == nil {
		s.outputs = make(map[chan<- SysStatus]struct{})
	}
	s.outputs[c] = struct{}{}
}
// Deregister unsubscribes c; unknown channels are ignored.
func (s *sysMonitor) Deregister(c chan<- SysStatus) {
	s.Lock()
	defer s.Unlock()
	if s.outputs != nil {
		delete(s.outputs, c)
	}
}
// SysMonitor is the process-wide singleton monitor instance.
var SysMonitor = &sysMonitor{status: &SysStatus{}}
// Start configures the global SysMonitor with the appropriate collector
// (cgroup-aware inside containers, host-wide otherwise), takes an immediate
// first sample, and schedules periodic collection on the shared pool.
// NOTE(review): on constructor failure the collector stays nil and only an
// error is logged — subsequent collect() calls would dereference nil;
// confirm whether a fatal exit is preferred.
func Start(collectDuration time.Duration, inCGroup bool) {
	SysMonitor.collectDuration = collectDuration
	if inCGroup {
		collector, err := NewCGroupCollector()
		if err != nil {
			// BUG FIX: this message previously said "new normal group
			// controller error" — copy-pasted from the non-cgroup branch,
			// which made the log misattribute the failing collector.
			logger.Errorf("new cgroup collector error, msg:%s", err)
		}
		SysMonitor.collector = collector
	} else {
		collector, err := NewNormalCollector()
		if err != nil {
			logger.Errorf("new normal controller error, msg:%s", err)
		}
		SysMonitor.collector = collector
	}
	// Prime an immediate sample before the ticker's first fire.
	SysMonitor.collect()
	SysMonitor.ticker = time.NewTicker(SysMonitor.collectDuration)
	pool.GoroutinePool.Submit(func() {
		for range SysMonitor.ticker.C {
			SysMonitor.collect()
		}
	})
}

View File

@ -0,0 +1,55 @@
package process
import (
"context"
"fmt"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/util"
)
// Logger for the metric-table expansion phase.
var builderLogger = log.GetLogger("BLD")

// ExpandMetricsFromConfig returns the set of tables to export: the tables
// explicitly listed in cfg plus every super table found in cfg's database
// (minus the exclusions baked into GetStableNameListSql). Duplicates are
// collapsed via the returned set.
func ExpandMetricsFromConfig(ctx context.Context, conn *db.Connector, cfg *config.MetricsConfig) (tables map[string]struct{}, err error) {
	tables = make(map[string]struct{})
	for _, name := range cfg.Tables {
		builderLogger.Debug("normal table: ", name)
		_, exist := tables[name]
		if exist {
			builderLogger.Debug(name, "is exist in config")
			continue
		}
		tables[name] = struct{}{}
	}
	sql := fmt.Sprintf(GetStableNameListSql(), cfg.Database.Name)
	data, err := conn.Query(ctx, sql, util.GetQidOwn())
	if err != nil {
		return nil, err
	}
	builderLogger.Debugf("show stables:%s", sql)
	for _, info := range data.Data {
		// First column of the result is the super-table name.
		name := info[0].(string)
		builderLogger.Debug("stable:", info)
		_, exist := tables[name]
		if exist {
			builderLogger.Debug(name, "is exist in config")
			continue
		}
		tables[name] = struct{}{}
	}
	return
}
func GetStableNameListSql() string {
return "select stable_name from information_schema.ins_stables " +
" where db_name = '%s' " +
" and (stable_name not like 'taosx\\_%%')" +
" and (stable_name not like 'taosadapter%%')" +
" and (stable_name != 'temp_dir' and stable_name != 'data_dir')"
}

View File

@ -0,0 +1,8 @@
package process
import (
"testing"
)
// TestEmpty is a placeholder so the package has at least one test in CI.
func TestEmpty(t *testing.T) {
}

View File

@ -0,0 +1,666 @@
package process
import (
"context"
"errors"
"fmt"
"math"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
taosError "github.com/taosdata/driver-go/v3/errors"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/util"
"github.com/taosdata/taoskeeper/util/pool"
)
// Logger for the metric-processing handler.
var logger = log.GetLogger("HND")

// metricNameMap rewrites new-style taosd metric FQ names to the legacy
// names that existing dashboards expect (see buildFQName).
var metricNameMap = map[string]string{
	"taosd_cluster_basic_first_ep":                "cluster_info_first_ep",
	"taosd_cluster_basic_first_ep_dnode_id":       "cluster_info_first_ep_dnode_id",
	"taosd_cluster_basic_cluster_version":         "cluster_info_version",
	"taosd_cluster_info_cluster_uptime":           "cluster_info_master_uptime",
	"taosd_cluster_info_dbs_total":                "cluster_info_dbs_total",
	"taosd_cluster_info_tbs_total":                "cluster_info_tbs_total",
	"taosd_cluster_info_stbs_total":               "cluster_info_stbs_total",
	"taosd_cluster_info_dnodes_total":             "cluster_info_dnodes_total",
	"taosd_cluster_info_dnodes_alive":             "cluster_info_dnodes_alive",
	"taosd_cluster_info_mnodes_total":             "cluster_info_mnodes_total",
	"taosd_cluster_info_mnodes_alive":             "cluster_info_mnodes_alive",
	"taosd_cluster_info_vgroups_total":            "cluster_info_vgroups_total",
	"taosd_cluster_info_vgroups_alive":            "cluster_info_vgroups_alive",
	"taosd_cluster_info_vnodes_total":             "cluster_info_vnodes_total",
	"taosd_cluster_info_vnodes_alive":             "cluster_info_vnodes_alive",
	"taosd_cluster_info_connections_total":        "cluster_info_connections_total",
	"taosd_cluster_info_topics_total":             "cluster_info_topics_total",
	"taosd_cluster_info_streams_total":            "cluster_info_streams_total",
	"taosd_cluster_info_grants_expire_time":       "grants_info_expire_time",
	"taosd_cluster_info_grants_timeseries_used":   "grants_info_timeseries_used",
	"taosd_cluster_info_grants_timeseries_total":  "grants_info_timeseries_total",
	"taosd_dnodes_info_uptime":                    "dnodes_info_uptime",
	"taosd_dnodes_info_cpu_engine":                "dnodes_info_cpu_engine",
	"taosd_dnodes_info_cpu_system":                "dnodes_info_cpu_system",
	"taosd_dnodes_info_cpu_cores":                 "dnodes_info_cpu_cores",
	"taosd_dnodes_info_mem_engine":                "dnodes_info_mem_engine",
	"taosd_dnodes_info_mem_free":                  "dnodes_info_mem_system",
	"taosd_dnodes_info_mem_total":                 "dnodes_info_mem_total",
	"taosd_dnodes_info_disk_engine":               "dnodes_info_disk_engine",
	"taosd_dnodes_info_disk_used":                 "dnodes_info_disk_used",
	"taosd_dnodes_info_disk_total":                "dnodes_info_disk_total",
	"taosd_dnodes_info_system_net_in":             "dnodes_info_net_in",
	"taosd_dnodes_info_system_net_out":            "dnodes_info_net_out",
	"taosd_dnodes_info_io_read":                   "dnodes_info_io_read",
	"taosd_dnodes_info_io_write":                  "dnodes_info_io_write",
	"taosd_dnodes_info_io_read_disk":              "dnodes_info_io_read_disk",
	"taosd_dnodes_info_io_write_disk":             "dnodes_info_io_write_disk",
	"taosd_dnodes_info_vnodes_num":                "dnodes_info_vnodes_num",
	"taosd_dnodes_info_masters":                   "dnodes_info_masters",
	"taosd_dnodes_info_has_mnode":                 "dnodes_info_has_mnode",
	"taosd_dnodes_info_has_qnode":                 "dnodes_info_has_qnode",
	"taosd_dnodes_info_has_snode":                 "dnodes_info_has_snode",
	"taosd_dnodes_info_has_bnode":                 "dnodes_info_has_bnode",
	"taosd_dnodes_info_errors":                    "dnodes_info_errors",
	"taosd_dnodes_info_error_log_count":           "log_summary_error",
	"taosd_dnodes_info_info_log_count":            "log_summary_info",
	"taosd_dnodes_info_debug_log_count":           "log_summary_debug",
	"taosd_dnodes_info_trace_log_count":           "log_summary_trace",
	"taosd_dnodes_status_status":                  "d_info_status",
	"taosd_mnodes_info_role":                      "m_info_role",
}

// metricTypeMap fixes the Prometheus collect type for known metrics;
// anything absent falls back to exchangeDBType on the column's SQL type.
var metricTypeMap = map[string]CollectType{
	"taosd_cluster_basic_first_ep":                Info,
	"taosd_cluster_basic_first_ep_dnode_id":       Counter,
	"taosd_cluster_basic_cluster_version":         Info,
	"taosd_cluster_info_cluster_uptime":           Gauge,
	"taosd_cluster_info_dbs_total":                Counter,
	"taosd_cluster_info_tbs_total":                Counter,
	"taosd_cluster_info_stbs_total":               Counter,
	"taosd_cluster_info_dnodes_total":             Counter,
	"taosd_cluster_info_dnodes_alive":             Counter,
	"taosd_cluster_info_mnodes_total":             Counter,
	"taosd_cluster_info_mnodes_alive":             Counter,
	"taosd_cluster_info_vgroups_total":            Counter,
	"taosd_cluster_info_vgroups_alive":            Counter,
	"taosd_cluster_info_vnodes_total":             Counter,
	"taosd_cluster_info_vnodes_alive":             Counter,
	"taosd_cluster_info_connections_total":        Counter,
	"taosd_cluster_info_topics_total":             Counter,
	"taosd_cluster_info_streams_total":            Counter,
	"taosd_cluster_info_grants_expire_time":       Counter,
	"taosd_cluster_info_grants_timeseries_used":   Counter,
	"taosd_cluster_info_grants_timeseries_total":  Counter,
	"taosd_dnodes_info_uptime":                    Gauge,
	"taosd_dnodes_info_cpu_engine":                Gauge,
	"taosd_dnodes_info_cpu_system":                Gauge,
	"taosd_dnodes_info_cpu_cores":                 Gauge,
	"taosd_dnodes_info_mem_engine":                Counter,
	"taosd_dnodes_info_mem_free":                  Counter,
	"taosd_dnodes_info_mem_total":                 Counter,
	"taosd_dnodes_info_disk_engine":               Counter,
	"taosd_dnodes_info_disk_used":                 Counter,
	"taosd_dnodes_info_disk_total":                Counter,
	"taosd_dnodes_info_system_net_in":             Gauge,
	"taosd_dnodes_info_system_net_out":            Gauge,
	"taosd_dnodes_info_io_read":                   Gauge,
	"taosd_dnodes_info_io_write":                  Gauge,
	"taosd_dnodes_info_io_read_disk":              Gauge,
	"taosd_dnodes_info_io_write_disk":             Gauge,
	"taosd_dnodes_info_vnodes_num":                Counter,
	"taosd_dnodes_info_masters":                   Counter,
	"taosd_dnodes_info_has_mnode":                 Counter,
	"taosd_dnodes_info_has_qnode":                 Counter,
	"taosd_dnodes_info_has_snode":                 Counter,
	"taosd_dnodes_info_has_bnode":                 Counter,
	"taosd_dnodes_info_errors":                    Counter,
	"taosd_dnodes_info_error_log_count":           Counter,
	"taosd_dnodes_info_info_log_count":            Counter,
	"taosd_dnodes_info_debug_log_count":           Counter,
	"taosd_dnodes_info_trace_log_count":           Counter,
	"taosd_dnodes_status_status":                  Info,
	"taosd_mnodes_info_role":                      Info,
}
// CollectType is how a column is exposed to Prometheus: counter, gauge,
// info (string value as a label) or summary.
type CollectType string

const (
	Counter CollectType = "counter"
	Gauge   CollectType = "gauge"
	Info    CollectType = "info"
	Summary CollectType = "summary"
)
// Processor turns TDengine monitoring tables into Prometheus metrics.
// It discovers table schemas once (Prepare), then refreshes cached metric
// values from last_row queries (Process).
type Processor struct {
	prefix           string
	db               string
	tableMap         map[string]*Table  //tableName:*Table{}
	metricMap        map[string]*Metric //Fqname:*Metric{}
	tableList        []string
	ctx              context.Context
	rotationInterval time.Duration
	exitChan         chan struct{}
	dbConn           *db.Connector
	summaryTable     map[string]*Table // tables exported as summaries, keyed by name
	tables           map[string]struct{}
}
// Describe implements prometheus.Collector by emitting every known
// metric descriptor.
func (p *Processor) Describe(descs chan<- *prometheus.Desc) {
	for _, m := range p.metricMap {
		descs <- m.Desc
	}
}
// Collect implements prometheus.Collector: it converts each cached metric's
// last values into typed Prometheus samples. Gauge/Counter values are
// numeric; Info values are strings exposed via a "value" label on a gauge
// set to 1; Summary is currently not emitted here.
func (p *Processor) Collect(metrics chan<- prometheus.Metric) {
	for _, metric := range p.metricMap {
		logger.Tracef("metric name:%v", metric.FQName)
		switch metric.Type {
		case Gauge:
			gv := prometheus.NewGaugeVec(prometheus.GaugeOpts{
				Name:        metric.FQName,
				Help:        metric.Help,
				ConstLabels: metric.ConstLabels,
			}, metric.Variables)
			for _, value := range metric.GetValue() {
				if value.Value == nil {
					continue
				}
				g := gv.With(value.Label)
				g.Set(value.Value.(float64))
				metrics <- g
			}
		case Counter:
			cv := prometheus.NewCounterVec(prometheus.CounterOpts{
				Name:        metric.FQName,
				Help:        metric.Help,
				ConstLabels: metric.ConstLabels,
			}, metric.Variables)
			for _, value := range metric.GetValue() {
				if value.Value == nil {
					continue
				}
				v := i2float(value.Value)
				// Prometheus counters reject negative increments.
				if v < 0 {
					logger.Warningf("negative value for prometheus counter. label %v value %v",
						value.Label, value.Value)
					continue
				}
				c := cv.With(value.Label)
				c.Add(v)
				metrics <- c
			}
		case Info:
			// String metrics: carry the string in a dedicated "value" label.
			lbs := []string{"value"}
			lbs = append(lbs, metric.Variables...)
			gf := prometheus.NewGaugeVec(prometheus.GaugeOpts{
				Name:        metric.FQName,
				Help:        metric.Help,
				ConstLabels: metric.ConstLabels,
			}, lbs)
			for _, value := range metric.GetValue() {
				if value == nil {
					continue
				}
				v := make(map[string]string, len(value.Label)+1)
				v["value"] = value.Value.(string)
				for k, l := range value.Label {
					v[k] = l
				}
				g := gf.With(v)
				g.Set(1)
				metrics <- g
			}
		case Summary:
		}
	}
}
// Table caches one monitored table's schema: its timestamp column name,
// tag columns (Variables) and exported data columns.
type Table struct {
	tsName     string
	Variables  []string
	ColumnList []string
}
// Metric is one exported series definition plus its most recent values.
// LastValue is guarded by the embedded RWMutex (see SetValue/GetValue).
type Metric struct {
	sync.RWMutex
	FQName      string
	Help        string
	Type        CollectType
	ColType     int
	ConstLabels map[string]string
	Variables   []string
	Desc        *prometheus.Desc
	LastValue   []*Value
}
// SetValue atomically replaces the metric's cached values.
func (m *Metric) SetValue(v []*Value) {
	m.Lock()
	m.LastValue = v
	m.Unlock()
}
// GetValue returns the metric's cached values under a read lock.
func (m *Metric) GetValue() []*Value {
	m.RLock()
	v := m.LastValue
	m.RUnlock()
	return v
}
// Value is one labeled sample; Value is float64 for numeric metrics,
// string for Info metrics, or nil when the source column was NULL.
type Value struct {
	Label map[string]string
	Value interface{}
}
// NewProcessor connects to TDengine, expands the configured metric tables,
// then eagerly runs Prepare (schema discovery) and a first Process pass.
// Any setup failure panics — the exporter cannot run without it.
func NewProcessor(conf *config.Config) *Processor {
	conn, err := db.NewConnector(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl)
	if err != nil {
		panic(err)
	}
	interval, err := time.ParseDuration(conf.RotationInterval)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	tables, err := ExpandMetricsFromConfig(ctx, conn, &conf.Metrics)
	if err != nil {
		panic(err)
	}
	p := &Processor{
		prefix:           conf.Metrics.Prefix,
		db:               conf.Metrics.Database.Name,
		tableMap:         map[string]*Table{},
		metricMap:        map[string]*Metric{},
		ctx:              ctx,
		rotationInterval: interval,
		exitChan:         make(chan struct{}),
		dbConn:           conn,
		summaryTable:     map[string]*Table{"taosadapter_restful_http_request_summary_milliseconds": nil},
		tables:           tables,
	}
	p.Prepare()
	p.Process()
	return p
}
// Prepare discovers each table's schema concurrently (one pool task per
// table): it runs `describe`, splits columns into tags vs data columns,
// derives each column's collect type, and registers a Metric per data
// column plus a Table entry. Tables that fail to describe are skipped.
// Shared maps are guarded by a local mutex; wg ensures completion.
func (p *Processor) Prepare() {
	locker := sync.RWMutex{}
	wg := sync.WaitGroup{}
	wg.Add(len(p.tables))

	for tn := range p.tables {
		tableName := tn // capture per-iteration copy for the closure

		err := pool.GoroutinePool.Submit(func() {
			defer wg.Done()
			data, err := p.dbConn.Query(p.ctx, fmt.Sprintf("describe %s", p.withDBName(tableName)), util.GetQidOwn())
			if err != nil {
				var tdEngineError *taosError.TaosError
				if errors.As(err, &tdEngineError) {
					logger.Errorf("table %s not exist, skip it, error:%s", tableName, err)
				} else {
					logger.Errorf("could not get table %s metadata, skip it, error:%s", tableName, err)
				}
				return
			}

			tags := make([]string, 0, len(data.Data))
			columns := make([]string, 0, len(data.Data))
			typeList := make([]string, 0, len(data.Data))
			columnMap := make(map[string]struct{}, len(data.Data))
			variablesMap := make(map[string]struct{}, len(data.Data))
			for _, info := range data.Data {
				// describe row: [name, type, length, note]; non-empty note == TAG.
				if info[3].(string) != "" {
					variable := info[0].(string)
					tags = append(tags, variable)
					variablesMap[variable] = struct{}{}
				} else {
					column := info[0].(string)
					columns = append(columns, column)
					typeList = append(typeList, info[1].(string))
					columnMap[column] = struct{}{}
				}
			}

			// metrics := make([]*Metric, 0, len(columns))
			// newMetrics := make(map[string]*Metric, len(columns))
			columnList := make([]string, 0, len(columns))

			timestampColumn := "ts"
			_, exist := p.summaryTable[tableName]
			for i, column := range columns {
				if _, columnExist := variablesMap[column]; columnExist {
					continue
				}

				if typeList[i] == "TIMESTAMP" {
					timestampColumn = column
					continue
				}

				columnName, metricType := "", Summary
				if !exist {
					columnName = column
					// Explicit type override wins; otherwise infer from SQL type.
					if _, ok := metricTypeMap[tableName+"_"+columnName]; ok {
						metricType = metricTypeMap[tableName+"_"+columnName]
					} else {
						metricType = exchangeDBType(typeList[i])
					}

					// hard-coded for compatibility; to be optimized later
					if strings.HasSuffix(columnName, "role") {
						metricType = Info
					}
				}

				labels := make(map[string]string)

				fqName := p.buildFQName(tableName, columnName)
				pDesc := prometheus.NewDesc(fqName, "", nil, labels)
				metric := &Metric{
					Type:        metricType,
					Desc:        pDesc,
					FQName:      fqName,
					Help:        "",
					ConstLabels: labels,
					Variables:   tags,
				}
				// metrics = append(metrics, metric)
				// newMetrics[column] = metric

				locker.Lock()
				p.metricMap[fqName] = metric
				locker.Unlock()

				columnList = append(columnList, column)
			}

			t := &Table{
				tsName:     timestampColumn,
				Variables:  tags,
				ColumnList: columnList,
			}

			locker.Lock()
			p.tableMap[tableName] = t
			p.tableList = append(p.tableList, tableName)
			locker.Unlock()
		})

		if err != nil {
			panic(err)
		}
	}

	wg.Wait()
}
// withDBName returns "<db>.<table>" using a pooled buffer.
func (p *Processor) withDBName(tableName string) string {
	b := pool.BytesPoolGet()
	// BUG FIX: the buffer was never returned to the pool (unlike the
	// sibling buildFQName), so every call leaked a pooled buffer.
	// b.String() copies the bytes, so returning after Put is safe.
	defer pool.BytesPoolPut(b)
	b.WriteString(p.db)
	b.WriteByte('.')
	b.WriteString(tableName)
	return b.String()
}
// Process refreshes every metric's cached values: for each prepared table
// it builds a `select last_row(...)` query (grouped by tags when present),
// converts each result row into labeled Values per column — mapping role/
// status codes to strings for Info metrics and numbers to float64 otherwise
// — and swaps the results into the metric cache.
func (p *Processor) Process() {
	// clear all cached metric values first
	for _, metric := range p.metricMap {
		metric.SetValue(nil)
	}
	for _, tableName := range p.tableList {
		tagIndex := 0
		hasTag := false
		b := pool.BytesPoolGet()
		b.WriteString("select ")

		table := p.tableMap[tableName]
		columns := table.ColumnList

		for i, column := range columns {
			b.WriteString("last_row(`" + column + "`) as `" + column + "`")
			if i != len(columns)-1 {
				b.WriteByte(',')
			}
		}

		if len(table.Variables) > 0 {
			tagIndex = len(columns)
			for _, tag := range table.Variables {
				b.WriteString(", last_row(`" + tag + "`) as `" + tag + "`")
			}
		}

		b.WriteString(" from ")
		b.WriteString(p.withDBName(tableName))

		// Only look at rows from the last minute.
		b.WriteString(" WHERE " + p.tableMap[tableName].tsName + " > (NOW() - 1m) ")

		if len(table.Variables) > 0 {
			tagIndex = len(columns)
			b.WriteString(" group by ")
			for i, tag := range table.Variables {
				b.WriteString("`" + tag + "`")
				if i != len(table.Variables)-1 {
					b.WriteByte(',')
				}
			}
		}
		sql := b.String()
		pool.BytesPoolPut(b)
		data, err := p.dbConn.Query(p.ctx, sql, util.GetQidOwn())
		logger.Debug(sql)
		if err != nil {
			logger.WithError(err).Errorln("select data sql:", sql)
			continue
		}
		if tagIndex > 0 {
			hasTag = true
		}
		if len(data.Data) == 0 {
			continue
		}
		values := make([][]*Value, len(table.ColumnList))
		for _, row := range data.Data {
			label := map[string]string{}
			valuesMap := make(map[string]interface{})
			colEndIndex := len(columns)
			if hasTag {
				// Trailing result columns are the tag values -> labels.
				for i := tagIndex; i < len(data.Head); i++ {
					if row[i] != nil {
						label[data.Head[i]] = fmt.Sprintf("%v", row[i])
					}
				}
			}
			// values array to map
			for i := 0; i < colEndIndex; i++ {
				valuesMap[columns[i]] = row[i]
			}
			for i, column := range table.ColumnList {
				var v interface{}
				metric := p.metricMap[p.buildFQName(tableName, column)]
				switch metric.Type {
				case Info:
					_, isFloat := valuesMap[column].(float64)
					// Numeric role/status codes become their string labels.
					if strings.HasSuffix(column, "role") && valuesMap[column] != nil && isFloat {
						v = getRoleStr(valuesMap[column].(float64))
						break
					}
					if strings.HasSuffix(column, "status") && valuesMap[column] != nil && isFloat {
						v = getStatusStr(valuesMap[column].(float64))
						break
					}
					if valuesMap[column] != nil {
						v = i2string(valuesMap[column])
					} else {
						v = nil
					}
				case Counter, Gauge, Summary:
					if valuesMap[column] != nil {
						v = i2float(valuesMap[column])
						// cluster_uptime is stored in seconds; export days.
						if column == "cluster_uptime" {
							v = i2float(valuesMap[column]) / 86400
						}
					} else {
						v = nil
					}
				}
				values[i] = append(values[i], &Value{
					Label: label,
					Value: v,
				})
			}
		}
		for i, column := range table.ColumnList {
			metric := p.metricMap[p.buildFQName(tableName, column)]
			for _, value := range values[i] {
				logger.Tracef("set metric:%s, Label:%v, Value:%v", column, value.Label, value.Value)
			}
			// Merge with any values already set this cycle (other tables
			// mapping to the same FQ name).
			if metric.GetValue() != nil {
				values[i] = append(values[i], metric.GetValue()...)
			}
			metric.SetValue(values[i])
		}
	}
}
// buildFQName derives the Prometheus fully-qualified name for a column:
// legacy-mapped names win; otherwise "<prefix>_<table>[_<column>]".
func (p *Processor) buildFQName(tableName string, column string) string {
	// keep same metric name
	key := tableName + "_" + column
	if mapped, ok := metricNameMap[key]; ok {
		return p.prefix + "_" + mapped
	}

	buf := pool.BytesPoolGet()
	defer pool.BytesPoolPut(buf)
	buf.WriteString(p.prefix)
	buf.WriteByte('_')
	buf.WriteString(tableName)
	if column != "" {
		buf.WriteByte('_')
		buf.WriteString(column)
	}
	return buf.String()
}
// GetMetric exposes the FQ-name -> Metric registry.
func (p *Processor) GetMetric() map[string]*Metric {
	metrics := p.metricMap
	return metrics
}
// Close signals shutdown and releases the database connection.
func (p *Processor) Close() error {
	close(p.exitChan)
	err := p.dbConn.Close()
	return err
}
// getRoleStr maps a (rounded) numeric mnode role code to its label;
// unrecognized codes yield "unknown".
func getRoleStr(v float64) string {
	code := int(math.Round(v))
	roles := map[int]string{
		0:   "offline",
		100: "follower",
		101: "candidate",
		102: "leader",
		103: "error",
		104: "learner",
	}
	if name, ok := roles[code]; ok {
		return name
	}
	return "unknown"
}
// getStatusStr maps a (rounded) numeric dnode status code to its label;
// anything but 0/1 yields "unknown".
func getStatusStr(v float64) string {
	switch int(math.Round(v)) {
	case 0:
		return "offline"
	case 1:
		return "ready"
	default:
		return "unknown"
	}
}
// exchangeDBType infers the Prometheus collect type from a TDengine SQL
// type name; unsupported types panic (schema discovery is fatal there).
func exchangeDBType(t string) CollectType {
	byType := map[string]CollectType{
		"BOOL": Gauge, "FLOAT": Gauge, "DOUBLE": Gauge,
		"TINYINT": Counter, "SMALLINT": Counter, "INT": Counter, "BIGINT": Counter,
		"TINYINT UNSIGNED": Counter, "SMALLINT UNSIGNED": Counter,
		"INT UNSIGNED": Counter, "BIGINT UNSIGNED": Counter,
		"BINARY": Info, "NCHAR": Info, "VARCHAR": Info,
	}
	if ct, ok := byType[t]; ok {
		return ct
	}
	panic("unsupported type")
}
// i2string converts a string or []byte result cell to string; any other
// type is a programming error and panics.
func i2string(value interface{}) string {
	if s, ok := value.(string); ok {
		return s
	}
	if b, ok := value.([]byte); ok {
		return string(b)
	}
	panic("unexpected type to string")
}
// i2float converts any numeric or bool result cell to float64 (bool maps
// to 1/0); any other type panics.
func i2float(value interface{}) float64 {
	switch x := value.(type) {
	case bool:
		if x {
			return 1
		}
		return 0
	case float64:
		return x
	case float32:
		return float64(x)
	case int8:
		return float64(x)
	case int16:
		return float64(x)
	case int32:
		return float64(x)
	case int64:
		return float64(x)
	case uint8:
		return float64(x)
	case uint16:
		return float64(x)
	case uint32:
		return float64(x)
	case uint64:
		return float64(x)
	default:
		panic("unexpected type to float64")
	}
}

View File

@ -0,0 +1,13 @@
# Prometheus scrape configuration for the taoskeeper demo stack:
# scrapes itself, the keeper's /metrics endpoint, and node_exporter.
global:
  scrape_interval: 5s
scrape_configs:
  - job_name: "prometheus"
    static_configs:
      - targets: ["localhost:9090"]
  - job_name: "taoskeeper"
    static_configs:
      - targets: ["taoskeeper:6043"]
  - job_name: "node"
    static_configs:
      - targets: ["nodeexporter:9100"]

View File

@ -0,0 +1,8 @@
package system
import (
"testing"
)
// TestEmpty is a placeholder so the package has at least one test in CI.
func TestEmpty(t *testing.T) {
}

View File

@ -0,0 +1,146 @@
package system
import (
"context"
"fmt"
"net/http"
"os"
"strconv"
"time"
"github.com/kardianos/service"
"github.com/taosdata/go-utils/web"
"github.com/taosdata/taoskeeper/api"
"github.com/taosdata/taoskeeper/cmd"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/infrastructure/log"
"github.com/taosdata/taoskeeper/monitor"
"github.com/taosdata/taoskeeper/process"
"github.com/taosdata/taoskeeper/version"
)
// Logger for the program/service lifecycle.
var logger = log.GetLogger("PRG")

// Init wires up the whole keeper HTTP service from configuration: logging,
// gin router with request/recovery middleware, the reporter, self
// monitoring, health check, adapter and general-metric endpoints, plus
// enterprise-only audit/zabbix routes. If the config requests a transfer
// or drop command instead, it runs that command and exits the process.
// Returns the not-yet-started http.Server.
func Init() *http.Server {
	conf := config.InitConfig()
	log.ConfigLog()

	if len(conf.Transfer) > 0 || len(conf.Drop) > 0 {
		cmd := cmd.NewCommand(conf)
		cmd.Process(conf)
		os.Exit(0)
		return nil
	}

	router := web.CreateRouter(false, &conf.Cors, false)
	router.Use(log.GinLog())
	router.Use(log.GinRecoverLog())

	reporter := api.NewReporter(conf)
	reporter.Init(router)
	monitor.StartMonitor(conf.Metrics.Cluster, conf, reporter)

	go func() {
		// wait for monitor to all metric received
		time.Sleep(time.Second * 35)

		processor := process.NewProcessor(conf)
		node := api.NewNodeExporter(processor)
		node.Init(router)

		if version.IsEnterprise == "true" {
			zabbix := api.NewZabbix(processor)
			zabbix.Init(router)
		}
	}()

	checkHealth := api.NewCheckHealth(version.Version)
	checkHealth.Init(router)

	if version.IsEnterprise == "true" {
		if conf.Audit.Enable {
			audit, err := api.NewAudit(conf)
			if err != nil {
				panic(err)
			}
			if err = audit.Init(router); err != nil {
				panic(err)
			}
		}
	}

	adapter := api.NewAdapter(conf)
	if err := adapter.Init(router); err != nil {
		panic(err)
	}

	gen_metric := api.NewGeneralMetric(conf)
	if err := gen_metric.Init(router); err != nil {
		panic(err)
	}

	server := &http.Server{
		Addr:    ":" + strconv.Itoa(conf.Port),
		Handler: router,
	}

	return server
}
// Start wraps the HTTP server in a kardianos service definition and runs
// it until the service manager (or terminal) stops it; setup or run
// failures are fatal.
func Start(server *http.Server) {
	svcConfig := &service.Config{
		Name:        "taoskeeper",
		DisplayName: "taoskeeper",
		Description: "taosKeeper is a tool for TDengine that exports monitoring metrics",
	}
	svc, err := service.New(newProgram(server), svcConfig)
	if err != nil {
		logger.Fatal(err)
	}
	if err = svc.Run(); err != nil {
		logger.Fatal(err)
	}
}
// program adapts an http.Server to the kardianos service.Interface.
type program struct {
	server *http.Server
}
// newProgram wraps server for the service manager.
func newProgram(server *http.Server) *program {
	p := &program{server: server}
	return p
}
// Start launches the HTTP server in the background and returns immediately,
// as required by the service interface; a listen failure panics since the
// keeper is useless without its server.
func (p *program) Start(s service.Service) error {
	if service.Interactive() {
		logger.Info("Running in terminal.")
	} else {
		logger.Info("Running under service manager.")
	}
	srv := p.server
	go func() {
		err := srv.ListenAndServe()
		if err != nil && err != http.ErrServerClosed {
			panic(fmt.Errorf("taoskeeper start up fail! %v", err))
		}
	}()
	return nil
}
// Stop gracefully shuts the HTTP server down (5s budget), then flushes the
// log subsystem (another 5s budget) before returning to the service manager.
func (p *program) Stop(s service.Service) error {
	logger.Println("Shutdown WebServer ...")
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := p.server.Shutdown(ctx); err != nil {
		logger.Println("WebServer Shutdown error:", err)
	}

	logger.Println("Server exiting")
	ctxLog, cancelLog := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancelLog()
	logger.Println("Flushing Log")
	log.Close(ctxLog)
	return nil
}

View File

@ -0,0 +1,22 @@
package system
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/taosdata/taoskeeper/db"
"github.com/taosdata/taoskeeper/infrastructure/config"
"github.com/taosdata/taoskeeper/util"
)
// TestStart boots the full service wiring against a local taosd, then
// drops the metric and audit scratch databases it created.
func TestStart(t *testing.T) {
	server := Init()
	assert.NotNil(t, server)
	conn, err := db.NewConnectorWithDb(config.Conf.TDengine.Username, config.Conf.TDengine.Password, config.Conf.TDengine.Host, config.Conf.TDengine.Port, config.Conf.Metrics.Database.Name, config.Conf.TDengine.Usessl)
	assert.NoError(t, err)
	conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", config.Conf.Metrics.Database.Name), util.GetQidOwn())
	conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", config.Conf.Audit.Database.Name), util.GetQidOwn())
}

View File

@ -0,0 +1,19 @@
# systemd unit for taoskeeper.
# NOTE(review): StartLimitBurst/StartLimitInterval under [Service] is the
# legacy location; modern systemd expects them in [Unit] — confirm target
# distro versions still accept this placement.
[Unit]
Description=TaosKeeper - TDengine Metrics Exporter for Kinds of Collectors
Documentation=https://www.taosdata.com
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=/usr/bin/taoskeeper
TimeoutSec=0
RestartSec=2
StandardOutput=null
StandardError=journal
Restart=always
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,6 @@
# Telegraf demo config: scrape taoskeeper's Prometheus endpoint (URL comes
# from the TAOSKEEPER environment variable) and dump metrics to stdout.
[[inputs.prometheus]]
# An array of urls to scrape metrics from.
urls = ["${TAOSKEEPER}"]

[[outputs.file]]
files = ["stdout"]

View File

@ -0,0 +1,9 @@
# Minimal compose setup running telegraf to scrape taoskeeper.
version: "3.6"
services:
  telegraf:
    image: telegraf:1.20-alpine
    hostname: telegraf
    volumes:
      # Mount the scrape configuration read-only.
      - ./telegraf.conf:/etc/telegraf/telegraf.conf:ro
    environment:
      # Consumed by telegraf.conf as ${TAOSKEEPER}; assumes a reachable
      # "taoskeeper" host on the compose network.
      TAOSKEEPER: http://taoskeeper:6043/metrics

View File

@ -0,0 +1,8 @@
package util
import (
"testing"
)
// TestEmpty is a placeholder so `go test ./...` runs in this package;
// presumably kept for CI coverage aggregation — confirm before removing.
func TestEmpty(t *testing.T) {
}

View File

@ -0,0 +1,15 @@
package pool
import (
"github.com/panjf2000/ants/v2"
)
var GoroutinePool *ants.Pool
// Init creates the process-wide goroutine pool with the given capacity.
// It panics on failure, since the rest of the process depends on the pool.
func Init(size int) {
	p, err := ants.NewPool(size)
	if err != nil {
		panic(err)
	}
	GoroutinePool = p
}

View File

@ -0,0 +1,23 @@
package pool
import (
"bytes"
"sync"
)
// bytesBufferPool recycles bytes.Buffer values to cut allocation churn.
// The New hook is attached in the composite literal rather than in init().
var bytesBufferPool = sync.Pool{
	New: func() interface{} {
		return &bytes.Buffer{}
	},
}

// BytesPoolGet hands out a buffer from the pool (freshly allocated if empty).
func BytesPoolGet() *bytes.Buffer {
	return bytesBufferPool.Get().(*bytes.Buffer)
}

// BytesPoolPut resets b and returns it to the pool for reuse.
func BytesPoolPut(b *bytes.Buffer) {
	b.Reset()
	bytesBufferPool.Put(b)
}

View File

@ -0,0 +1,8 @@
package pool
import (
"testing"
)
// TestEmpty is a placeholder so `go test ./...` runs in this package;
// presumably kept for CI coverage aggregation — confirm before removing.
func TestEmpty(t *testing.T) {
}

154
tools/keeper/util/util.go Normal file
View File

@ -0,0 +1,154 @@
package util
import (
"crypto/md5"
"encoding/hex"
"os"
"strconv"
"strings"
"sync/atomic"
"time"
"unicode"
"github.com/taosdata/taoskeeper/infrastructure/config"
)
// https://github.com/containerd/cgroups/blob/main/utils.go
// Process-wide counters used to mint unique query IDs (see GetQid/GetQidOwn).
var globalCounter64 uint64
var globalCounter32 uint32
// MAX_TABLE_NAME_LEN caps generated table-name length in bytes (used with
// SafeSubstring; presumably tied to TDengine's table-name limit — confirm).
var MAX_TABLE_NAME_LEN = 190
func init() {
	// Explicit zeroing is redundant (package vars start zeroed) but harmless;
	// it documents the counters' starting state.
	atomic.StoreUint64(&globalCounter64, 0)
	atomic.StoreUint32(&globalCounter32, 0)
}
// ReadUint reads the file at path and parses its whitespace-trimmed contents
// as an unsigned 64-bit decimal via ParseUint (negatives clamp to 0).
func ReadUint(path string) (uint64, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	text := strings.TrimSpace(string(raw))
	return ParseUint(text, 10, 64)
}
// ParseUint parses s as an unsigned integer, with one twist borrowed from
// containerd's cgroup utilities: any negative input — whether representable as
// an int64 or below MinInt64 — is clamped to 0 with no error. All other parse
// failures are reported as-is.
func ParseUint(s string, base, bitSize int) (uint64, error) {
	u, uintErr := strconv.ParseUint(s, base, bitSize)
	if uintErr == nil {
		return u, nil
	}
	// ParseUint failed; check whether the input was simply negative.
	i, intErr := strconv.ParseInt(s, base, bitSize)
	switch {
	case intErr == nil && i < 0:
		// Representable negative value: clamp to zero.
		return 0, nil
	case intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && i < 0:
		// Out-of-range negative (below MinInt64): also clamp to zero.
		return 0, nil
	default:
		return 0, uintErr
	}
}
// EscapeInfluxProtocol escapes s for InfluxDB line protocol: a single trailing
// backslash is dropped, then commas, equals signs, spaces and double quotes
// are backslash-escaped. The replacements are independent (no escape output
// contains another trigger character), so a single Replacer pass is
// equivalent to the original sequential ReplaceAll calls.
func EscapeInfluxProtocol(s string) string {
	trimmed := strings.TrimSuffix(s, "\\")
	escaper := strings.NewReplacer(
		",", "\\,",
		"=", "\\=",
		" ", "\\ ",
		"\"", "\\\"",
	)
	return escaper.Replace(trimmed)
}
// GetCfg builds a canned configuration pointing at a local TDengine instance
// (127.0.0.1:6041, root/taosdata) with the "keeper_test_log" metrics database.
// NOTE(review): appears to be a fixture for tests — confirm it is not used on
// production code paths before changing any defaults here.
func GetCfg() *config.Config {
	c := &config.Config{
		InstanceID: 64,
		Port:       6043,
		LogLevel:   "trace",
		TDengine: config.TDengineRestful{
			Host:     "127.0.0.1",
			Port:     6041,
			Username: "root",
			Password: "taosdata",
			Usessl:   false,
		},
		Metrics: config.MetricsConfig{
			Database: config.Database{
				Name:    "keeper_test_log",
				Options: map[string]interface{}{},
			},
		},
		Log: config.Log{
			Level:            "trace",
			Path:             "/var/log/taos",
			RotationCount:    10,
			RotationTime:     24 * time.Hour,
			RotationSize:     1073741824, // 1 GiB
			Compress:         true,
			ReservedDiskSize: 1073741824, // 1 GiB
		},
	}
	return c
}
// SafeSubstring returns s truncated to at most n bytes. Unlike a plain byte
// slice, it never splits a multi-byte UTF-8 rune: if byte n falls inside a
// rune, the cut backs up to the rune's start, so the result is always valid
// UTF-8 (important for generated table names). A negative n yields "".
func SafeSubstring(s string, n int) string {
	if n < 0 {
		// Previously s[:n] would panic; an empty string is the safe answer.
		return ""
	}
	if len(s) <= n {
		return s
	}
	cut := n
	// UTF-8 continuation bytes match 0b10xxxxxx; back up until we sit on a
	// rune-start byte so the truncation does not corrupt the last rune.
	for cut > 0 && s[cut]&0xC0 == 0x80 {
		cut--
	}
	return s[:cut]
}
// GetQid normalizes a client-supplied query ID. A well-formed "0x…" hex
// string is parsed and its low byte cleared (reserved for sub-IDs); anything
// else — empty, missing prefix, or unparsable — falls back to a fresh ID
// minted from the process-wide 32-bit counter shifted into bits 8..39.
func GetQid(qidStr string) uint64 {
	if strings.HasPrefix(qidStr, "0x") {
		if parsed, err := strconv.ParseUint(qidStr[2:], 16, 64); err == nil {
			// Clear the last byte of the caller's ID.
			return parsed &^ 0xFF
		}
	}
	// Fallback: locally generated ID with a zeroed low byte.
	next := atomic.AddUint32(&globalCounter32, 1)
	return uint64(next) << 8
}
// GetQidOwn mints a locally-originated query ID: the configured InstanceID in
// the top byte, a process-wide counter in the low 56 bits.
func GetQidOwn() uint64 {
	id := atomic.AddUint64(&globalCounter64, 1)
	if id > 0x00ffffffffffffff {
		// Counter spilled into the instance-ID byte: wrap back to 1.
		// NOTE(review): the Store plus local reassignment is not atomic with
		// concurrent Adds, so duplicate IDs are possible right at the wrap
		// point — confirm whether callers can tolerate that.
		atomic.StoreUint64(&globalCounter64, 1)
		id = 1
	}
	qid64 := uint64(config.Conf.InstanceID)<<56 | id
	return qid64
}
// GetMd5HexStr returns the lowercase hex encoding of str's MD5 digest
// (32 characters). Used for short, stable identifiers — not for security.
func GetMd5HexStr(str string) string {
	digest := md5.Sum([]byte(str))
	return hex.EncodeToString(digest[:])
}
// isValidChar reports whether r may appear in a generated table name:
// Unicode letters, digits, and underscore qualify.
func isValidChar(r rune) bool {
	if r == '_' {
		return true
	}
	return unicode.IsLetter(r) || unicode.IsDigit(r)
}
// ToValidTableName maps input to a safe table name: letters and digits are
// lowercased and kept, underscores pass through, every other rune becomes
// an underscore. (The validity test is inlined here: letter, digit, or '_'.)
func ToValidTableName(input string) string {
	var sb strings.Builder
	for _, ch := range input {
		switch {
		case unicode.IsLetter(ch) || unicode.IsDigit(ch) || ch == '_':
			sb.WriteRune(unicode.ToLower(ch))
		default:
			sb.WriteRune('_')
		}
	}
	return sb.String()
}

View File

@ -0,0 +1,11 @@
package version
// Build metadata for taoskeeper. NOTE(review): these defaults look like
// placeholders overridden at build time via `-ldflags "-X ..."` — confirm
// against the build scripts before relying on the literal values.
var Version = "0.0.0.0"
var Gitinfo = "unknown"
var BuildInfo = "1970-01-01 00:00:00 +08:00"
var CommitID = "unknown"
// Customization/branding knobs (enterprise builds presumably override these).
var CUS_NAME = "TDengine"
var CUS_PROMPT = "taos"
var IsEnterprise = "false"

View File

@ -0,0 +1,111 @@
<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>5.0</version>
<date>2021-12-06T05:55:45Z</date>
<groups>
<group>
<name>taos</name>
</group>
</groups>
<templates>
<template>
<template>TDengine</template>
<name>TDengine</name>
<description>TDengine Monitoring</description>
<groups>
<group>
<name>taos</name>
</group>
</groups>
<items>
<item>
<name>float</name>
<type>HTTP_AGENT</type>
<key>TDengine.float</key>
<delay>{$COLLECTION_INTERVAL}</delay>
<trends>0</trends>
<value_type>TEXT</value_type>
<url>{$TAOSKEEPER_HOST}/float</url>
</item>
<item>
<name>string</name>
<type>HTTP_AGENT</type>
<key>TDengine.string</key>
<delay>{$COLLECTION_INTERVAL}</delay>
<trends>0</trends>
<value_type>TEXT</value_type>
<url>{$TAOSKEEPER_HOST}/string</url>
</item>
</items>
<discovery_rules>
<discovery_rule>
<name>taos_float_metric</name>
<type>DEPENDENT</type>
<key>taos.discovery_float_metric</key>
<delay>0</delay>
<item_prototypes>
<item_prototype>
<name>taos.float.[{#METRIC}]</name>
<type>DEPENDENT</type>
<key>taos.float.[{#METRIC}]</key>
<delay>0</delay>
<value_type>FLOAT</value_type>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.data[?(@.key == '{#METRIC}')].value.first()</params>
</step>
</preprocessing>
<master_item>
<key>TDengine.float</key>
</master_item>
</item_prototype>
</item_prototypes>
<master_item>
<key>TDengine.float</key>
</master_item>
</discovery_rule>
<discovery_rule>
<name>taos_string_metric</name>
<type>DEPENDENT</type>
<key>taos.discovery_string_metric</key>
<delay>0</delay>
<item_prototypes>
<item_prototype>
<name>taos.string.[{#METRIC}]</name>
<type>DEPENDENT</type>
<key>taos.string.[{#METRIC}]</key>
<delay>0</delay>
<trends>0</trends>
<value_type>TEXT</value_type>
<preprocessing>
<step>
<type>JSONPATH</type>
<params>$.data[?(@.key == '{#METRIC}')].value.first()</params>
</step>
</preprocessing>
<master_item>
<key>TDengine.string</key>
</master_item>
</item_prototype>
</item_prototypes>
<master_item>
<key>TDengine.string</key>
</master_item>
</discovery_rule>
</discovery_rules>
<macros>
<macro>
<macro>{$COLLECTION_INTERVAL}</macro>
<value>30s</value>
<description>indicator collection interval</description>
</macro>
<macro>
<macro>{$TAOSKEEPER_HOST}</macro>
<value>http://taoskeeper:6043/zabbix</value>
<description>taoskeeper host for zabbix</description>
</macro>
</macros>
</template>
</templates>
</zabbix_export>