Merge pull request 'Agent v2' (#203) from v2 into master

Reviewed-on: https://git.infini.ltd/infini/console/pulls/203
Commit: d2a448b53c
@@ -52,7 +52,7 @@ func checkElasticsearchRequirements() error{
 	if targetEsConfig == nil {
 		return fmt.Errorf("cluster config %s was not found", esID)
 	}
-	var req = util.NewGetRequest(targetEsConfig.Endpoint, nil)
+	var req = util.NewGetRequest(targetEsConfig.GetAnyEndpoint(), nil)
 	if targetEsConfig.BasicAuth != nil {
 		req.SetBasicAuth(targetEsConfig.BasicAuth.Username, targetEsConfig.BasicAuth.Password)
 	}
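This change swaps the single `Endpoint` field for `GetAnyEndpoint()`, so the requirement check keeps working when a cluster is configured with a list of endpoints rather than one. A minimal sketch of what such an accessor can look like; the struct and field names below are illustrative assumptions, not the framework's actual definition:

package main

import "fmt"

// ClusterConfig is a hypothetical stand-in for the framework's
// elasticsearch config type.
type ClusterConfig struct {
	Endpoint  string   // legacy single endpoint
	Endpoints []string // optional list of endpoints
}

// GetAnyEndpoint returns one usable endpoint, preferring the list
// over the legacy single field.
func (c *ClusterConfig) GetAnyEndpoint() string {
	if len(c.Endpoints) > 0 {
		return c.Endpoints[0]
	}
	return c.Endpoint
}

func main() {
	cfg := &ClusterConfig{Endpoints: []string{"http://10.0.0.1:9200", "http://10.0.0.2:9200"}}
	fmt.Println(cfg.GetAnyEndpoint()) // http://10.0.0.1:9200
}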
@@ -0,0 +1,34 @@
+
+#TODO, need to replace cleartext password to keystore, and ingest endpoint
+POST .infini_configs/_doc/system_ingest_config_yml
+{
+  "id": "system_ingest_config_yml",
+  "updated": "2023-10-18T14:49:56.768754+08:00",
+  "metadata": {
+    "category": "app_settings",
+    "name": "agent",
+    "labels": {
+      "instance": "_all"
+    }
+  },
+  "payload": {
+    "name": "system_ingest_config.yml",
+    "location": "system_ingest_config.yml",
+    "content": """
+
+configs.template:
+  - name: "default_ingest_config"
+    path: ./config/ingest_config.tpl
+    variable:
+      INGEST_CLUSTER_ID: infini_default_ingest_cluster
+      INGEST_CLUSTER_ENDPOINT: [ "http://192.168.3.185:8000" ]
+      INGEST_CLUSTER_USERNAME: "ingest"
+      INGEST_CLUSTER_PASSWORD: "password"
+      CLUSTER_VER: "1.6.0"
+      CLUSTER_DISTRIBUTION: "easysearch"
+      INDEX_PREFIX: ".infini_"
+
+""",
+    "version": 3
+  }
+}
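The `content` field above is a template whose `$[[...]]` placeholders are filled from the `variable` map when the config is rendered for an agent. A rough sketch of that substitution, assuming plain string replacement (the real framework may resolve `$[[keystore.*]]` references through a separate secret store):

package main

import (
	"fmt"
	"strings"
)

// render replaces each $[[KEY]] placeholder with its value.
func render(tpl string, vars map[string]string) string {
	for k, v := range vars {
		tpl = strings.ReplaceAll(tpl, "$[["+k+"]]", v)
	}
	return tpl
}

func main() {
	tpl := "endpoints: $[[INGEST_CLUSTER_ENDPOINT]]\nusername: $[[INGEST_CLUSTER_USERNAME]]"
	fmt.Println(render(tpl, map[string]string{
		"INGEST_CLUSTER_ENDPOINT": `[ "http://192.168.3.185:8000" ]`,
		"INGEST_CLUSTER_USERNAME": "ingest",
	}))
}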
@@ -0,0 +1,200 @@
+#agent
+
+POST .infini_configs/_doc/task_config_tpl
+{
+  "id": "task_config_tpl",
+  "updated": "2023-10-19T14:49:56.768754+08:00",
+  "metadata": {
+    "category": "app_settings",
+    "name": "agent",
+    "labels": {
+      "instance": "_all"
+    }
+  },
+  "payload": {
+    "name": "task_config.tpl",
+    "location": "task_config.tpl",
+    "content": """
+
+env:
+  CLUSTER_PASSWORD: $[[keystore.$[[CLUSTER_ID]]_password]]
+
+elasticsearch:
+  - id: $[[TASK_ID]]
+    name: $[[TASK_ID]]
+    cluster_uuid: $[[CLUSTER_UUID]]
+    enabled: true
+    endpoints: $[[CLUSTER_ENDPOINT]]
+    discovery:
+      enabled: false
+    basic_auth:
+      username: $[[CLUSTER_USERNAME]]
+      password: $[[CLUSTER_PASSWORD]]
+    traffic_control:
+      enabled: true
+      max_qps_per_node: 100
+      max_bytes_per_node: 10485760
+      max_connection_per_node: 5
+
+pipeline:
+
+#node level metrics
+- auto_start: $[[NODE_LEVEL_TASKS_ENABLED]]
+  enabled: $[[NODE_LEVEL_TASKS_ENABLED]]
+  keep_running: true
+  name: collect_$[[TASK_ID]]_es_node_stats
+  retry_delay_in_ms: 10000
+  processor:
+  - es_node_stats:
+      elasticsearch: $[[TASK_ID]]
+      labels:
+        cluster_id: $[[CLUSTER_ID]]
+      when:
+        cluster_available: ["$[[TASK_ID]]"]
+
+#node logs
+- auto_start: $[[NODE_LEVEL_TASKS_ENABLED]]
+  enabled: $[[NODE_LEVEL_TASKS_ENABLED]]
+  keep_running: true
+  name: collect_$[[TASK_ID]]_es_logs
+  retry_delay_in_ms: 10000
+  processor:
+  - es_logs_processor:
+      elasticsearch: $[[TASK_ID]]
+      labels:
+        cluster_id: $[[CLUSTER_ID]]
+      logs_path: $[[NODE_LOGS_PATH]]
+      queue_name: logs
+      when:
+        cluster_available: ["$[[TASK_ID]]"]
+""",
+    "version": 1
+  }
+}
+
+#system ingest template
+POST .infini_configs/_doc/ingest_config_tpl
+{
+  "id": "ingest_config_tpl",
+  "updated": "2023-10-19T14:49:56.768754+08:00",
+  "metadata": {
+    "category": "app_settings",
+    "name": "agent",
+    "labels": {
+      "instance": "_all"
+    }
+  },
+  "payload": {
+    "name": "ingest_config.tpl",
+    "location": "ingest_config.tpl",
+    "content": """
+
+elasticsearch:
+  - name: $[[INGEST_CLUSTER_ID]]
+    enabled: true
+    endpoints: $[[INGEST_CLUSTER_ENDPOINT]]
+    discovery:
+      enabled: false
+    basic_auth:
+      username: $[[INGEST_CLUSTER_USERNAME]]
+      password: $[[INGEST_CLUSTER_PASSWORD]]
+    traffic_control:
+      enabled: true
+      max_qps_per_node: 1000
+      max_bytes_per_node: 10485760
+      max_connection_per_node: 10
+
+metrics:
+  enabled: true
+  queue: metrics
+  network:
+    enabled: true
+    summary: true
+    sockets: true
+    #throughput: true
+    details: true
+  memory:
+    metrics:
+      - swap
+      - memory
+  disk:
+    metrics:
+      - iops
+      - usage
+  cpu:
+    metrics:
+      - idle
+      - system
+      - user
+      - iowait
+      - load
+  instance:
+    enabled: true
+
+elastic:
+  availability_check:
+    enabled: false
+
+pipeline:
+  - name: merge_logs
+    auto_start: true
+    keep_running: true
+    retry_delay_in_ms: 10000
+    processor:
+      - indexing_merge:
+          elasticsearch: "$[[INGEST_CLUSTER_ID]]"
+          index_name: ".infini_logs"
+          type_name: "_doc"
+          input_queue: "logs"
+          idle_timeout_in_seconds: 10
+          output_queue:
+            name: "merged_requests"
+          worker_size: 1
+          bulk_size_in_mb: 5
+  - name: merge_metrics
+    auto_start: true
+    keep_running: true
+    retry_delay_in_ms: 10000
+    processor:
+      - indexing_merge:
+          elasticsearch: "$[[INGEST_CLUSTER_ID]]"
+          index_name: ".infini_metrics"
+          type_name: "_doc"
+          input_queue: "metrics"
+          output_queue:
+            name: "merged_requests"
+          worker_size: 1
+          bulk_size_in_mb: 5
+  - name: ingest_merged_requests
+    enabled: true
+    auto_start: true
+    keep_running: true
+    retry_delay_in_ms: 10000
+    processor:
+      - bulk_indexing:
+          max_worker_size: 1
+          verbose_bulk_result: false
+          bulk:
+            batch_size_in_mb: 5
+            batch_size_in_docs: 5000
+            max_retry_times: 0
+            invalid_queue: ""
+            response_handle:
+              include_index_stats: false
+              include_action_stats: false
+              output_bulk_stats: false
+              include_error_details: true
+              save_error_results: true
+              save_success_results: false
+              save_busy_results: false
+          consumer:
+            fetch_max_messages: 5
+          queues:
+            type: indexing_merge
+          when:
+            cluster_available: ["$[[INGEST_CLUSTER_ID]]"]
+""",
+    "version": 1
+  }
+}
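Every pipeline in both templates is wrapped in a `when: cluster_available: [...]` condition, so collection only runs while the target cluster answers. A minimal sketch of such an availability gate; the plain HTTP ping below is only an assumption about how the check is made:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// clusterAvailable reports whether the endpoint answers within the timeout.
func clusterAvailable(endpoint string) bool {
	client := &http.Client{Timeout: 3 * time.Second}
	resp, err := client.Get(endpoint)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode < 500
}

func main() {
	if clusterAvailable("http://localhost:9200") {
		fmt.Println("run collection pipeline")
	} else {
		fmt.Println("skip: cluster unavailable")
	}
}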
@@ -0,0 +1,114 @@
+POST .infini_configs/_doc/agent_relay_gateway_config_yml
+{
+  "id": "agent_relay_gateway_config_yml",
+  "updated": "2023-10-19T14:49:56.768754+08:00",
+  "metadata": {
+    "category": "app_settings",
+    "name": "gateway",
+    "labels": {
+      "instance": "_all"
+    }
+  },
+  "payload": {
+    "name": "agent_relay_gateway_config.yml",
+    "location": "agent_relay_gateway_config.yml",
+    "content": """
+env:
+  CLUSTER_ENDPOINTS: ["http://10.0.0.3:7102"]
+  CLUSTER_USERNAME: admin
+  CLUSTER_PASSWORD: admin
+
+path.data: data
+path.logs: log
+
+entry:
+  - name: my_es_entry
+    enabled: true
+    router: my_router
+    max_concurrency: 200000
+    network:
+      binding: 0.0.0.0:8000
+
+flow:
+  - name: async_bulk
+    filter:
+      - basic_auth:
+          valid_users:
+            ingest: password
+      - bulk_reshuffle:
+          when:
+            contains:
+              _ctx.request.path: /_bulk
+          elasticsearch: prod
+          level: cluster
+          partition_size: 3
+          fix_null_id: true
+      - elasticsearch:
+          elasticsearch: prod #elasticsearch configure reference name
+          max_connection_per_node: 1000 #max tcp connection to upstream, default for all nodes
+          max_response_size: -1 #default for all nodes
+          balancer: weight
+          refresh: # refresh upstream nodes list, need to enable this feature to use elasticsearch nodes auto discovery
+            enabled: true
+            interval: 60s
+          filter:
+            roles:
+              exclude:
+                - master
+
+router:
+  - name: my_router
+    default_flow: async_bulk
+
+elasticsearch:
+  - name: prod
+    enabled: true
+    endpoints: $[[env.CLUSTER_ENDPOINTS]]
+    discovery:
+      enabled: false
+    basic_auth:
+      username: $[[env.CLUSTER_USERNAME]]
+      password: $[[env.CLUSTER_PASSWORD]]
+    traffic_control:
+      enabled: true
+      max_qps_per_node: 100
+      max_bytes_per_node: 10485760
+      max_connection_per_node: 5
+
+elastic:
+  enabled: true
+  remote_configs: false
+  elasticsearch: prod
+  metadata_refresh:
+    enabled: true
+    interval: 30s
+  discovery:
+    enabled: true
+    refresh:
+      enabled: true
+      interval: 30s
+
+pipeline:
+  - name: bulk_request_ingest
+    auto_start: true
+    keep_running: true
+    retry_delay_in_ms: 1000
+    processor:
+      - bulk_indexing:
+          max_connection_per_node: 100
+          num_of_slices: 3
+          max_worker_size: 30
+          idle_timeout_in_seconds: 10
+          bulk:
+            compress: false
+            batch_size_in_mb: 10
+            batch_size_in_docs: 10000
+          consumer:
+            fetch_max_messages: 100
+          queue_selector:
+            labels:
+              type: bulk_reshuffle
+""",
+    "version": 1
+  }
+}
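With `level: cluster` and `partition_size: 3`, the `bulk_reshuffle` filter above spreads incoming `_bulk` requests across three queues for the `prod` cluster. One plausible partitioning scheme, hashing the document id, is sketched below; the actual gateway may key on node or shard instead:

package main

import (
	"fmt"
	"hash/fnv"
)

// partitionFor maps a document id to one of n queue partitions.
func partitionFor(docID string, n int) int {
	h := fnv.New32a()
	h.Write([]byte(docID))
	return int(h.Sum32() % uint32(n))
}

func main() {
	for _, id := range []string{"doc-1", "doc-2", "doc-3"} {
		fmt.Printf("%s -> queue partition %d\n", id, partitionFor(id, 3))
	}
}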
@@ -3,22 +3,12 @@ package config
 import "infini.sh/framework/core/config"
 
 type AppConfig struct {
+	config.APIConfig
 	UI UIConfig `config:"ui"`
-	Network config.NetworkConfig `config:"network"`
-	TLSConfig config.TLSConfig `config:"tls"`
 }
 
 type UIConfig struct {
-	Enabled bool `config:"enabled"`
 	LocalPath string `config:"path"`
 	LocalEnabled bool `config:"local"`
 	VFSEnabled bool `config:"vfs"`
-	APIEndpoint string `config:"api_endpoint"`
-}
-
-func (config *AppConfig) GetSchema() string {
-	if config.TLSConfig.TLSEnabled {
-		return "https"
-	}
-	return "http"
 }
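With `Network` and `TLSConfig` gone and `config.APIConfig` embedded, the removed `GetSchema()` helper is no longer needed on `AppConfig`; the embedded API config can carry that behavior instead. A sketch of how Go embedding promotes such a method, using hypothetical stand-in types rather than the framework's real ones:

package main

import "fmt"

// APIConfig is a hypothetical stand-in for the framework's embedded API config.
type APIConfig struct {
	TLSEnabled bool
}

// GetSchema is the kind of helper that can now live on the shared
// API config rather than on each app's own config struct.
func (c APIConfig) GetSchema() string {
	if c.TLSEnabled {
		return "https"
	}
	return "http"
}

type AppConfig struct {
	APIConfig // embedded: AppConfig gets GetSchema for free
}

func main() {
	app := AppConfig{APIConfig{TLSEnabled: true}}
	fmt.Println(app.GetSchema()) // https
}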
@@ -83,7 +83,9 @@ PUT _template/$[[INDEX_PREFIX]]metrics-rollover
 },
 "codec" : "best_compression",
 "number_of_shards" : "1",
-"translog.durability":"async"
+"translog.durability":"async",
+"index.mapping.coerce": false,
+"index.mapping.ignore_malformed": true
 }
 },
 "mappings" : {

@@ -5952,8 +5954,4 @@ POST $[[INDEX_PREFIX]]layout/_doc/cicmhbt3q95ich72lrvg
 },
 "type": "workspace",
 "is_fixed": true
 }
-
-GET /
-
-
@@ -3,7 +3,7 @@
 set -eo pipefail
 
 function print_usage() {
-  echo "Usage: curl -sSL http://get.infini.sh/agent.html | sudo bash -s -- [-u url_for_download_program] [-v version_for_program ] [-t taget_install_dir] [-p prot_for_program]"
+  echo "Usage: curl -sSL http://get.infini.cloud/ | sudo bash -s -- [-u url_for_download_program] [-v version_for_program ] [-t target_install_dir] [-p port_for_program]"
   echo "Options:"
   echo "  -u, --url <url>                  Download url of the program to install which default is http://localhost"
   echo "  -v, --version <version>          Version of the program to install which default is latest from "
@@ -234,6 +234,32 @@ path.data: data
 path.logs: log
 path.configs: config
+
+resource_limit.cpu.max_num_of_cpus: 1
+resource_limit.memory.max_in_bytes: 533708800
+
+stats:
+  include_storage_stats_in_api: false
+
+elastic:
+  skip_init_metadata_on_start: true
+  health_check:
+    enabled: true
+    interval: 60s
+  availability_check:
+    enabled: false
+    interval: 60s
+
+disk_queue:
+  max_msg_size: 20485760
+  max_bytes_per_file: 20485760
+  max_used_bytes: 1024288000
+  retention.max_num_of_local_files: 1
+  compress:
+    idle_threshold: 0
+    num_of_files_decompress_ahead: 0
+    segment:
+      enabled: true
+
 api:
   enabled: true
   tls:

@@ -246,11 +272,28 @@ api:
     binding: \$[[env.API_BINDING]]
 
 badger:
+  value_threshold: 1024
+  mem_table_size: 1048576
   value_log_max_entries: 1000000
   value_log_file_size: 104857600
-  value_threshold: 1024
 
-agent:
+configs:
+  #for managed client's setting
+  managed: true # managed by remote servers
+  panic_on_config_error: false #ignore config error
+  interval: "10s"
+  servers: # config servers
+    - "http://localhost:9000"
+  soft_delete: false
+  max_backup_files: 5
+  tls: #for mTLS connection with config servers
+    enabled: true
+    cert_file: "config/client.crt"
+    key_file: "config/client.key"
+    ca_file: "config/ca.crt"
+    skip_insecure_verify: false
+
+node:
   major_ip_pattern: ".*"
 EOF
 }
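The generated agent config now connects back to the config servers over mTLS with a client certificate, key, and CA. A self-contained sketch of building such a client with Go's standard library; the file paths mirror the generated config, and nothing here is the agent's actual code:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

// newMTLSClient builds an HTTP client that presents a client certificate
// and verifies the server against the given CA.
func newMTLSClient(certFile, keyFile, caFile string) (*http.Client, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	caPEM, err := os.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("invalid CA file %s", caFile)
	}
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates: []tls.Certificate{cert},
				RootCAs:      pool,
			},
		},
	}, nil
}

func main() {
	client, err := newMTLSClient("config/client.crt", "config/client.key", "config/ca.crt")
	if err != nil {
		fmt.Println("setup failed:", err)
		return
	}
	_ = client // use client.Get(...) against a config server such as http://localhost:9000
}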
@@ -7,7 +7,7 @@ elasticsearch:
   enabled: true
   monitored: true
   reserved: true
-  endpoint: $[[CLUSTER_ENDPINT]]
+  endpoint: $[[CLUSTER_ENDPOINT]]
   discovery:
     enabled: false
   basic_auth:

@@ -17,7 +17,7 @@ elasticsearch:
   elastic.elasticsearch: $[[CLUSTER_ID]]
 
 pipeline:
-- name: indexing_merge
+- name: merge_metrics
   auto_start: true
   keep_running: true
   processor:

@@ -31,22 +31,7 @@ pipeline:
     tag: "metrics"
     worker_size: 1
     bulk_size_in_mb: 5
-- name: consume-metrics_requests
-  auto_start: true
-  keep_running: true
-  processor:
-  - bulk_indexing:
-      bulk:
-        compress: true
-        batch_size_in_mb: 5
-        batch_size_in_docs: 5000
-      consumer:
-        fetch_max_messages: 100
-      queues:
-        type: indexing_merge
-        tag: "metrics"
-      when:
-        cluster_available: ["$[[CLUSTER_ID]]"]
 - name: metadata_ingest
   auto_start: true
   keep_running: true

@@ -91,7 +76,7 @@ pipeline:
       when:
         cluster_available: ["$[[CLUSTER_ID]]"]
 
-- name: logging_indexing_merge
+- name: merge_logging
   auto_start: true
   keep_running: true
   processor:

@@ -106,19 +91,22 @@ pipeline:
     tag: "request_logging"
     worker_size: 1
     bulk_size_in_kb: 1
-- name: consume-logging_requests
+
+- name: ingest_merged_requests
   auto_start: true
   keep_running: true
+  retry_delay_in_ms: 5000
+  max_running_in_ms: 30000
   processor:
   - bulk_indexing:
+      idle_timeout_in_seconds: 5
       bulk:
         compress: true
-        batch_size_in_mb: 1
-        batch_size_in_docs: 1
+        batch_size_in_mb: 10
+        batch_size_in_docs: 1000
       consumer:
         fetch_max_messages: 100
       queues:
         type: indexing_merge
-        tag: "request_logging"
       when:
         cluster_available: ["$[[CLUSTER_ID]]"]
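The reworked `ingest_merged_requests` pipeline flushes bulk batches when either `batch_size_in_mb` or `batch_size_in_docs` is reached. A toy illustration of that dual-limit flush policy:

package main

import "fmt"

// batcher accumulates docs and flushes when either limit is reached.
type batcher struct {
	maxBytes, maxDocs int
	bytes, docs       int
}

// add records one doc of the given size and reports whether to flush.
func (b *batcher) add(size int) bool {
	b.bytes += size
	b.docs++
	if b.bytes >= b.maxBytes || b.docs >= b.maxDocs {
		b.bytes, b.docs = 0, 0
		return true
	}
	return false
}

func main() {
	b := &batcher{maxBytes: 10 << 20, maxDocs: 1000} // 10 MB / 1000 docs
	flushes := 0
	for i := 0; i < 2500; i++ {
		if b.add(2048) { // with 2 KB docs, the doc-count limit trips first
			flushes++
		}
	}
	fmt.Println("flushes:", flushes) // 2
}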
@@ -0,0 +1,14 @@
+configs.template:
+  - name: "default_ingest_config"
+    path: ./config/ingest_config.tpl
+    variable:
+      INGEST_CLUSTER_ID: infini_default_ingest_cluster
+      INGEST_CLUSTER_ENDPOINT: [ "https://localhost:9200" ]
+      INGEST_CLUSTER_USERNAME: "admin"
+      CLUSTER_VER: "1.6.0"
+      CLUSTER_DISTRIBUTION: "easysearch"
+      INDEX_PREFIX: ".infini_"
+
+
+#MANAGED_CONFIG_VERSION: 2
+#MANAGED: true
@@ -0,0 +1,24 @@
+configs: #define configs group
+  general_ingest_template: #group name
+    files:
+      - ./templates/ingest_config.tpl
+      - ./templates/task_config.tpl
+      - ./configs/ingest_config.yml
+    #instances: #define which config instance should fetch
+    #  _all: #instance group
+    #    plugins:
+    #      - ingest
+    #    instances:
+    #      - ck0mkk805f5virpsejp0
+    #      - ckjrpdg05f5lrfp8qlng
+    #    configs:
+    #      - general_ingest_template
+    #    secrets:
+    #      - ingest_cluster_password
+
+secrets:
+  ingest_cluster_password: #group name
+    keystore:
+      ingest_cluster_password:
+        type: plaintext
+        value: "d7cc48e69a41dac719fb"
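The `secrets` block stores the ingest password as a keystore entry of type `plaintext`, which templates reference as `$[[keystore.ingest_cluster_password]]`. A toy resolver for such references, assuming the keystore is a simple name-to-value map (the real keystore presumably supports non-plaintext types as well):

package main

import (
	"fmt"
	"strings"
)

var keystore = map[string]string{
	"ingest_cluster_password": "d7cc48e69a41dac719fb",
}

// resolve expands $[[keystore.NAME]] references from the keystore map.
func resolve(s string) string {
	for name, value := range keystore {
		s = strings.ReplaceAll(s, "$[[keystore."+name+"]]", value)
	}
	return s
}

func main() {
	fmt.Println(resolve("password: $[[keystore.ingest_cluster_password]]"))
}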
@@ -0,0 +1,97 @@
+elasticsearch:
+  - name: $[[INGEST_CLUSTER_ID]]
+    enabled: true
+    endpoints: $[[INGEST_CLUSTER_ENDPOINT]]
+    discovery:
+      enabled: false
+    basic_auth:
+      username: $[[INGEST_CLUSTER_USERNAME]]
+      password: $[[keystore.ingest_cluster_password]]
+
+metrics:
+  enabled: true
+  queue: metrics
+  network:
+    enabled: true
+    summary: true
+    details: true
+  memory:
+    metrics:
+      - swap
+      - memory
+  disk:
+    metrics:
+      - iops
+      - usage
+  cpu:
+    metrics:
+      - idle
+      - system
+      - user
+      - iowait
+      - load
+  instance:
+    enabled: true
+
+elastic:
+  availability_check:
+    enabled: false
+
+pipeline:
+  - name: merge_logs
+    auto_start: true
+    keep_running: true
+    processor:
+      - indexing_merge:
+          elasticsearch: "$[[INGEST_CLUSTER_ID]]"
+          index_name: ".infini_logs"
+          type_name: "_doc"
+          input_queue: "logs"
+          idle_timeout_in_seconds: 10
+          output_queue:
+            name: "merged_requests"
+          worker_size: 1
+          bulk_size_in_mb: 5
+  - name: merge_metrics
+    auto_start: true
+    keep_running: true
+    processor:
+      - indexing_merge:
+          elasticsearch: "$[[INGEST_CLUSTER_ID]]"
+          index_name: ".infini_metrics"
+          type_name: "_doc"
+          input_queue: "metrics"
+          output_queue:
+            name: "merged_requests"
+          worker_size: 1
+          bulk_size_in_mb: 5
+  - name: ingest_merged_requests
+    enabled: true
+    auto_start: true
+    keep_running: true
+    processor:
+      - bulk_indexing:
+          max_worker_size: 1
+          verbose_bulk_result: false
+          bulk:
+            batch_size_in_mb: 5
+            batch_size_in_docs: 5000
+            max_retry_times: 0
+            invalid_queue: ""
+            response_handle:
+              include_index_stats: false
+              include_action_stats: false
+              output_bulk_stats: false
+              include_error_details: true
+              save_error_results: true
+              save_success_results: false
+              save_busy_results: false
+          consumer:
+            fetch_max_messages: 5
+          queues:
+            type: indexing_merge
+          when:
+            cluster_available: ["$[[INGEST_CLUSTER_ID]]"]
+
+#MANAGED_CONFIG_VERSION: 16
+#MANAGED: true
@@ -0,0 +1,90 @@
+env:
+  CLUSTER_PASSWORD: $[[keystore.$[[CLUSTER_ID]]_password]]
+
+elasticsearch:
+  - id: $[[CLUSTER_ID]]
+    name: $[[CLUSTER_ID]]
+    enabled: true
+    endpoints: $[[CLUSTER_ENDPOINT]]
+    discovery:
+      enabled: false
+    basic_auth:
+      username: $[[CLUSTER_USERNAME]]
+      password: $[[CLUSTER_PASSWORD]]
+
+pipeline:
+#clsuter level metrics
+- auto_start: $[[CLUSTER_LEVEL_TASKS_ENABLED]]
+  enabled: $[[CLUSTER_LEVEL_TASKS_ENABLED]]
+  keep_running: true
+  singleton: true
+  name: collect_$[[CLUSTER_ID]]_es_cluster_stats
+  retry_delay_in_ms: 10000
+  processor:
+  - es_cluster_stats:
+      elasticsearch: $[[CLUSTER_ID]]
+      labels:
+        cluster_id: $[[CLUSTER_ID]]
+      when:
+        cluster_available: ["$[[CLUSTER_ID]]"]
+
+- auto_start: $[[CLUSTER_LEVEL_TASKS_ENABLED]]
+  enabled: $[[CLUSTER_LEVEL_TASKS_ENABLED]]
+  keep_running: true
+  singleton: true
+  name: collect_$[[CLUSTER_ID]]_es_index_stats
+  retry_delay_in_ms: 10000
+  processor:
+  - es_index_stats:
+      elasticsearch: $[[CLUSTER_ID]]
+      labels:
+        cluster_id: $[[CLUSTER_ID]]
+      when:
+        cluster_available: ["$[[CLUSTER_ID]]"]
+
+- auto_start: $[[CLUSTER_LEVEL_TASKS_ENABLED]]
+  enabled: $[[CLUSTER_LEVEL_TASKS_ENABLED]]
+  keep_running: true
+  singleton: true
+  name: collect_$[[CLUSTER_ID]]_es_cluster_health
+  retry_delay_in_ms: 10000
+  processor:
+  - es_cluster_health:
+      elasticsearch: $[[CLUSTER_ID]]
+      labels:
+        cluster_id: $[[CLUSTER_ID]]
+      when:
+        cluster_available: ["$[[CLUSTER_ID]]"]
+
+#node level metrics
+- auto_start: $[[NODE_LEVEL_TASKS_ENABLED]]
+  enabled: $[[NODE_LEVEL_TASKS_ENABLED]]
+  keep_running: true
+  name: collect_$[[CLUSTER_ID]]_es_node_stats
+  retry_delay_in_ms: 10000
+  processor:
+  - es_node_stats:
+      elasticsearch: $[[CLUSTER_ID]]
+      labels:
+        cluster_id: $[[CLUSTER_ID]]
+      when:
+        cluster_available: ["$[[CLUSTER_ID]]"]
+
+#node logs
+- auto_start: $[[NODE_LEVEL_TASKS_ENABLED]]
+  enabled: $[[NODE_LEVEL_TASKS_ENABLED]]
+  keep_running: true
+  name: collect_$[[CLUSTER_ID]]_es_logs
+  retry_delay_in_ms: 10000
+  processor:
+  - es_logs_processor:
+      elasticsearch: $[[CLUSTER_ID]]
+      labels:
+        cluster_id: $[[CLUSTER_ID]]
+      logs_path: $[[NODE_LOGS_PATH]]
+      queue_name: logs
+      when:
+        cluster_available: ["$[[CLUSTER_ID]]"]
+
+#MANAGED_CONFIG_VERSION: 11
+#MANAGED: true
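The cluster-level tasks above are marked `singleton: true`, so stats for a cluster are collected once rather than once per start. A process-local sketch of such a guard; how singletons are coordinated across multiple agents is outside this sketch:

package main

import (
	"fmt"
	"sync"
)

// registry ensures a named singleton task is started at most once
// within this process.
type registry struct {
	mu      sync.Mutex
	running map[string]bool
}

func (r *registry) start(name string, task func()) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.running[name] {
		return false
	}
	r.running[name] = true
	go task()
	return true
}

func main() {
	r := &registry{running: map[string]bool{}}
	name := "collect_mycluster_es_cluster_stats"
	fmt.Println(r.start(name, func() {})) // true: started
	fmt.Println(r.start(name, func() {})) // false: already running
}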
console.yml
@@ -1,6 +1,3 @@
-path.configs: "config"
-configs.auto_reload: true
-
 #env:
 #  INFINI_CONSOLE_ENDPOINT: "http://127.0.0.1:9000"
 #  INGEST_CLUSTER_ENDPOINT: "https://127.0.0.1:9200"

@@ -11,6 +8,20 @@ configs.auto_reload: true
 #  WECOM_WEBHOOK_ENDPOINT:
 #  FEISHU_WEBHOOK_ENDPOINT:
 
+
+# must in major config file
+path.configs: "config"
+configs:
+  managed: true
+  auto_reload: true
+  manager:
+    local_configs_repo_path: ./config_repo/
+  tls: #for mTLS connection with config servers
+    enabled: true
+    ca_file: config/certs/ca.crt
+    cert_file: config/certs/ca.crt
+    key_file: config/certs/ca.key
+    skip_insecure_verify: false
 web:
   enabled: true
   embedding_api: true

@@ -52,7 +63,6 @@ elastic:
 
 metrics:
   enabled: true
-  major_ip_pattern: "192.*"
   queue: metrics
   elasticsearch:
     enabled: true

@@ -63,6 +73,7 @@ badger:
   value_log_max_entries: 1000000
   value_log_file_size: 104857600
 
+
 security:
   enabled: true
 #  authc:
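`auto_reload: true` asks the console to pick up config changes without a restart. One simple way such an option can be implemented is by polling file modification times, sketched below; the real implementation may use filesystem notifications instead:

package main

import (
	"fmt"
	"os"
	"time"
)

// watch polls a config file's mtime and invokes reload when it changes.
func watch(path string, interval time.Duration, reload func()) {
	var last time.Time
	for {
		if info, err := os.Stat(path); err == nil && info.ModTime().After(last) {
			last = info.ModTime()
			reload()
		}
		time.Sleep(interval)
	}
}

func main() {
	go watch("console.yml", 10*time.Second, func() { fmt.Println("config changed, reloading") })
	time.Sleep(time.Second) // demo only; a real service keeps running
}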
main.go
@@ -4,7 +4,9 @@ import (
 	"context"
 	"errors"
 	_ "expvar"
+	api3 "infini.sh/console/modules/agent/api"
 	"infini.sh/console/plugin/api/email"
+	model2 "infini.sh/framework/core/model"
 	_ "time/tzdata"
 
 	log "github.com/cihub/seelog"

@@ -12,7 +14,6 @@ import (
 	"infini.sh/console/model"
 	"infini.sh/console/model/alerting"
 	"infini.sh/console/model/insight"
-	"infini.sh/console/modules/agent"
 	_ "infini.sh/console/plugin"
 	setup1 "infini.sh/console/plugin/setup"
 	alerting2 "infini.sh/console/service/alerting"

@@ -35,6 +36,7 @@ import (
 	"infini.sh/framework/modules/task"
 	"infini.sh/framework/modules/ui"
 	_ "infini.sh/framework/plugins"
+	_ "infini.sh/framework/plugins/managed"
 	api2 "infini.sh/gateway/api"
 	_ "infini.sh/gateway/proxy"
 )

@@ -70,7 +72,6 @@ func main() {
 	modules = append(modules, module.ModuleItem{Value: &task.TaskModule{}, Priority: 1})
 	modules = append(modules, module.ModuleItem{Value: &metrics.MetricsModule{}, Priority: 1})
 	modules = append(modules, module.ModuleItem{Value: &security.Module{}, Priority: 1})
-	modules = append(modules, module.ModuleItem{Value: &agent.AgentModule{}, Priority: 100})
 
 	uiModule := &ui.UIModule{}
 

@@ -84,7 +85,7 @@ func main() {
 
 	if !global.Env().SetupRequired() {
 		for _, v := range modules {
-			module.RegisterModuleWithPriority(v.Value,v.Priority)
+			module.RegisterModuleWithPriority(v.Value, v.Priority)
 		}
 	} else {
 		for _, v := range modules {

@@ -94,6 +95,8 @@ func main() {
 
 	api.RegisterAPI("")
 
+	api3.Init()
+
 	appConfig = &config.AppConfig{
 		UI: config.UIConfig{
 			LocalPath: ".public",

@@ -103,7 +106,7 @@ func main() {
 	}
 
 	ok, err := env.ParseConfig("web", appConfig)
-	if err != nil {
+	if err != nil && global.Env().SystemConfig.Configs.PanicOnConfigError {
 		panic(err)
 	}
 	if !ok {

@@ -122,11 +125,10 @@ func main() {
 
 	elastic2.InitTemplate(false)
 
-	//orm.RegisterSchemaWithIndexName(model.Dict{}, "dict")
+	//orm.RegisterSchema(model.Dict{}, "dict")
 	orm.RegisterSchemaWithIndexName(elastic.View{}, "view")
 	orm.RegisterSchemaWithIndexName(elastic.CommonCommand{}, "commands")
-	//orm.RegisterSchemaWithIndexName(elastic.TraceTemplate{}, "trace-template")
-	orm.RegisterSchemaWithIndexName(model.Instance{}, "instance")
+	//orm.RegisterSchema(elastic.TraceTemplate{}, "trace-template")
 	orm.RegisterSchemaWithIndexName(alerting.Rule{}, "alert-rule")
 	orm.RegisterSchemaWithIndexName(alerting.Alert{}, "alert-history")
 	orm.RegisterSchemaWithIndexName(alerting.AlertMessage{}, "alert-message")

@@ -138,6 +140,9 @@ func main() {
 	orm.RegisterSchemaWithIndexName(model.Layout{}, "layout")
 	orm.RegisterSchemaWithIndexName(model.Notification{}, "notification")
 	orm.RegisterSchemaWithIndexName(model.EmailServer{}, "email-server")
+	orm.RegisterSchemaWithIndexName(model2.Instance{}, "instance")
+	orm.RegisterSchemaWithIndexName(api3.RemoteConfig{}, "configs")
+
 	api.RegisterSchema()
 
 	if global.Env().SetupRequired() {
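main.go drops the old `agent.AgentModule` registration (previously priority 100) and pulls in the managed plugin instead. Assuming lower priority values start first, module startup ordering can be as simple as a sort; sketched with stand-in types, not the framework's module system:

package main

import (
	"fmt"
	"sort"
)

type moduleItem struct {
	name     string
	priority int
}

// startAll starts modules in ascending priority order, so a module
// registered with priority 100 would run after the core modules (1).
func startAll(items []moduleItem) {
	sort.Slice(items, func(i, j int) bool { return items[i].priority < items[j].priority })
	for _, m := range items {
		fmt.Println("starting", m.name)
	}
}

func main() {
	startAll([]moduleItem{
		{"security", 1},
		{"metrics", 1},
		{"task", 1},
	})
}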
@@ -6,7 +6,7 @@ package model
 
 import (
 	"fmt"
-	"infini.sh/framework/core/elastic"
+	"infini.sh/framework/core/model"
 	"infini.sh/framework/core/orm"
 )
 

@@ -16,7 +16,7 @@ type EmailServer struct {
 	Host string `json:"host" elastic_mapping:"host:{type:keyword}"`
 	Port int `json:"port" elastic_mapping:"port:{type:keyword}"`
 	TLS bool `json:"tls" elastic_mapping:"tls:{type:keyword}"`
-	Auth *elastic.BasicAuth `json:"auth" elastic_mapping:"auth:{type:object}"`
+	Auth *model.BasicAuth `json:"auth" elastic_mapping:"auth:{type:object}"`
 	Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:boolean}"`
 	CredentialID string `json:"credential_id" elastic_mapping:"credential_id:{type:keyword}"`
 }
@@ -11,26 +11,15 @@ import (
 	"net/http"
 	"time"
 
-	"infini.sh/framework/core/agent"
-	"infini.sh/framework/core/orm"
+	"infini.sh/framework/core/model"
 	"infini.sh/framework/core/util"
 	"infini.sh/framework/modules/pipeline"
 )
 
-type Instance struct {
-	orm.ORMObjectBase
-
-	//InstanceID string `json:"instance_id,omitempty" elastic_mapping:"instance_id: { type: keyword }"`
-	Name string `json:"name,omitempty" elastic_mapping:"name:{type:keyword,fields:{text: {type: text}}}"`
-	Endpoint string `json:"endpoint,omitempty" elastic_mapping:"endpoint: { type: keyword }"`
-	Version map[string]interface{} `json:"version,omitempty" elastic_mapping:"version: { type: object }"`
-	BasicAuth agent.BasicAuth `config:"basic_auth" json:"basic_auth,omitempty" elastic_mapping:"basic_auth:{type:object}"`
-	Owner string `json:"owner,omitempty" config:"owner" elastic_mapping:"owner:{type:keyword}"`
-	Tags []string `json:"tags,omitempty"`
-	Description string `json:"description,omitempty" config:"description" elastic_mapping:"description:{type:keyword}"`
+type TaskWorker struct {
+	model.Instance
 }
 
-func (inst *Instance) CreatePipeline(body []byte) error {
+func (inst *TaskWorker) CreatePipeline(body []byte) error {
 	req := &util.Request{
 		Method: http.MethodPost,
 		Body: body,

@@ -39,7 +28,7 @@ func (inst *TaskWorker) CreatePipeline(body []byte) error {
 	return inst.doRequest(req, nil)
 }
 
-func (inst *Instance) StopPipeline(ctx context.Context, pipelineID string) error {
+func (inst *TaskWorker) StopPipeline(ctx context.Context, pipelineID string) error {
 	req := &util.Request{
 		Method: http.MethodPost,
 		Url: fmt.Sprintf("%s/pipeline/task/%s/_stop", inst.Endpoint, pipelineID),

@@ -48,13 +37,13 @@ func (inst *TaskWorker) StopPipeline(ctx context.Context, pipelineID string) error
 	return inst.doRequest(req, nil)
 }
 
-func (inst *Instance) StopPipelineWithTimeout(pipelineID string, duration time.Duration) error {
+func (inst *TaskWorker) StopPipelineWithTimeout(pipelineID string, duration time.Duration) error {
 	ctx, cancel := context.WithTimeout(context.Background(), duration)
 	defer cancel()
 	return inst.StopPipeline(ctx, pipelineID)
 }
 
-func (inst *Instance) StartPipeline(pipelineID string) error {
+func (inst *TaskWorker) StartPipeline(pipelineID string) error {
 	req := &util.Request{
 		Method: http.MethodPost,
 		Url: fmt.Sprintf("%s/pipeline/task/%s/_start", inst.Endpoint, pipelineID),

@@ -62,7 +51,7 @@ func (inst *TaskWorker) StartPipeline(pipelineID string) error {
 	return inst.doRequest(req, nil)
 }
 
-func (inst *Instance) DeletePipeline(pipelineID string) error {
+func (inst *TaskWorker) DeletePipeline(pipelineID string) error {
 	req := &util.Request{
 		Method: http.MethodDelete,
 		Url: fmt.Sprintf("%s/pipeline/task/%s", inst.Endpoint, pipelineID),

@@ -70,7 +59,7 @@ func (inst *TaskWorker) DeletePipeline(pipelineID string) error {
 	return inst.doRequest(req, nil)
 }
 
-func (inst *Instance) GetPipeline(pipelineID string) (*pipeline.PipelineStatus, error) {
+func (inst *TaskWorker) GetPipeline(pipelineID string) (*pipeline.PipelineStatus, error) {
 	if pipelineID == "" {
 		return nil, errors.New("invalid pipelineID")
 	}

@@ -89,7 +78,7 @@ func (inst *TaskWorker) GetPipeline(pipelineID string) (*pipeline.PipelineStatus,
 	return &res, nil
 }
 
-func (inst *Instance) GetPipelinesByIDs(pipelineIDs []string) (pipeline.GetPipelinesResponse, error) {
+func (inst *TaskWorker) GetPipelinesByIDs(pipelineIDs []string) (pipeline.GetPipelinesResponse, error) {
 	body := util.MustToJSONBytes(util.MapStr{
 		"ids": pipelineIDs,
 	})

@@ -106,7 +95,7 @@ func (inst *TaskWorker) GetPipelinesByIDs(pipelineIDs []string) (pipeline.GetPipel
 	return res, err
 }
 
-func (inst *Instance) DeleteQueueBySelector(selector util.MapStr) error {
+func (inst *TaskWorker) DeleteQueueBySelector(selector util.MapStr) error {
 	req := &util.Request{
 		Method: http.MethodDelete,
 		Url: fmt.Sprintf("%s/queue/_search", inst.Endpoint),

@@ -117,7 +106,7 @@ func (inst *TaskWorker) DeleteQueueBySelector(selector util.MapStr) error {
 	return inst.doRequest(req, nil)
 }
 
-func (inst *Instance) DeleteQueueConsumersBySelector(selector util.MapStr) error {
+func (inst *TaskWorker) DeleteQueueConsumersBySelector(selector util.MapStr) error {
 	req := &util.Request{
 		Method: http.MethodDelete,
 		Url: fmt.Sprintf("%s/queue/consumer/_search", inst.Endpoint),

@@ -128,21 +117,22 @@ func (inst *TaskWorker) DeleteQueueConsumersBySelector(selector util.MapStr) error
 	return inst.doRequest(req, nil)
 }
 
-func (inst *Instance) TryConnect(ctx context.Context) error {
+func (inst *TaskWorker) TryConnect(ctx context.Context) error {
 	req := &util.Request{
 		Method: http.MethodGet,
-		Url: fmt.Sprintf("%s/_framework/api/_info", inst.Endpoint),
+		Url: fmt.Sprintf("%s/_info", inst.Endpoint),
 		Context: ctx,
 	}
 	return inst.doRequest(req, nil)
 }
-func (inst *Instance) TryConnectWithTimeout(duration time.Duration) error {
+
+func (inst *TaskWorker) TryConnectWithTimeout(duration time.Duration) error {
 	ctx, cancel := context.WithTimeout(context.Background(), duration)
 	defer cancel()
 	return inst.TryConnect(ctx)
 }
 
-func (inst *Instance) doRequest(req *util.Request, resBody interface{}) error {
+func (inst *TaskWorker) doRequest(req *util.Request, resBody interface{}) error {
 	req.SetBasicAuth(inst.BasicAuth.Username, inst.BasicAuth.Password)
 	result, err := util.ExecuteRequest(req)
 	if err != nil {
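`Instance` becomes `TaskWorker`, embedding the framework's `model.Instance` instead of redefining the same fields, while the pipeline-control methods keep funneling through `doRequest` with basic auth. A simplified, self-contained sketch of that shape; the field names and endpoint below are assumptions for illustration:

package main

import (
	"fmt"
	"net/http"
)

// TaskWorker mirrors the renamed type's shape: an instance identity
// plus pipeline-control helpers that all funnel through doRequest.
type TaskWorker struct {
	Endpoint string
	Username string
	Password string
}

func (w *TaskWorker) doRequest(method, url string) error {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return err
	}
	req.SetBasicAuth(w.Username, w.Password)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

// StartPipeline shows the pattern the real methods follow:
// build a URL off the worker's endpoint and dispatch it.
func (w *TaskWorker) StartPipeline(pipelineID string) error {
	return w.doRequest(http.MethodPost, fmt.Sprintf("%s/pipeline/task/%s/_start", w.Endpoint, pipelineID))
}

func main() {
	w := &TaskWorker{Endpoint: "http://localhost:2900"}
	_ = w.StartPipeline("example") // errors unless a worker is listening
}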
@@ -1,158 +0,0 @@
-/* Copyright © INFINI Ltd. All rights reserved.
- * Web: https://infinilabs.com
- * Email: hello#infini.ltd */
-
-package agent
-
-import (
-	"fmt"
-	log "github.com/cihub/seelog"
-	"infini.sh/console/modules/agent/api"
-	"infini.sh/console/modules/agent/client"
-	"infini.sh/console/modules/agent/common"
-	"infini.sh/console/modules/agent/model"
-	"infini.sh/console/modules/agent/state"
-	"infini.sh/framework/core/agent"
-	"infini.sh/framework/core/credential"
-	"infini.sh/framework/core/elastic"
-	"infini.sh/framework/core/env"
-	"infini.sh/framework/core/host"
-	"infini.sh/framework/core/kv"
-	"infini.sh/framework/core/orm"
-	"infini.sh/framework/core/util"
-	"time"
-)
-
-func (module *AgentModule) Name() string {
-	return "agent"
-}
-
-func (module *AgentModule) Setup() {
-	module.AgentConfig.Enabled = true
-	module.AgentConfig.StateManager.Enabled = true
-	exists, err := env.ParseConfig("agent", &module.AgentConfig)
-	if exists && err != nil {
-		panic(err)
-	}
-	if module.AgentConfig.Enabled {
-		api.Init()
-	}
-}
-func (module *AgentModule) Start() error {
-	if !module.AgentConfig.Enabled {
-		return nil
-	}
-	orm.RegisterSchemaWithIndexName(agent.Instance{}, "agent")
-	orm.RegisterSchemaWithIndexName(agent.ESNodeInfo{}, "agent-node")
-	orm.RegisterSchemaWithIndexName(host.HostInfo{}, "host")
-	orm.RegisterSchemaWithIndexName(agent.Setting{}, "agent-setting")
-	var (
-		executor client.Executor
-		err error
-		caFile string
-		caKey string
-	)
-	if module.AgentConfig.Setup != nil {
-		caFile = module.AgentConfig.Setup.CACertFile
-		caKey = module.AgentConfig.Setup.CAKeyFile
-	}
-	if caFile == "" && caKey == "" {
-		caFile, caKey, err = common.GetOrInitDefaultCaCerts()
-		if err != nil {
-			panic(err)
-		}
-	}
-	executor, err = client.NewMTLSExecutor(caFile, caKey)
-	if err != nil {
-		panic(err)
-	}
-	agClient := &client.Client{
-		Executor: executor,
-	}
-	client.RegisterClient(agClient)
-
-	if module.AgentConfig.StateManager.Enabled {
-		onlineAgentIDs, err := common.GetLatestOnlineAgentIDs(nil, 60)
-		if err != nil {
-			log.Error(err)
-		}
-		agents, err := common.LoadAgentsFromES("")
-		if err != nil {
-			log.Error(err)
-		}
-		agentIds := map[string]string{}
-		for _, ag := range agents {
-			if _, ok := onlineAgentIDs[ag.ID]; ok {
-				agentIds[ag.ID] = "online"
-			}
-		}
-		credential.RegisterChangeEvent(func(cred *credential.Credential) {
-			var effectsClusterIDs []string
-			elastic.WalkConfigs(func(key, value interface{}) bool {
-				if cfg, ok := value.(*elastic.ElasticsearchConfig); ok {
-					if cfg.CredentialID == cred.ID {
-						effectsClusterIDs = append(effectsClusterIDs, cfg.ID)
-					}
-				}
-				return true
-			})
-			if len(effectsClusterIDs) > 0 {
-				queryDsl := util.MapStr{
-					"query": util.MapStr{
-						"bool": util.MapStr{
-							"must": []util.MapStr{
-								{
-									"terms": util.MapStr{
-										"metadata.labels.cluster_id": effectsClusterIDs,
-									},
-								},
-							},
-						},
-					},
-					"script": util.MapStr{
-						"source": fmt.Sprintf("ctx._source['updated'] = '%s'", time.Now().Format(time.RFC3339Nano)),
-					},
-				}
-				err = orm.UpdateBy(agent.Setting{}, util.MustToJSONBytes(queryDsl))
-				if err != nil {
-					log.Error(err)
-				}
-			}
-			//check ingest cluster credential
-			if module.AgentConfig.Setup != nil && module.AgentConfig.Setup.IngestClusterCredentialID == cred.ID {
-				agents, err = common.LoadAgentsFromES("")
-				if err != nil {
-					log.Error(err)
-					return
-				}
-				for _, ag := range agents {
-					err = kv.AddValue(model.KVAgentIngestConfigChanged, []byte(ag.ID), []byte("1"))
-					if err != nil {
-						log.Error(err)
-					}
-				}
-			}
-		})
-
-		sm := state.NewStateManager(time.Second*30, "agent_state", agentIds, agClient)
-		state.RegisterStateManager(sm)
-		go sm.LoopState()
-	}
-	return nil
-}
-
-func (module *AgentModule) Stop() error {
-	if !module.AgentConfig.Enabled {
-		return nil
-	}
-	log.Info("start to stop agent module")
-	if module.AgentConfig.StateManager.Enabled {
-		state.GetStateManager().Stop()
-	}
-	log.Info("agent module was stopped")
-	return nil
-}
-
-type AgentModule struct {
-	model.AgentConfig
-}
@ -0,0 +1,689 @@
|
||||||
|
/* Copyright © INFINI LTD. All rights reserved.
|
||||||
|
* Web: https://infinilabs.com
|
||||||
|
* Email: hello#infini.ltd */
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
log "github.com/cihub/seelog"
|
||||||
|
httprouter "infini.sh/framework/core/api/router"
|
||||||
|
"infini.sh/framework/core/elastic"
|
||||||
|
"infini.sh/framework/core/global"
|
||||||
|
"infini.sh/framework/core/model"
|
||||||
|
"infini.sh/framework/core/orm"
|
||||||
|
"infini.sh/framework/core/util"
|
||||||
|
"infini.sh/framework/modules/elastic/adapter"
|
||||||
|
"infini.sh/framework/modules/elastic/common"
|
||||||
|
"infini.sh/framework/modules/elastic/metadata"
|
||||||
|
"infini.sh/framework/plugins/managed/server"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
//node -> binding item
|
||||||
|
func GetEnrolledNodesByAgent(instance *model.Instance) (map[string]BindingItem, error) {
|
||||||
|
|
||||||
|
//get nodes settings where agent id = instance id
|
||||||
|
q := orm.Query{
|
||||||
|
Size: 1000,
|
||||||
|
Conds: orm.And(orm.Eq("metadata.category", "node_settings"),
|
||||||
|
orm.Eq("metadata.name", "agent"),
|
||||||
|
orm.Eq("metadata.labels.agent_id", instance.ID),
|
||||||
|
),
|
||||||
|
}
|
||||||
|
|
||||||
|
err, result := orm.Search(model.Setting{}, &q)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ids := map[string]BindingItem{}
|
||||||
|
for _, row := range result.Result {
|
||||||
|
v, ok := row.(map[string]interface{})
|
||||||
|
if ok {
|
||||||
|
x, ok := v["payload"]
|
||||||
|
if ok {
|
||||||
|
f, ok := x.(map[string]interface{})
|
||||||
|
if ok {
|
||||||
|
nodeID, ok := f["node_uuid"].(string)
|
||||||
|
if ok {
|
||||||
|
item := BindingItem{}
|
||||||
|
item.ClusterID = util.ToString(f["cluster_id"])
|
||||||
|
|
||||||
|
item.ClusterUUID = util.ToString(f["cluster_uuid"])
|
||||||
|
item.NodeUUID = nodeID
|
||||||
|
|
||||||
|
t, ok := v["updated"]
|
||||||
|
if ok {
|
||||||
|
layout := "2006-01-02T15:04:05.999999-07:00"
|
||||||
|
t1, err := time.Parse(layout, util.ToString(t))
|
||||||
|
if err == nil {
|
||||||
|
item.Updated = t1.Unix()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ids[item.NodeUUID] = item
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func refreshNodesInfo(inst *model.Instance) (*elastic.DiscoveryResult, error) {
|
||||||
|
enrolledNodesByAgent, err := GetEnrolledNodesByAgent(inst)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error on get binding nodes info: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Second*10)
|
||||||
|
defer cancel()
|
||||||
|
nodesInfo, err := GetElasticsearchNodesViaAgent(ctxTimeout, inst)
|
||||||
|
if err != nil {
|
||||||
|
//TODO return already biding nodes info ??
|
||||||
|
return nil, fmt.Errorf("error on get nodes info from agent: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
newNodes := map[string]*elastic.LocalNodeInfo{}
|
||||||
|
//binding nodes info with agent
|
||||||
|
for nodeID, node := range nodesInfo.Nodes {
|
||||||
|
v, ok := enrolledNodesByAgent[nodeID]
|
||||||
|
node.Status = "online"
|
||||||
|
if ok {
|
||||||
|
node.ClusterID = v.ClusterID
|
||||||
|
node.Enrolled = true
|
||||||
|
|
||||||
|
//output
|
||||||
|
newNodes[nodeID] = node
|
||||||
|
} else {
|
||||||
|
newNodes[nodeID] = node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var findPIDS = map[int]*elastic.NodesInfo{}
|
||||||
|
|
||||||
|
//TODO, merge requests to one
|
||||||
|
for k, v := range enrolledNodesByAgent {
|
||||||
|
|
||||||
|
if _, ok := newNodes[k]; !ok {
|
||||||
|
client := elastic.GetClientNoPanic(v.ClusterID)
|
||||||
|
if client == nil {
|
||||||
|
log.Error("client not found:", v.ClusterID)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
status := "online"
|
||||||
|
nodeInfo, err := client.GetNodeInfo(v.NodeUUID)
|
||||||
|
var clusterInfo *elastic.ClusterInformation
|
||||||
|
if err != nil || nodeInfo == nil {
|
||||||
|
status = "offline"
|
||||||
|
|
||||||
|
//get nodes information
|
||||||
|
nodeInfos, err := metadata.GetNodeInformation(v.ClusterID, []string{v.NodeUUID})
|
||||||
|
if err != nil || len(nodeInfos) == 0 {
|
||||||
|
log.Error("node info not found:", v.ClusterID, ",", []string{v.NodeUUID}, ",", err, err != nil, len(nodeInfos) == 0)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
//get node information
|
||||||
|
nodeInfo, ok = nodeInfos[v.NodeUUID]
|
||||||
|
if !ok {
|
||||||
|
log.Error("node info not found:", v.ClusterID, ",", v.NodeUUID, ",", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
//get cluster information
|
||||||
|
clusterInfo, err = metadata.GetClusterInformation(v.ClusterID)
|
||||||
|
if err != nil || clusterInfo == nil {
|
||||||
|
log.Error("cluster info not found:", v.ClusterID, ",", err, clusterInfo == nil)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
clusterInfo, err = adapter.ClusterVersion(elastic.GetMetadata(v.ClusterID))
|
||||||
|
if err != nil || clusterInfo == nil {
|
||||||
|
log.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
findPIDS[nodeInfo.Process.Id] = nodeInfo
|
||||||
|
|
||||||
|
newNodes[k] = &elastic.LocalNodeInfo{
|
||||||
|
Status: status,
|
||||||
|
ClusterID: v.ClusterID,
|
||||||
|
NodeUUID: v.NodeUUID,
|
||||||
|
Enrolled: true,
|
||||||
|
NodeInfo: nodeInfo,
|
||||||
|
ClusterInfo: clusterInfo,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nodesInfo.Nodes = newNodes
|
||||||
|
newUnknows := []model.ProcessInfo{}
|
||||||
|
for _, v := range nodesInfo.UnknownProcess {
|
||||||
|
|
||||||
|
if _, ok := findPIDS[v.PID]; !ok {
|
||||||
|
newUnknows = append(newUnknows, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
nodesInfo.UnknownProcess = newUnknows
|
||||||
|
return nodesInfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//get nodes info via agent
|
||||||
|
func GetElasticsearchNodesViaAgent(ctx context.Context, instance *model.Instance) (*elastic.DiscoveryResult, error) {
|
||||||
|
req := &util.Request{
|
||||||
|
Method: http.MethodGet,
|
||||||
|
Path: "/elasticsearch/node/_discovery",
|
||||||
|
Context: ctx,
|
||||||
|
}
|
||||||
|
|
||||||
|
obj := elastic.DiscoveryResult{}
|
||||||
|
_, err := server.ProxyAgentRequest(instance.GetEndpoint(), req, &obj)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &obj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type BindingItem struct {
|
||||||
|
//infini system assigned id
|
||||||
|
ClusterID string `json:"cluster_id"`
|
||||||
|
|
||||||
|
ClusterUUID string `json:"cluster_uuid"`
|
||||||
|
NodeUUID string `json:"node_uuid"`
|
||||||
|
|
||||||
|
Updated int64 `json:"updated"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetElasticLogFiles(ctx context.Context, instance *model.Instance, logsPath string) (interface{}, error) {
|
||||||
|
|
||||||
|
reqBody := util.MustToJSONBytes(util.MapStr{
|
||||||
|
"logs_path": logsPath,
|
||||||
|
})
|
||||||
|
|
||||||
|
req := &util.Request{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
Path: "/elasticsearch/logs/_list",
|
||||||
|
Context: ctx,
|
||||||
|
Body: reqBody,
|
||||||
|
}
|
||||||
|
|
||||||
|
resBody := map[string]interface{}{}
|
||||||
|
_, err := server.ProxyAgentRequest(instance.GetEndpoint(), req, &resBody)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if resBody["success"] != true {
|
||||||
|
return nil, fmt.Errorf("get elasticsearch log files error: %v", resBody)
|
||||||
|
}
|
||||||
|
return resBody["result"], nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetElasticLogFileContent(ctx context.Context, instance *model.Instance, body interface{}) (interface{}, error) {
|
||||||
|
req := &util.Request{
|
||||||
|
Method: http.MethodPost,
|
||||||
|
Path: "/elasticsearch/logs/_read",
|
||||||
|
Context: ctx,
|
||||||
|
Body: util.MustToJSONBytes(body),
|
||||||
|
}
|
||||||
|
resBody := map[string]interface{}{}
|
||||||
|
_, err := server.ProxyAgentRequest(instance.GetEndpoint(), req, &resBody)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if resBody["success"] != true {
|
||||||
|
return nil, fmt.Errorf("get elasticsearch log files error: %v", resBody["error"])
|
||||||
|
}
|
||||||
|
var hasMore bool
|
||||||
|
if v, ok := resBody["EOF"].(bool); ok && !v {
|
||||||
|
hasMore = true
|
||||||
|
}
|
||||||
|
return map[string]interface{}{
|
||||||
|
"lines": resBody["result"],
|
||||||
|
"has_more": hasMore,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *APIHandler) getLogFilesByNode(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	clusterID := ps.MustGetParameter("id")
	nodeID := ps.MustGetParameter("node_id")
	inst, pathLogs, err := getAgentByNodeID(clusterID, nodeID)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		log.Error(err)
		return
	}
	if inst == nil {
		log.Error(fmt.Sprintf("can not find agent by node [%s]", nodeID))
		h.WriteJSON(w, util.MapStr{
			"success": false,
			"reason":  "AGENT_NOT_FOUND",
		}, http.StatusOK)
		return
	}
	logFiles, err := GetElasticLogFiles(nil, inst, pathLogs)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		log.Error(err)
		return
	}
	h.WriteJSON(w, util.MapStr{
		"success":   true,
		"log_files": logFiles,
	}, http.StatusOK)
}

func (h *APIHandler) getLogFileContent(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	clusterID := ps.MustGetParameter("id")
	nodeID := ps.MustGetParameter("node_id")
	inst, pathLogs, err := getAgentByNodeID(clusterID, nodeID)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		log.Error(err)
		return
	}
	if inst == nil {
		h.WriteError(w, fmt.Sprintf("can not find agent by node [%s]", nodeID), http.StatusInternalServerError)
		return
	}
	reqBody := struct {
		FileName        string `json:"file_name"`
		LogsPath        string `json:"logs_path"`
		Offset          int    `json:"offset"`
		Lines           int    `json:"lines"`
		StartLineNumber int64  `json:"start_line_number"`
	}{}
	err = h.DecodeJSON(req, &reqBody)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		log.Error(err)
		return
	}
	reqBody.LogsPath = pathLogs
	res, err := GetElasticLogFileContent(nil, inst, reqBody)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		log.Error(err)
		return
	}
	h.WriteJSON(w, res, http.StatusOK)
}

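// getAgentByNodeID below resolves the agent instance bound to a node by
// querying the node_settings documents written by NewNodeAgentSettings. A
// sketch of the stored shape it walks (inferred from the field accesses in
// the function, not a schema definition):
//
//	{
//	  "metadata": {
//	    "category": "node_settings",
//	    "name": "agent",
//	    "labels": { "agent_id": "<instance id>" }
//	  },
//	  "payload": { "cluster_id": "...", "node_uuid": "..." }
//	}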
//instance, pathLogs
func getAgentByNodeID(clusterID, nodeID string) (*model.Instance, string, error) {

	q := orm.Query{
		Size: 1000,
		Conds: orm.And(orm.Eq("metadata.category", "node_settings"),
			orm.Eq("metadata.name", "agent"),
			orm.Eq("payload.cluster_id", clusterID),
			orm.Eq("payload.node_uuid", nodeID),
		),
	}

	err, result := orm.Search(model.Setting{}, &q)
	if err != nil {
		return nil, "", err
	}

	nodeInfo, err := metadata.GetNodeConfig(clusterID, nodeID)
	if err != nil || nodeInfo == nil {
		log.Error("node info is nil")
		return nil, "", err
	}

	pathLogs := nodeInfo.Payload.NodeInfo.GetPathLogs()

	for _, row := range result.Result {
		v, ok := row.(map[string]interface{})
		if ok {
			x, ok := v["metadata"]
			if ok {
				f, ok := x.(map[string]interface{})
				if ok {
					labels, ok := f["labels"].(map[string]interface{})
					if ok {
						id, ok := labels["agent_id"]
						if ok {
							inst := &model.Instance{}
							inst.ID = util.ToString(id)
							_, err = orm.Get(inst)
							if err != nil {
								return nil, pathLogs, err
							}
							if inst.Name == "" {
								return nil, pathLogs, nil
							}
							return inst, pathLogs, nil
						}
					}
				}
			}
		}
	}
	return nil, "", nil
}

type ClusterInfo struct {
	ClusterIDs []string `json:"cluster_id"`
}

func (h *APIHandler) autoEnrollESNode(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {

	//{"cluster_id":["infini_default_system_cluster"]}

	//get instances
	//get all unknown nodes
	//check each process with cluster id

	//send this to background task

	h.WriteAckOKJSON(w)
}

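// discoveryESNodesInfo below also accepts an optional POST body that narrows
// which clusters' agent credentials are tried against unknown processes; per
// the inline comment in the handler, the body looks like:
//
//	{"cluster_id": ["infini_default_system_cluster"]}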
func (h *APIHandler) discoveryESNodesInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {

	id := ps.MustGetParameter("instance_id")
	instance := model.Instance{}
	instance.ID = id
	exists, err := orm.Get(&instance)
	if !exists || err != nil {
		h.WriteJSON(w, util.MapStr{
			"_id":   id,
			"found": false,
		}, http.StatusNotFound)
		return
	}

	nodes, err := refreshNodesInfo(&instance)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		return
	}

	if len(nodes.UnknownProcess) > 0 {

		var discoveredPIDs map[int]*elastic.LocalNodeInfo = make(map[int]*elastic.LocalNodeInfo)
		if req.Method == "POST" {
			bytes, err := h.GetRawBody(req)
			if err != nil {
				panic(err)
			}

			if len(bytes) > 0 {
				clusterInfo := ClusterInfo{}
				util.FromJSONBytes(bytes, &clusterInfo)
				if len(clusterInfo.ClusterIDs) > 0 {
					//try connect this node to cluster by using this cluster's agent credential
					for _, clusterID := range clusterInfo.ClusterIDs {
						meta := elastic.GetMetadata(clusterID)
						if meta != nil {

							states, err := elastic.GetClient(clusterID).GetClusterState()
							if err != nil || states == nil {
								log.Error(err)
								continue
							}

							clusterUUID := states.ClusterUUID

							if meta.Config.AgentCredentialID != "" {
								auth, err := common.GetAgentBasicAuth(meta.Config)
								if err != nil {
									panic(err)
								}
								if auth != nil {
									//try connect
									for _, node := range nodes.UnknownProcess {
										for _, v := range node.ListenAddresses {
											ip := v.IP
											if util.ContainStr(v.IP, "::") {
												ip = fmt.Sprintf("[%s]", v.IP)
											}
											nodeHost := fmt.Sprintf("%s:%d", ip, v.Port)
											success, tryAgain, nodeInfo := h.getESNodeInfoViaProxy(nodeHost, "http", auth, &instance)
											if !success && tryAgain {
												//try https again
												success, tryAgain, nodeInfo = h.getESNodeInfoViaProxy(nodeHost, "https", auth, &instance)
											}

											if success {
												log.Debug("connect to es node success:", nodeHost, ", pid: ", node.PID)
												discoveredPIDs[node.PID] = nodeInfo

												if nodeInfo.ClusterInfo.ClusterUUID != clusterUUID {
													log.Error("cluster uuid not match, cluster id: ", clusterID, ", cluster uuid: ", clusterUUID, ", node cluster uuid: ", nodeInfo.ClusterInfo.ClusterUUID)
													continue
												}

												//enroll this node
												item := BindingItem{
													ClusterID:   clusterID,
													ClusterUUID: nodeInfo.ClusterInfo.ClusterUUID,
													NodeUUID:    nodeInfo.NodeUUID,
												}

												settings := NewNodeAgentSettings(instance.ID, &item)
												err = orm.Update(&orm.Context{
													Refresh: "wait_for",
												}, settings)

												if err == nil {
													nodeInfo.ClusterID = clusterID
													nodeInfo.Enrolled = true
												}
												break
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}

		newUnknownProcess := []model.ProcessInfo{}
		if len(discoveredPIDs) > 0 {
			for _, node := range nodes.UnknownProcess {
				if item, ok := discoveredPIDs[node.PID]; !ok {
					newUnknownProcess = append(newUnknownProcess, node)
				} else {
					nodes.Nodes[item.NodeUUID] = item
				}
			}
			nodes.UnknownProcess = newUnknownProcess
		}
	}

	h.WriteJSON(w, nodes, http.StatusOK)
}

func (h *APIHandler) getESNodeInfoViaProxy(esHost string, esSchema string, auth *model.BasicAuth, instance *model.Instance) (success, tryAgain bool, info *elastic.LocalNodeInfo) {
	esConfig := elastic.ElasticsearchConfig{Host: esHost, Schema: esSchema, BasicAuth: auth}
	return h.getESNodeInfoViaProxyWithConfig(&esConfig, auth, instance)
}

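// The (success, tryAgain, info) triple returned below drives the caller's
// retry logic: a transport error yields tryAgain=true so the caller may retry
// with another schema, while a 403 yields tryAgain=false to stop probing. A
// hedged usage sketch mirroring the discovery handler (host value is
// illustrative):
//
//	success, tryAgain, info := h.getESNodeInfoViaProxy("127.0.0.1:9200", "http", auth, &instance)
//	if !success && tryAgain {
//		success, _, info = h.getESNodeInfoViaProxy("127.0.0.1:9200", "https", auth, &instance)
//	}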
func (h *APIHandler) getESNodeInfoViaProxyWithConfig(cfg *elastic.ElasticsearchConfig, auth *model.BasicAuth, instance *model.Instance) (success, tryAgain bool, info *elastic.LocalNodeInfo) {
	body := util.MustToJSONBytes(cfg)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	req := &util.Request{
		Method:  http.MethodPost,
		Path:    "/elasticsearch/node/_info",
		Context: ctx,
		Body:    body,
	}
	if auth != nil {
		req.SetBasicAuth(auth.Username, auth.Password)
	}

	obj := elastic.LocalNodeInfo{}
	res, err := server.ProxyAgentRequest(instance.GetEndpoint(), req, &obj)
	if err != nil {
		if global.Env().IsDebug {
			log.Error(err)
		}
		return false, true, nil
	}

	if res != nil && res.StatusCode == http.StatusForbidden {
		return false, false, nil
	}

	if res != nil && res.StatusCode == http.StatusOK {
		node := elastic.LocalNodeInfo{}
		err := util.FromJSONBytes(res.Body, &node)
		if err != nil {
			panic(err)
		}
		return true, false, &node
	}

	return false, true, nil
}

func NewClusterSettings(clusterID string) *model.Setting {
	settings := model.Setting{
		Metadata: model.Metadata{
			Category: Cluster,
		},
	}
	settings.ID = fmt.Sprintf("%v_%v_%v", settings.Metadata.Category, settings.Metadata.Name, clusterID)

	settings.Metadata.Labels = util.MapStr{
		"cluster_id": clusterID,
	}

	return &settings
}

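// Setting IDs are composed as "<category>_<name>_<key>", so the binding
// written by NewNodeAgentSettings below for node UUID "abc" becomes
// "node_settings_agent_abc"; NewClusterSettings above leaves Name empty,
// which yields a double underscore, e.g. "cluster_settings__<clusterID>".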
func NewNodeAgentSettings(instanceID string, item *BindingItem) *model.Setting {

	settings := model.Setting{
		Metadata: model.Metadata{
			Category: Node,
			Name:     "agent",
		},
	}
	settings.ID = fmt.Sprintf("%v_%v_%v", settings.Metadata.Category, settings.Metadata.Name, item.NodeUUID)

	settings.Metadata.Labels = util.MapStr{
		"agent_id": instanceID,
	}

	settings.Payload = util.MapStr{
		"cluster_id":   item.ClusterID,
		"cluster_uuid": item.ClusterUUID,
		"node_uuid":    item.NodeUUID,
	}

	return &settings
}

func NewIndexSettings(clusterID, nodeID, agentID, indexName, indexID string) *model.Setting {

	settings := model.Setting{
		Metadata: model.Metadata{
			Category: Index,
		},
	}
	settings.ID = fmt.Sprintf("%v_%v_%v", settings.Metadata.Category, settings.Metadata.Name, nodeID)

	settings.Metadata.Labels = util.MapStr{
		"cluster_id": clusterID,
		"node_id":    nodeID,
		"agent_id":   agentID,
		"index_name": indexName,
		"index_id":   indexID,
	}

	return &settings
}

const Cluster = "cluster_settings"
const Node = "node_settings"
const Index = "index_settings"

func (h *APIHandler) revokeESNode(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	//agent id
	instID := ps.MustGetParameter("instance_id")
	item := BindingItem{}
	err := h.DecodeJSON(req, &item)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		return
	}

	settings := NewNodeAgentSettings(instID, &item)
	err = orm.Delete(&orm.Context{
		Refresh: "wait_for",
	}, settings)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		return
	}
	h.WriteAckOKJSON(w)
}

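// enrollESNode below expects a BindingItem in the request body; a
// representative payload, matching the struct's JSON tags (the UUID values
// are placeholders):
//
//	POST /instance/<instance_id>/node/_enroll
//	{"cluster_id": "c0", "cluster_uuid": "uuid-1", "node_uuid": "uuid-2"}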
func (h *APIHandler) enrollESNode(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {

	//agent id
	instID := ps.MustGetParameter("instance_id")

	exists, instance, err := server.GetRuntimeInstanceByID(instID)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		return
	}

	if !exists {
		h.WriteError(w, "instance not found", http.StatusInternalServerError)
		return //without this return, a nil instance would be dereferenced below
	}

	//node id and cluster id
	item := BindingItem{}
	err = h.DecodeJSON(req, &item)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		return
	}

	//check if the cluster's agent credential is valid
	meta := elastic.GetMetadata(item.ClusterID)
	if meta == nil {
		h.WriteError(w, "cluster not found", http.StatusInternalServerError)
		return
	}

	//use agent credential to access the node
	meta.Config.BasicAuth, _ = common.GetAgentBasicAuth(meta.Config)

	success, _, _ := h.getESNodeInfoViaProxyWithConfig(meta.Config, meta.Config.BasicAuth, instance)

	if success {
		//update node's setting
		settings := NewNodeAgentSettings(instID, &item)
		err = orm.Update(&orm.Context{
			Refresh: "wait_for",
		}, settings)
		if err != nil {
			h.WriteError(w, err.Error(), http.StatusInternalServerError)
			return
		}
		h.WriteAckOKJSON(w)
	} else {
		h.WriteError(w, "failed to access this node", http.StatusInternalServerError)
	}
}

@@ -5,12 +5,11 @@
 package api
 
 import (
-	"context"
 	"fmt"
 	log "github.com/cihub/seelog"
-	"infini.sh/console/modules/agent/state"
 	httprouter "infini.sh/framework/core/api/router"
 	"infini.sh/framework/core/host"
+	"infini.sh/framework/core/model"
 	"infini.sh/framework/core/orm"
 	"infini.sh/framework/core/util"
 	"net/http"
@@ -41,14 +40,13 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt
 	)
 	switch hi.Source {
 	case "agent":
-		hostInfo, err = enrollHostFromAgent(hi.AgentID)
-		if err != nil {
-			errors[hi.IP] = util.MapStr{
-				"error": err.Error(),
-			}
-			log.Error(err)
+		obj := model.Instance{}
+		obj.ID = hi.AgentID
+		exists, err := orm.Get(&obj)
+		if !exists || err != nil {
 			continue
 		}
+		hostInfo = &host.HostInfo{}
 		hostInfo.IP = hi.IP
 		hostInfo.AgentID = hi.AgentID
 		err = orm.Create(nil, hostInfo)
@@ -135,24 +133,23 @@ func (h *APIHandler) GetHostAgentInfo(w http.ResponseWriter, req *http.Request,
 		return
 	}
 
-	sm := state.GetStateManager()
-	ag, err := sm.GetAgent(hostInfo.AgentID)
-	if err != nil {
-		log.Error(err)
-		h.WriteJSON(w, util.MapStr{}, http.StatusOK)
+	obj := model.Instance{}
+	obj.ID = hostInfo.AgentID
+	exists, err := orm.Get(&obj)
+	if !exists || err != nil {
+		h.WriteJSON(w, util.MapStr{
+			"_id":   hostInfo.AgentID,
+			"found": false,
+		}, http.StatusNotFound)
 		return
 	}
-	aversion, err := ag.GetVersion()
-	if err == nil {
-		ag.Version = aversion
-		orm.Save(nil, ag)
-	}
 	h.WriteJSON(w, util.MapStr{
 		"host_id":  hostID,
-		"agent_id": ag.ID,
-		"version":  ag.Version,
+		"agent_id": obj.ID,
+		"version":  obj.Application.Version,
 		"status":   hostInfo.AgentStatus,
-		"endpoint": ag.GetEndpoint(),
+		"endpoint": obj.GetEndpoint(),
 	}, http.StatusOK)
 }
 
@@ -187,53 +184,38 @@ func (h *APIHandler) GetHostElasticProcess(w http.ResponseWriter, req *http.Requ
 		h.WriteJSON(w, util.MapStr{}, http.StatusOK)
 		return
 	}
-	sm := state.GetStateManager()
-	ag, err := sm.GetAgent(hostInfo.AgentID)
-	if err != nil {
-		log.Error(err)
-		h.WriteJSON(w, util.MapStr{}, http.StatusOK)
-		return
-	}
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-	defer cancel()
-	esNodesInfo, err := sm.GetAgentClient().GetElasticsearchNodes(ctx, ag.GetEndpoint())
-	if err != nil {
-		log.Error(err)
-		h.WriteError(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-	var processes []util.MapStr
-	for _, node := range esNodesInfo {
-		processes = append(processes, util.MapStr{
-			"pid":          node.ProcessInfo.PID,
-			"pid_status":   node.ProcessInfo.Status,
-			"cluster_name": node.ClusterName,
-			"cluster_uuid": node.ClusterUuid,
-			"cluster_id":   node.ClusterID,
-			"node_id":      node.NodeUUID,
-			"node_name":    node.NodeName,
-			"uptime_in_ms": time.Now().UnixMilli() - node.ProcessInfo.CreateTime,
-		})
-	}
-	h.WriteJSON(w, util.MapStr{
-		"elastic_processes": processes,
-	}, http.StatusOK)
-}
 
-func enrollHostFromAgent(agentID string) (*host.HostInfo, error){
-	sm := state.GetStateManager()
-	ag, err := sm.GetAgent(agentID)
-	if err != nil {
-		return nil, err
-	}
-	if ag == nil {
-		return nil, fmt.Errorf("can not found agent [%s]", agentID)
-	}
-	agentClient := sm.GetAgentClient()
-	hostInfo, err := agentClient.GetHostInfo(nil, ag.GetEndpoint())
-	if err != nil {
-		return nil, err
-	}
-	hostInfo.AgentStatus = ag.Status
-	return hostInfo, nil
+	obj := model.Instance{}
+	obj.ID = hostInfo.AgentID
+	exists, err = orm.Get(&obj)
+	if !exists || err != nil {
+		h.WriteJSON(w, util.MapStr{
+			"_id":   hostInfo.AgentID,
+			"found": false,
+		}, http.StatusNotFound)
+		return
+	}
+	//esNodesInfo, err := GetElasticsearchNodesViaAgent(context.Background(), &obj)
+	//if err != nil {
+	//	log.Error(err)
+	//	h.WriteError(w, err.Error(), http.StatusInternalServerError)
+	//	return
+	//}
+	//var processes []util.MapStr
+	//for _, node := range esNodesInfo {
+	//	processes = append(processes, util.MapStr{
+	//		"pid":          node.ProcessInfo.PID,
+	//		"pid_status":   node.ProcessInfo.Status,
+	//		"cluster_name": node.ClusterName,
+	//		"cluster_uuid": node.ClusterUuid,
+	//		"cluster_id":   node.ClusterID,
+	//		"node_id":      node.NodeUUID,
+	//		"node_name":    node.NodeName,
+	//		"uptime_in_ms": time.Now().UnixMilli() - node.ProcessInfo.CreateTime,
+	//	})
+	//}
+	h.WriteJSON(w, util.MapStr{
+		//"elastic_processes": processes,
+	}, http.StatusOK)
 }
@@ -7,32 +7,32 @@ package api
 import (
 	"infini.sh/framework/core/api"
 	"infini.sh/framework/core/api/rbac/enum"
+	"infini.sh/framework/plugins/managed/server"
 )
 
+type APIHandler struct {
+	api.Handler
+}
+
 func Init() {
 	handler := APIHandler{}
-	api.HandleAPIMethod(api.POST, "/agent/instance", handler.createInstance)
-	api.HandleAPIMethod(api.GET, "/agent/instance/_search", handler.RequirePermission(handler.searchInstance, enum.PermissionAgentInstanceRead))
-	api.HandleAPIMethod(api.GET, "/agent/instance/:instance_id", handler.getInstance)
-	api.HandleAPIMethod(api.PUT, "/agent/instance/:instance_id", handler.updateInstance)
-	api.HandleAPIMethod(api.DELETE, "/agent/instance/:instance_id", handler.RequirePermission(handler.deleteInstance, enum.PermissionAgentInstanceWrite))
-	api.HandleAPIMethod(api.POST, "/agent/instance/_stats", handler.RequirePermission(handler.getInstanceStats, enum.PermissionAgentInstanceRead))
-	api.HandleAPIMethod(api.GET, "/agent/log/node/:node_id/files", handler.RequirePermission(handler.getLogFilesByNode, enum.PermissionAgentInstanceRead))
-	api.HandleAPIMethod(api.POST, "/agent/log/node/:node_id/_scroll", handler.RequirePermission(handler.getLogFileContent, enum.PermissionAgentInstanceRead))
-	api.HandleAPIMethod(api.GET, "/agent/instance/:instance_id/_nodes", handler.RequirePermission(handler.getESNodesInfo, enum.PermissionAgentInstanceRead))
-	api.HandleAPIMethod(api.POST, "/agent/instance/:instance_id/_nodes/_refresh", handler.RequirePermission(handler.refreshESNodesInfo, enum.PermissionAgentInstanceWrite))
-	api.HandleAPIMethod(api.POST, "/agent/instance/:instance_id/node/_auth", handler.RequirePermission(handler.authESNode, enum.PermissionAgentInstanceWrite))
-	api.HandleAPIMethod(api.DELETE, "/agent/instance/:instance_id/_nodes", handler.RequirePermission(handler.deleteESNode, enum.PermissionAgentInstanceWrite))
-	api.HandleAPIMethod(api.POST, "/agent/instance/:instance_id/node/_associate", handler.RequirePermission(handler.associateESNode, enum.PermissionAgentInstanceWrite))
-	api.HandleAPIMethod(api.POST, "/agent/instance/try_connect", handler.RequireLogin(handler.tryConnect))
-	api.HandleAPIMethod(api.POST, "/agent/auto_associate", handler.RequirePermission(handler.autoAssociateESNode, enum.PermissionAgentInstanceWrite))
 
 	api.HandleAPIMethod(api.POST, "/host/_enroll", handler.enrollHost)
-	api.HandleAPIMethod(api.GET, "/host/:host_id/agent/info",handler.GetHostAgentInfo)
-	api.HandleAPIMethod(api.GET, "/host/:host_id/processes",handler.GetHostElasticProcess)
-	api.HandleAPIMethod(api.DELETE, "/host/:host_id",handler.deleteHost)
+	api.HandleAPIMethod(api.GET, "/host/:host_id/agent/info", handler.GetHostAgentInfo)
+	api.HandleAPIMethod(api.GET, "/host/:host_id/processes", handler.GetHostElasticProcess)
+	api.HandleAPIMethod(api.DELETE, "/host/:host_id", handler.deleteHost)
 
+	//bind agent with nodes
+	api.HandleAPIMethod(api.GET, "/instance/:instance_id/node/_discovery", handler.RequirePermission(handler.discoveryESNodesInfo, enum.PermissionAgentInstanceRead))
+	api.HandleAPIMethod(api.POST, "/instance/:instance_id/node/_discovery", handler.RequirePermission(handler.discoveryESNodesInfo, enum.PermissionAgentInstanceRead))
+	api.HandleAPIMethod(api.POST, "/instance/:instance_id/node/_enroll", handler.RequirePermission(handler.enrollESNode, enum.PermissionAgentInstanceWrite))
+	api.HandleAPIMethod(api.POST, "/instance/:instance_id/node/_revoke", handler.RequirePermission(handler.revokeESNode, enum.PermissionAgentInstanceWrite))
 
-	api.HandleAPIMethod(api.POST, "/agent/install_command", handler.RequireLogin(handler.generateInstallCommand))
-	api.HandleAPIMethod(api.GET, "/agent/install.sh", handler.getInstallScript)
+	api.HandleAPIMethod(api.POST, "/instance/node/_auto_enroll", handler.RequirePermission(handler.autoEnrollESNode, enum.PermissionAgentInstanceWrite))
 
+	//get elasticsearch node logs, direct fetch or via stored logs(TODO)
+	api.HandleAPIMethod(api.GET, "/elasticsearch/:id/node/:node_id/logs/_list", handler.RequirePermission(handler.getLogFilesByNode, enum.PermissionAgentInstanceRead))
+	api.HandleAPIMethod(api.POST, "/elasticsearch/:id/node/:node_id/logs/_read", handler.RequirePermission(handler.getLogFileContent, enum.PermissionAgentInstanceRead))
 
+	server.RegisterConfigProvider(remoteConfigProvider)
+	server.RegisterConfigProvider(dynamicAgentConfigProvider)
 }
File diff suppressed because it is too large
@@ -1,124 +0,0 @@
/* Copyright © INFINI Ltd. All rights reserved.
 * Web: https://infinilabs.com
 * Email: hello#infini.ltd */

package api

import (
	"fmt"
	log "github.com/cihub/seelog"
	"infini.sh/console/modules/agent/client"
	"infini.sh/console/modules/agent/state"
	"infini.sh/framework/core/agent"
	httprouter "infini.sh/framework/core/api/router"
	"infini.sh/framework/core/orm"
	"infini.sh/framework/core/util"
	"net/http"
)

func (h *APIHandler) getLogFilesByNode(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	nodeID := ps.MustGetParameter("node_id")
	inst, node, err := getAgentByNodeID(nodeID)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		log.Error(err)
		return
	}
	if inst == nil {
		log.Error(fmt.Sprintf("can not find agent by node [%s]", nodeID))
		h.WriteJSON(w, util.MapStr{
			"success": false,
			"reason":  "AGENT_NOT_FOUND",
		}, http.StatusOK)
		return
	}
	logFiles, err := client.GetClient().GetElasticLogFiles(nil, inst.GetEndpoint(), node.Path.Logs)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		log.Error(err)
		return
	}
	h.WriteJSON(w, util.MapStr{
		"success":   true,
		"log_files": logFiles,
	}, http.StatusOK)
}

func (h *APIHandler) getLogFileContent(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	nodeID := ps.MustGetParameter("node_id")
	inst, node, err := getAgentByNodeID(nodeID)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		log.Error(err)
		return
	}
	if inst == nil {
		h.WriteError(w, fmt.Sprintf("can not find agent by node [%s]", nodeID), http.StatusInternalServerError)
		return
	}
	reqBody := struct {
		FileName        string `json:"file_name"`
		LogsPath        string `json:"logs_path"`
		Offset          int    `json:"offset"`
		Lines           int    `json:"lines"`
		StartLineNumber int64  `json:"start_line_number"`
	}{}
	err = h.DecodeJSON(req, &reqBody)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		log.Error(err)
		return
	}
	reqBody.LogsPath = node.Path.Logs
	sm := state.GetStateManager()
	res, err := sm.GetAgentClient().GetElasticLogFileContent(nil, inst.GetEndpoint(), reqBody)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		log.Error(err)
		return
	}
	h.WriteJSON(w, res, http.StatusOK)
}

func getAgentByNodeID(nodeID string) (*agent.Instance, *agent.ESNodeInfo, error) {
	queryDsl := util.MapStr{
		"size": 1,
		"query": util.MapStr{
			"term": util.MapStr{
				"node_uuid": util.MapStr{
					"value": nodeID,
				},
			},
		},
		"sort": []util.MapStr{
			{
				"timestamp": util.MapStr{
					"order": "desc",
				},
			},
		},
	}
	q := &orm.Query{
		RawQuery: util.MustToJSONBytes(queryDsl),
	}
	err, result := orm.Search(agent.ESNodeInfo{}, q)
	if err != nil {
		return nil, nil, err
	}
	if len(result.Result) > 0 {
		buf := util.MustToJSONBytes(result.Result[0])
		node := &agent.ESNodeInfo{}
		err = util.FromJSONBytes(buf, node)
		inst := &agent.Instance{}
		inst.ID = node.AgentID
		_, err = orm.Get(inst)
		if err != nil {
			return nil, node, err
		}
		if inst.Name == "" {
			return nil, node, nil
		}
		return inst, node, nil
	}
	return nil, nil, nil
}
@@ -0,0 +1,243 @@
/* Copyright © INFINI LTD. All rights reserved.
 * Web: https://infinilabs.com
 * Email: hello#infini.ltd */

package api

import (
	"bytes"
	"fmt"
	log "github.com/cihub/seelog"
	"infini.sh/framework/core/elastic"
	"infini.sh/framework/core/global"
	"infini.sh/framework/core/kv"
	"infini.sh/framework/core/model"
	"infini.sh/framework/core/orm"
	"infini.sh/framework/core/util"
	common2 "infini.sh/framework/modules/elastic/common"
	metadata2 "infini.sh/framework/modules/elastic/metadata"
	"infini.sh/framework/plugins/managed/common"
	"time"
)

type RemoteConfig struct {
	orm.ORMObjectBase
	Metadata model.Metadata    `json:"metadata" elastic_mapping:"metadata: { type: object }"`
	Payload  common.ConfigFile `json:"payload" elastic_mapping:"payload: { type: object}"`
}

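// remoteConfigProvider below is one of the config providers registered with
// the managed server (see server.RegisterConfigProvider in the route setup
// earlier in this commit). The contract, as used here: given an instance, it
// returns the config files that instance should receive; an empty slice
// simply means this provider has nothing for that instance.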
func remoteConfigProvider(instance model.Instance) []*common.ConfigFile {

	//fetch configs from remote db
	//fetch configs assigned to (instance=_all OR instance=$instance_id ) AND application.name=$application.name

	q := orm.Query{
		Size: 1000,
		Conds: orm.And(orm.Eq("metadata.category", "app_settings"),
			orm.Eq("metadata.name", instance.Application.Name),
			orm.Eq("metadata.labels.instance", "_all"),
		),
	}

	err, searchResult := orm.Search(RemoteConfig{}, &q)
	if err != nil {
		panic(err)
	}

	result := []*common.ConfigFile{}

	for _, row := range searchResult.Result {
		v, ok := row.(map[string]interface{})
		if ok {
			x, ok := v["payload"]
			if ok {
				f, ok := x.(map[string]interface{})
				if ok {
					name, ok := f["name"].(string)
					if ok {
						item := common.ConfigFile{}
						item.Name = util.ToString(name)
						item.Location = util.ToString(f["location"])
						item.Content = util.ToString(f["content"])
						item.Version, _ = util.ToInt64(util.ToString(f["version"]))
						item.Size = int64(len(item.Content))
						item.Managed = true
						t, ok := v["updated"]
						if ok {
							layout := "2006-01-02T15:04:05.999999-07:00"
							t1, err := time.Parse(layout, util.ToString(t))
							if err == nil {
								item.Updated = t1.Unix()
							}
						}
						result = append(result, &item)
					}
				}
			}
		}
	}

	return result
}

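// dynamicAgentConfigProvider below regenerates the metrics-task config for
// agent instances. Versioning sketch, as implemented: the generated content
// is hashed, the hash is compared against the last value stored in the kv
// bucket (LastAgentHash), and on any change the version is bumped to
// time.Now().Unix() so agents treat the file as updated.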
func dynamicAgentConfigProvider(instance model.Instance) []*common.ConfigFile {

	if instance.Application.Name != "agent" {
		return nil
	}

	//get config files from remote db
	//get settings with this agent id

	result := []*common.ConfigFile{}
	ids, err := GetEnrolledNodesByAgent(&instance)
	if err != nil {
		panic(err)
	}

	var latestTimestamp int64
	for _, v := range ids {
		if v.Updated > latestTimestamp {
			latestTimestamp = v.Updated
		}
	}

	if len(ids) > 0 {

		cfg := common.ConfigFile{}
		cfg.Name = "generated_metrics_tasks.yml"
		cfg.Location = "generated_metrics_tasks.yml"
		cfg.Content, cfg.Hash = getAgentIngestConfigs(instance.ID, ids)

		hash := cfg.Hash
		if cfg.Hash == "" {
			hash = util.MD5digest(cfg.Content)
		}
		//if local's hash is different from remote's hash, then update local's hash, update version to current timestamp
		v, err := kv.GetValue(LastAgentHash, []byte(global.Env().SystemConfig.NodeConfig.ID+":"+instance.ID))
		if err != nil || v == nil || string(v) != hash {
			err := kv.AddValue(LastAgentHash, []byte(global.Env().SystemConfig.NodeConfig.ID+":"+instance.ID), []byte(hash))
			if err != nil {
				panic(err)
			}
			latestTimestamp = time.Now().Unix()
			log.Infof("hash: %v vs %v, update version to current timestamp: %v", string(v), hash, latestTimestamp)
		}

		cfg.Size = int64(len(cfg.Content))
		cfg.Version = latestTimestamp
		cfg.Managed = true
		cfg.Updated = latestTimestamp
		result = append(result, &cfg)
	}

	return result
}

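// getAgentIngestConfigs below renders one template entry per enrolled node.
// For a binding with cluster "c0" and node "n1" the emitted YAML looks
// roughly like this (values are placeholders, and the exact indentation is an
// assumption reconstructed from the format string, whose whitespace was
// mangled in extraction):
//
//	configs.template:
//	  - name: "c0_n1"
//	    path: ./config/task_config.tpl
//	    variable:
//	      TASK_ID: c0_n1
//	      CLUSTER_ID: c0
//	      NODE_UUID: n1
//	      CLUSTER_ENDPOINT: ["https://10.0.0.1:9200"]
//	      ...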
func getAgentIngestConfigs(instance string, items map[string]BindingItem) (string, string) {

	if instance == "" {
		panic("instance id is empty")
	}

	buffer := bytes.NewBuffer([]byte("configs.template: "))

	//sort items
	newItems := []util.KeyValue{}

	for k, v := range items {
		newItems = append(newItems, util.KeyValue{Key: k, Value: v.Updated, Payload: v})
	}

	newItems = util.SortKeyValueArray(newItems, false)

	var latestVersion int64
	for _, x := range newItems {

		v, ok := x.Payload.(BindingItem)
		if !ok {
			continue
		}

		if v.ClusterID == "" {
			log.Error("cluster id is empty")
			continue
		}

		metadata := elastic.GetMetadata(v.ClusterID)
		if metadata == nil || metadata.Config == nil {
			log.Errorf("metadata is nil: %v", v.ClusterID)
			continue
		}
		var clusterLevelEnabled = false
		var nodeLevelEnabled = true

		var username = ""
		var password = ""

		if metadata.Config.AgentCredentialID != "" {
			credential, err := common2.GetCredential(metadata.Config.AgentCredentialID)
			if err != nil {
				log.Error(err)
				continue
			}
			var dv interface{}
			dv, err = credential.Decode()
			if err != nil {
				log.Error(err)
				continue
			}
			if auth, ok := dv.(model.BasicAuth); ok {
				username = auth.Username
				password = auth.Password
			}
		}

		nodeInfo, err := metadata2.GetNodeConfig(v.ClusterID, v.NodeUUID)
		if err != nil {
			log.Error(err)
			continue
		}

		publishAddress := nodeInfo.Payload.NodeInfo.GetHttpPublishHost()

		if publishAddress == "" {
			log.Errorf("publish address is empty: %v", v.NodeUUID)
			continue
		}

		nodeEndPoint := metadata.PrepareEndpoint(publishAddress)

		pathLogs := nodeInfo.Payload.NodeInfo.GetPathLogs()

		if v.Updated > latestVersion {
			latestVersion = v.Updated
		}

		taskID := v.ClusterID + "_" + v.NodeUUID

		buffer.Write([]byte(fmt.Sprintf("\n  - name: \"%v\"\n    path: ./config/task_config.tpl\n    "+
			"variable:\n      "+
			"TASK_ID: %v\n      "+
			"CLUSTER_ID: %v\n      "+
			"CLUSTER_UUID: %v\n      "+
			"NODE_UUID: %v\n      "+
			"CLUSTER_ENDPOINT: [\"%v\"]\n      "+
			"CLUSTER_USERNAME: \"%v\"\n      "+
			"CLUSTER_PASSWORD: \"%v\"\n      "+
			"CLUSTER_LEVEL_TASKS_ENABLED: %v\n      "+
			"NODE_LEVEL_TASKS_ENABLED: %v\n      "+
			"NODE_LOGS_PATH: \"%v\"\n\n\n", taskID, taskID,
			v.ClusterID, v.ClusterUUID, v.NodeUUID, nodeEndPoint, username, password, clusterLevelEnabled, nodeLevelEnabled, pathLogs)))
	}

	hash := util.MD5digest(buffer.String())

	//password: $[[keystore.$[[CLUSTER_ID]]_password]]
	buffer.WriteString("\n")
	buffer.WriteString(fmt.Sprintf("#MANAGED_CONFIG_VERSION: %v\n#MANAGED: true\n", latestVersion))

	return buffer.String(), hash
}

const LastAgentHash = "last_agent_hash"
@@ -1,155 +0,0 @@
/* Copyright © INFINI Ltd. All rights reserved.
 * Web: https://infinilabs.com
 * Email: hello#infini.ltd */

package api

import (
	"fmt"
	log "github.com/cihub/seelog"
	"github.com/valyala/fasttemplate"
	"infini.sh/console/modules/agent/common"
	"infini.sh/framework/core/api/rbac"
	httprouter "infini.sh/framework/core/api/router"
	"infini.sh/framework/core/global"
	"infini.sh/framework/core/util"
	"os"

	"net/http"
	"path"
	"strings"
	"sync"
	"time"
)

var tokens = sync.Map{}

type Token struct {
	CreatedAt time.Time
	UserID    string
}

const ExpiredIn = time.Millisecond * 1000 * 60 * 60

func (h *APIHandler) generateInstallCommand(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	claims, ok := req.Context().Value("user").(*rbac.UserClaims)
	if !ok {
		h.WriteError(w, "user not found", http.StatusInternalServerError)
		return
	}
	agCfg := common.GetAgentConfig()
	if agCfg == nil || agCfg.Setup == nil {
		h.WriteError(w, "agent setup config was not found, please configure in the configuration file first", http.StatusInternalServerError)
		return
	}
	var (
		t        *Token
		tokenStr string
	)
	tokens.Range(func(key, value any) bool {
		if v, ok := value.(*Token); ok && claims.UserId == v.UserID {
			t = v
			tokenStr = key.(string)
			return false
		}
		return true
	})

	if t == nil {
		tokenStr = util.GetUUID()
		t = &Token{
			CreatedAt: time.Now(),
			UserID:    claims.UserId,
		}
	} else {
		if t.CreatedAt.Add(ExpiredIn).Before(time.Now()) {
			tokens.Delete(tokenStr)
			tokenStr = util.GetUUID()
			t = &Token{
				CreatedAt: time.Now(),
				UserID:    claims.UserId,
			}
		} else {
			t.CreatedAt = time.Now()
		}
	}
	tokens.Store(tokenStr, t)
	consoleEndpoint := agCfg.Setup.ConsoleEndpoint
	if consoleEndpoint == "" {
		consoleEndpoint = getDefaultConsoleEndpoint(req)
	}

	h.WriteJSON(w, util.MapStr{
		"script": fmt.Sprintf(`curl -sSL %s/agent/install.sh?token=%s |sudo bash -s -- -u %s -v %s -t /opt/agent`, consoleEndpoint, tokenStr, agCfg.Setup.DownloadURL, agCfg.Setup.Version),
		//"script": fmt.Sprintf(`sudo BASE_URL="%s" AGENT_VER="%s" INSTALL_PATH="/opt" bash -c "$(curl -L '%s/agent/install.sh?token=%s')"`, agCfg.Setup.DownloadURL, agCfg.Setup.Version, consoleEndpoint, tokenStr),
		"token":      tokenStr,
		"expired_at": t.CreatedAt.Add(ExpiredIn),
	}, http.StatusOK)
}

func getDefaultConsoleEndpoint(req *http.Request) string {
	scheme := "http"
	if req.TLS != nil {
		scheme = "https"
	}
	return fmt.Sprintf("%s://%s", scheme, req.Host)
}

func (h *APIHandler) getInstallScript(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	tokenStr := h.GetParameter(req, "token")
	if strings.TrimSpace(tokenStr) == "" {
		h.WriteError(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
		return
	}
	if v, ok := tokens.Load(tokenStr); !ok {
		h.WriteError(w, "token is invalid", http.StatusUnauthorized)
		return
	} else {
		if t, ok := v.(*Token); !ok || t.CreatedAt.Add(ExpiredIn).Before(time.Now()) {
			tokens.Delete(tokenStr)
			h.WriteError(w, "token was expired", http.StatusUnauthorized)
			return
		}
	}
	agCfg := common.GetAgentConfig()
	caCert, clientCertPEM, clientKeyPEM, err := common.GenerateServerCert(agCfg.Setup.CACertFile, agCfg.Setup.CAKeyFile)
	if err != nil {
		log.Error(err)
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		return
	}

	scriptTplPath := path.Join(global.Env().GetConfigDir(), "install_agent.tpl")
	buf, err := os.ReadFile(scriptTplPath)
	if err != nil {
		log.Error(err)
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		return
	}
	tpl := fasttemplate.New(string(buf), "{{", "}}")
	downloadURL := agCfg.Setup.DownloadURL
	if downloadURL == "" {
		downloadURL = "https://release.infinilabs.com/agent/stable/"
	}
	port := agCfg.Setup.Port
	if port == "" {
		port = "8080"
	}
	consoleEndpoint := agCfg.Setup.ConsoleEndpoint
	if consoleEndpoint == "" {
		consoleEndpoint = getDefaultConsoleEndpoint(req)
	}
	_, err = tpl.Execute(w, map[string]interface{}{
		"base_url":         agCfg.Setup.DownloadURL,
		"agent_version":    agCfg.Setup.Version,
		"console_endpoint": consoleEndpoint,
		"client_crt":       clientCertPEM,
		"client_key":       clientKeyPEM,
		"ca_crt":           caCert,
		"port":             port,
		"token":            tokenStr,
	})
	if err != nil {
		log.Error(err)
	}
}
@@ -1,274 +0,0 @@
/* Copyright © INFINI Ltd. All rights reserved.
 * Web: https://infinilabs.com
 * Email: hello#infini.ltd */

package client

import (
	"context"
	"fmt"
	"infini.sh/console/modules/agent/common"
	"infini.sh/framework/core/agent"
	"infini.sh/framework/core/elastic"
	"infini.sh/framework/core/host"
	"infini.sh/framework/core/util"
	"net/http"
)

var defaultClient ClientAPI

func GetClient() ClientAPI {
	if defaultClient == nil {
		panic("agent client not init")
	}
	return defaultClient
}

func RegisterClient(client ClientAPI) {
	defaultClient = client
}

type ClientAPI interface {
	GetHostInfo(ctx context.Context, agentBaseURL string) (*host.HostInfo, error)
	GetElasticProcess(ctx context.Context, agentBaseURL string, agentID string) (interface{}, error)
	GetElasticLogFiles(ctx context.Context, agentBaseURL string, logsPath string) (interface{}, error)
	GetElasticLogFileContent(ctx context.Context, agentBaseURL string, body interface{}) (interface{}, error)
	GetInstanceBasicInfo(ctx context.Context, agentBaseURL string) (*agent.Instance, error)
	RegisterElasticsearch(ctx context.Context, agentBaseURL string, cfgs []elastic.ElasticsearchConfig) error
	GetElasticsearchNodes(ctx context.Context, agentBaseURL string) ([]agent.ESNodeInfo, error)
	AuthESNode(ctx context.Context, agentBaseURL string, cfg elastic.ElasticsearchConfig) (*agent.ESNodeInfo, error)
	CreatePipeline(ctx context.Context, agentBaseURL string, body []byte) error
	DeletePipeline(ctx context.Context, agentBaseURL, pipelineID string) error
	SetKeystoreValue(ctx context.Context, agentBaseURL string, key, value string) error
	SaveDynamicConfig(ctx context.Context, agentBaseURL string, name, content string) error
	SaveIngestConfig(ctx context.Context, agentBaseURL string) error
	DoRequest(req *util.Request, respObj interface{}) error
}

type Client struct {
	Executor Executor
}

func (client *Client) GetHostInfo(ctx context.Context, agentBaseURL string) (*host.HostInfo, error) {
	req := &util.Request{
		Method:  http.MethodGet,
		Url:     fmt.Sprintf("%s/agent/host/_basic", agentBaseURL),
		Context: ctx,
	}
	resBody := struct {
		Success  bool           `json:"success"`
		Error    string         `json:"error"`
		HostInfo *host.HostInfo `json:"result"`
	}{}
	err := client.DoRequest(req, &resBody)
	if err != nil {
		return nil, err
	}
	if resBody.Success != true {
		return nil, fmt.Errorf("enroll error from client: %v", resBody.Error)
	}
	return resBody.HostInfo, nil
}

func (client *Client) GetElasticProcess(ctx context.Context, agentBaseURL string, agentID string) (interface{}, error) {
	req := &util.Request{
		Method:  http.MethodGet,
		Url:     fmt.Sprintf("%s/elasticsearch/%s/process/_elastic", agentBaseURL, agentID),
		Context: ctx,
	}
	resBody := map[string]interface{}{}
	err := client.DoRequest(req, &resBody)
	if err != nil {
		return nil, err
	}
	if resBody["success"] != true {
		return nil, fmt.Errorf("discover host callback error: %v", resBody["error"])
	}
	return resBody["elastic_process"], nil
}

func (client *Client) GetElasticLogFiles(ctx context.Context, agentBaseURL string, logsPath string) (interface{}, error) {
	reqBody := util.MustToJSONBytes(util.MapStr{
		"logs_path": logsPath,
	})
	req := &util.Request{
		Method:  http.MethodPost,
		Url:     fmt.Sprintf("%s/agent/logs/elastic/list", agentBaseURL),
		Context: ctx,
		Body:    reqBody,
	}
	resBody := map[string]interface{}{}
	err := client.DoRequest(req, &resBody)
	if err != nil {
		return nil, err
	}
	if resBody["success"] != true {
		return nil, fmt.Errorf("get elasticsearch log files error: %v", resBody["error"])
	}
	return resBody["result"], nil
}

func (client *Client) GetElasticLogFileContent(ctx context.Context, agentBaseURL string, body interface{}) (interface{}, error) {
	req := &util.Request{
		Method:  http.MethodPost,
		Url:     fmt.Sprintf("%s/agent/logs/elastic/_read", agentBaseURL),
		Context: ctx,
		Body:    util.MustToJSONBytes(body),
	}
	resBody := map[string]interface{}{}
	err := client.DoRequest(req, &resBody)
	if err != nil {
		return nil, err
	}
	if resBody["success"] != true {
		return nil, fmt.Errorf("get elasticsearch log files error: %v", resBody["error"])
	}
	var hasMore bool
	if v, ok := resBody["EOF"].(bool); ok && !v {
		hasMore = true
	}
	return map[string]interface{}{
		"lines":    resBody["result"],
		"has_more": hasMore,
	}, nil
}

func (client *Client) GetInstanceBasicInfo(ctx context.Context, agentBaseURL string) (*agent.Instance, error) {
	req := &util.Request{
		Method:  http.MethodGet,
		Url:     fmt.Sprintf("%s/agent/_info", agentBaseURL),
		Context: ctx,
	}
	resBody := &agent.Instance{}
	err := client.DoRequest(req, &resBody)
	return resBody, err
}

func (client *Client) RegisterElasticsearch(ctx context.Context, agentBaseURL string, cfgs []elastic.ElasticsearchConfig) error {
	reqBody, err := util.ToJSONBytes(cfgs)
	if err != nil {
		return err
	}
	req := &util.Request{
		Method:  http.MethodPost,
		Url:     fmt.Sprintf("%s/elasticsearch/_register", agentBaseURL),
		Context: ctx,
		Body:    reqBody,
	}
	resBody := util.MapStr{}
	err = client.DoRequest(req, &resBody)
	if err != nil {
		return err
	}
	if resBody["acknowledged"] != true {
		return fmt.Errorf("%v", resBody["error"])
	}
	return nil
}

func (client *Client) GetElasticsearchNodes(ctx context.Context, agentBaseURL string) ([]agent.ESNodeInfo, error) {
	req := &util.Request{
		Method:  http.MethodGet,
		Url:     fmt.Sprintf("%s/elasticsearch/_nodes", agentBaseURL),
		Context: ctx,
	}
	resBody := []agent.ESNodeInfo{}
	err := client.DoRequest(req, &resBody)
	if err != nil {
		return nil, err
	}

	return resBody, nil
}

func (client *Client) AuthESNode(ctx context.Context, agentBaseURL string, cfg elastic.ElasticsearchConfig) (*agent.ESNodeInfo, error) {
	reqBody, err := util.ToJSONBytes(cfg)
	if err != nil {
		return nil, err
	}
	req := &util.Request{
		Method:  http.MethodPost,
		Url:     fmt.Sprintf("%s/elasticsearch/_auth", agentBaseURL),
		Context: ctx,
		Body:    reqBody,
	}
	resBody := &agent.ESNodeInfo{}
	err = client.DoRequest(req, resBody)
	if err != nil {
		return nil, err
	}
	return resBody, nil
}

func (client *Client) CreatePipeline(ctx context.Context, agentBaseURL string, body []byte) error {
	req := &util.Request{
		Method:  http.MethodPost,
		Url:     agentBaseURL + "/pipeline/tasks/",
		Body:    body,
		Context: ctx,
	}
	resBody := util.MapStr{}
	return client.DoRequest(req, &resBody)
}

func (client *Client) DeletePipeline(ctx context.Context, agentBaseURL, pipelineID string) error {
	req := &util.Request{
		Method:  http.MethodDelete,
		Url:     fmt.Sprintf("%s/pipeline/task/%s", agentBaseURL, pipelineID),
		Context: ctx,
	}
	return client.DoRequest(req, nil)
}

func (client *Client) SetKeystoreValue(ctx context.Context, agentBaseURL string, key, value string) error {
	body := util.MapStr{
		"key":   key,
		"value": value,
	}
	req := &util.Request{
		Method:  http.MethodPost,
		Url:     fmt.Sprintf("%s/_framework/keystore", agentBaseURL),
		Context: ctx,
		Body:    util.MustToJSONBytes(body),
	}
	return client.DoRequest(req, nil)
}

func (client *Client) SaveDynamicConfig(ctx context.Context, agentBaseURL string, name, content string) error {
	body := util.MapStr{
		"configs": util.MapStr{
			name: content,
		},
	}
	req := &util.Request{
		Method:  http.MethodPost,
		Url:     fmt.Sprintf("%s/agent/config", agentBaseURL),
		Context: ctx,
		Body:    util.MustToJSONBytes(body),
	}
	return client.DoRequest(req, nil)
}

func (client *Client) SaveIngestConfig(ctx context.Context, agentBaseURL string) error {
	ingestCfg, basicAuth, err := common.GetAgentIngestConfig()
	if err != nil {
		return err
	}
	if basicAuth != nil && basicAuth.Password != "" {
		err = client.SetKeystoreValue(ctx, agentBaseURL, "ingest_cluster_password", basicAuth.Password)
		if err != nil {
			return fmt.Errorf("set keystore value to agent error: %w", err)
		}
	}
	err = client.SaveDynamicConfig(context.Background(), agentBaseURL, "ingest", ingestCfg)
	if err != nil {
		return fmt.Errorf("save dynamic config to agent error: %w", err)
	}
	return nil
}

func (client *Client) DoRequest(req *util.Request, respObj interface{}) error {
	return client.Executor.DoRequest(req, respObj)
}
@@ -1,100 +0,0 @@
/* Copyright © INFINI Ltd. All rights reserved.
 * Web: https://infinilabs.com
 * Email: hello#infini.ltd */

package client

import (
	"bytes"
	"fmt"
	"io"
	"net/http"

	"infini.sh/console/modules/agent/common"
	"infini.sh/framework/core/util"
)

type Executor interface {
	DoRequest(req *util.Request, respObj interface{}) error
}

type HttpExecutor struct {
}

func (executor *HttpExecutor) DoRequest(req *util.Request, respObj interface{}) error {
	result, err := util.ExecuteRequest(req)
	if err != nil {
		return err
	}
	if result.StatusCode != 200 {
		return fmt.Errorf(string(result.Body))
	}
	if respObj == nil {
		return nil
	}
	return util.FromJSONBytes(result.Body, respObj)
}

func NewMTLSExecutor(caCertFile, caKeyFile string) (*MTLSExecutor, error) {
	var (
		instanceCrt string
		instanceKey string
	)
	instanceCrt, instanceKey, err := common.GetAgentInstanceCerts(caCertFile, caKeyFile)
	if err != nil {
		return nil, fmt.Errorf("generate tls cert error: %w", err)
	}
	hClient, err := util.NewMTLSClient(caCertFile, instanceCrt, instanceKey)
	if err != nil {
		return nil, err
	}
	return &MTLSExecutor{
		CaCertFile: caCertFile,
		CAKeyFile:  caKeyFile,
		client:     hClient,
	}, nil
}

type MTLSExecutor struct {
	CaCertFile string
	CAKeyFile  string
	client     *http.Client
}

func (executor *MTLSExecutor) DoRequest(req *util.Request, respObj interface{}) error {
	var reader io.Reader
	if len(req.Body) > 0 {
		reader = bytes.NewReader(req.Body)
	}
	var (
		hr  *http.Request
		err error
	)
	if req.Context == nil {
		hr, err = http.NewRequest(req.Method, req.Url, reader)
	} else {
		hr, err = http.NewRequestWithContext(req.Context, req.Method, req.Url, reader)
	}
	if err != nil {
		return err
	}
	res, err := executor.client.Do(hr)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	buf, err := io.ReadAll(res.Body)
	if err != nil {
		return err
	}
	if res.StatusCode != 200 {
		return fmt.Errorf(string(buf))
	}
	if respObj != nil {
		err = util.FromJSONBytes(buf, respObj)
		if err != nil {
			return err
		}
	}
	return nil
}
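A construction sketch for the mTLS path: the executor derives (or reuses) an instance certificate signed by the console CA and speaks HTTPS to the agent. The certificate paths are illustrative, matching the defaults generated below:

	executor, err := client.NewMTLSExecutor("/data/certs/ca.crt", "/data/certs/ca.key")
	if err != nil {
		log.Error(err)
		return
	}
	c := client.Client{Executor: executor} // assumes Client exposes its Executor, as DoRequest above suggests
	err = c.SetKeystoreValue(context.Background(), "https://127.0.0.1:8080", "demo_password", "changeme")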
@@ -5,15 +5,10 @@
 package common
 
 import (
-	"crypto/x509"
-	"encoding/pem"
 	log "github.com/cihub/seelog"
 	"infini.sh/console/modules/agent/model"
 	"infini.sh/framework/core/env"
-	"infini.sh/framework/core/global"
-	"infini.sh/framework/core/util"
-	"os"
-	"path"
+	"infini.sh/framework/plugins/managed/common"
 )
@@ -30,37 +25,10 @@ func GetAgentConfig() *model.AgentConfig {
 		log.Debug("agent config not found: %v", err)
 	}
 	if agentCfg.Setup.CACertFile == "" && agentCfg.Setup.CAKeyFile == "" {
-		agentCfg.Setup.CACertFile, agentCfg.Setup.CAKeyFile, err = GetOrInitDefaultCaCerts()
+		agentCfg.Setup.CACertFile, agentCfg.Setup.CAKeyFile, err = common.GetOrInitDefaultCaCerts()
 		if err != nil {
 			log.Errorf("generate default ca certs error: %v", err)
 		}
 	}
 	return agentCfg
 }
 
-func GetOrInitDefaultCaCerts() (string, string, error) {
-	dataDir := global.Env().GetDataDir()
-	caFile := path.Join(dataDir, "certs/ca.crt")
-	caKey := path.Join(dataDir, "certs/ca.key")
-	if !(util.FileExists(caFile) && util.FileExists(caKey)) {
-		err := os.MkdirAll(path.Join(dataDir, "certs"), 0775)
-		if err != nil {
-			return "", "", err
-		}
-		log.Info("auto generating cert files")
-		_, rootKey, rootCertPEM := util.GetRootCert()
-
-		caKeyPEM := pem.EncodeToMemory(&pem.Block{
-			Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(rootKey),
-		})
-		_, err = util.FilePutContentWithByte(caKey, caKeyPEM)
-		if err != nil {
-			return "", "", err
-		}
-		_, err = util.FilePutContentWithByte(caFile, rootCertPEM)
-		if err != nil {
-			return "", "", err
-		}
-	}
-	return caFile, caKey, nil
-}
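The CA bootstrap writes a PEM key pair under the data directory. A standalone sanity-check sketch (the path is illustrative; the file is whatever `GetOrInitDefaultCaCerts` produced):

	package main

	import (
		"crypto/x509"
		"encoding/pem"
		"fmt"
		"os"
	)

	func main() {
		// Path is illustrative; the default CA is written under <data_dir>/certs.
		pemBytes, err := os.ReadFile("/data/certs/ca.crt")
		if err != nil {
			panic(err)
		}
		block, _ := pem.Decode(pemBytes)
		if block == nil {
			panic("no PEM block in ca.crt")
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			panic(err)
		}
		fmt.Println("CA subject:", cert.Subject)
	}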
@@ -1,541 +0,0 @@
/* Copyright © INFINI Ltd. All rights reserved.
 * Web: https://infinilabs.com
 * Email: hello#infini.ltd */

package common

import (
	"fmt"
	"strings"

	log "github.com/cihub/seelog"
	"infini.sh/console/modules/agent/model"
	"infini.sh/framework/core/agent"
	"infini.sh/framework/core/credential"
	"infini.sh/framework/core/elastic"
	"infini.sh/framework/core/event"
	"infini.sh/framework/core/global"
	"infini.sh/framework/core/orm"
	"infini.sh/framework/core/util"
)
func ParseAgentSettings(settings []agent.Setting) (*model.ParseAgentSettingsResult, error) {
	var clusterCfgs []elastic.ElasticsearchConfig
	var (
		pipelines             []util.MapStr
		toDeletePipelineNames []string
	)
	for _, setting := range settings {
		if setting.Metadata.Labels == nil {
			return nil, fmt.Errorf("empty metadata labels of setting [%s]", setting.ID)
		}
		var (
			clusterID string
			ok        bool
		)
		nodeUUID := util.ToString(setting.Metadata.Labels["node_uuid"])
		if clusterID, ok = setting.Metadata.Labels["cluster_id"].(string); ok && clusterID != "" {
			cfg := elastic.GetConfig(clusterID)
			newID := getClusterConfigReferenceName(clusterID, nodeUUID)
			newCfg := elastic.ElasticsearchConfig{
				Enabled:   true,
				Name:      newID,
				BasicAuth: cfg.BasicAuth,
				//todo get endpoint from agent node info
				Endpoint:    setting.Metadata.Labels["endpoint"].(string),
				ClusterUUID: cfg.ClusterUUID,
			}
			newCfg.ID = newID
			clusterCfgs = append(clusterCfgs, newCfg)
		} else {
			return nil, fmt.Errorf("got wrong cluster id [%v] from metadata labels", setting.Metadata.Labels["cluster_id"])
		}

		taskCfg, err := util.MapStr(setting.Payload).GetValue("task")
		if err != nil {
			return nil, err
		}
		vBytes, err := util.ToJSONBytes(taskCfg)
		if err != nil {
			return nil, err
		}
		taskSetting := model.TaskSetting{}
		err = util.FromJSONBytes(vBytes, &taskSetting)
		if err != nil {
			return nil, err
		}
		partPipelines, partDeletePipelineNames, err := TransformSettingsToConfig(&taskSetting, clusterID, nodeUUID)
		if err != nil {
			return nil, err
		}
		pipelines = append(pipelines, partPipelines...)
		toDeletePipelineNames = append(toDeletePipelineNames, partDeletePipelineNames...)
	}
	return &model.ParseAgentSettingsResult{
		ClusterConfigs:        clusterCfgs,
		Pipelines:             pipelines,
		ToDeletePipelineNames: toDeletePipelineNames,
	}, nil
}
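For orientation, this is the document shape the parser above (and the query in GetAgentSettings below) expects. Every concrete value here is made up; the field names mirror the label lookups and the `payload.task` path in the code:

	package main

	import "fmt"

	// Illustrative agent task-setting document; values are fake.
	const exampleSetting = `{
	  "metadata": {
	    "category": "agent",
	    "name": "task",
	    "labels": {
	      "agent_id": "agent-0001",
	      "cluster_id": "infini_default_cluster",
	      "node_uuid": "node-uuid-0001",
	      "endpoint": "http://127.0.0.1:9200"
	    }
	  },
	  "payload": {
	    "task": {
	      "cluster_health": {"enabled": true},
	      "node_stats": {"enabled": true, "node_ids": ["node-uuid-0001"]}
	    }
	  }
	}`

	func main() { fmt.Println(exampleSetting) }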
// GetAgentSettings queries agent settings by agent ID and an updated
// timestamp: if any setting was updated after the given timestamp, the full
// setting list is returned (including settings that did not change);
// otherwise the result is empty.
func GetAgentSettings(agentID string, timestamp int64) ([]agent.Setting, error) {
	query := util.MapStr{
		"bool": util.MapStr{
			"must": []util.MapStr{
				{
					"term": util.MapStr{
						"metadata.category": util.MapStr{
							"value": "agent",
						},
					},
				},
				{
					"term": util.MapStr{
						"metadata.name": util.MapStr{
							"value": "task",
						},
					},
				},
				{
					"term": util.MapStr{
						"metadata.labels.agent_id": util.MapStr{
							"value": agentID,
						},
					},
				},
				//{
				//	"range": util.MapStr{
				//		"updated": util.MapStr{
				//			"gt": timestamp,
				//		},
				//	},
				//},
			},
		},
	}
	queryDsl := util.MapStr{
		"size":  500,
		"query": query,
	}
	q := orm.Query{
		RawQuery: util.MustToJSONBytes(queryDsl),
	}
	err, result := orm.Search(agent.Setting{}, &q)
	if err != nil {
		return nil, fmt.Errorf("search settings error: %w", err)
	}
	if len(result.Result) == 0 {
		return nil, nil
	}
	var (
		settings   []agent.Setting
		hasUpdated bool
	)
	for _, row := range result.Result {
		setting := agent.Setting{}
		buf, err := util.ToJSONBytes(row)
		if err != nil {
			return nil, err
		}
		err = util.FromJSONBytes(buf, &setting)
		if err != nil {
			return nil, err
		}
		if setting.Updated != nil && setting.Updated.UnixMilli() > timestamp {
			hasUpdated = true
		}
		settings = append(settings, setting)
	}
	if !hasUpdated {
		return nil, nil
	}
	return settings, nil
}
func getClusterConfigReferenceName(clusterID, nodeUUID string) string {
	return fmt.Sprintf("%s_%s", clusterID, nodeUUID)
}

func TransformSettingsToConfig(setting *model.TaskSetting, clusterID, nodeUUID string) ([]util.MapStr, []string, error) {
	if setting == nil {
		return nil, nil, fmt.Errorf("empty setting")
	}
	var (
		pipelines             []util.MapStr
		toDeletePipelineNames []string
	)
	if setting.ClusterStats != nil {
		var processorName = "es_cluster_stats"
		if setting.ClusterStats.Enabled {
			pipelineCfg, err := newClusterMetricPipeline(processorName, clusterID, nodeUUID)
			if err != nil {
				return nil, nil, err
			}
			pipelines = append(pipelines, pipelineCfg)
		} else {
			toDeletePipelineNames = append(toDeletePipelineNames, getMetricPipelineName(clusterID, processorName))
		}
	}
	if setting.IndexStats != nil {
		var processorName = "es_index_stats"
		if setting.IndexStats.Enabled {
			pipelineCfg, err := newClusterMetricPipeline(processorName, clusterID, nodeUUID)
			if err != nil {
				return nil, nil, err
			}
			pipelines = append(pipelines, pipelineCfg)
		} else {
			toDeletePipelineNames = append(toDeletePipelineNames, getMetricPipelineName(clusterID, processorName))
		}
	}
	if setting.ClusterHealth != nil {
		var processorName = "es_cluster_health"
		if setting.ClusterHealth.Enabled {
			pipelineCfg, err := newClusterMetricPipeline(processorName, clusterID, nodeUUID)
			if err != nil {
				return nil, nil, err
			}
			pipelines = append(pipelines, pipelineCfg)
		} else {
			toDeletePipelineNames = append(toDeletePipelineNames, getMetricPipelineName(clusterID, processorName))
		}
	}
	if setting.NodeStats != nil {
		var processorName = "es_node_stats"
		if setting.NodeStats.Enabled {
			params := util.MapStr{
				"elasticsearch": getClusterConfigReferenceName(clusterID, nodeUUID),
				"labels": util.MapStr{
					"cluster_id": clusterID,
				},
			}
			if len(setting.NodeStats.NodeIDs) > 0 {
				params["node_uuids"] = setting.NodeStats.NodeIDs
			}
			cfg := util.MapStr{
				processorName: params,
			}
			enabled := true
			pipelineCfg := util.MapStr{
				"enabled":           &enabled,
				"name":              getMetricPipelineName(nodeUUID, processorName),
				"auto_start":        true,
				"keep_running":      true,
				"retry_delay_in_ms": 10000,
				"processor":         []util.MapStr{cfg},
			}
			pipelines = append(pipelines, pipelineCfg)
		} else {
			toDeletePipelineNames = append(toDeletePipelineNames, getMetricPipelineName(nodeUUID, processorName))
		}
	}
	if setting.Logs != nil {
		var processorName = "es_logs_processor"
		if setting.Logs.Enabled {
			params := util.MapStr{
				"elasticsearch": getClusterConfigReferenceName(clusterID, nodeUUID),
				"queue_name":    "logs",
				"labels": util.MapStr{
					"cluster_id": clusterID,
				},
			}
			if setting.Logs.LogsPath != "" {
				params["logs_path"] = setting.Logs.LogsPath
			}
			cfg := util.MapStr{
				processorName: params,
			}
			enabled := true
			pipelineCfg := util.MapStr{
				"enabled":           &enabled,
				"name":              fmt.Sprintf("collect_%s_es_logs", nodeUUID),
				"auto_start":        true,
				"keep_running":      true,
				"retry_delay_in_ms": 3000,
				"processor":         []util.MapStr{cfg},
			}
			pipelines = append(pipelines, pipelineCfg)
		}
	}
	return pipelines, toDeletePipelineNames, nil
}
func newClusterMetricPipeline(processorName string, clusterID string, nodeUUID string) (util.MapStr, error) {
	referName := getClusterConfigReferenceName(clusterID, nodeUUID)
	cfg := util.MapStr{
		processorName: util.MapStr{
			"elasticsearch": referName,
			"labels": util.MapStr{
				"cluster_id": clusterID,
			},
		},
	}
	enabled := true
	pipelineCfg := util.MapStr{
		"enabled":           &enabled,
		"name":              getMetricPipelineName(clusterID, processorName),
		"auto_start":        true,
		"keep_running":      true,
		"retry_delay_in_ms": 10000,
		"processor":         []util.MapStr{cfg},
	}
	return pipelineCfg, nil
}

func getMetricPipelineName(clusterID, processorName string) string {
	return fmt.Sprintf("collect_%s_%s", clusterID, processorName)
}
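The naming scheme ties every metric pipeline to its cluster (or node) and processor, so the delete list can be recomputed from the same inputs later. A quick illustration with a made-up cluster ID:

	package main

	import "fmt"

	// Mirrors getMetricPipelineName above; the inputs are invented.
	func getMetricPipelineName(clusterID, processorName string) string {
		return fmt.Sprintf("collect_%s_%s", clusterID, processorName)
	}

	func main() {
		fmt.Println(getMetricPipelineName("infini_default_cluster", "es_cluster_stats"))
		// Output: collect_infini_default_cluster_es_cluster_stats
	}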
func LoadAgentsFromES(clusterID string) ([]agent.Instance, error) {
	q := orm.Query{
		Size: 1000,
	}
	if clusterID != "" {
		q.Conds = orm.And(orm.Eq("id", clusterID))
	}
	err, result := orm.Search(agent.Instance{}, &q)
	if err != nil {
		return nil, fmt.Errorf("query agent error: %w", err)
	}

	if len(result.Result) > 0 {
		var agents = make([]agent.Instance, 0, len(result.Result))
		for _, row := range result.Result {
			ag := agent.Instance{}
			bytes := util.MustToJSONBytes(row)
			err = util.FromJSONBytes(bytes, &ag)
			if err != nil {
				log.Errorf("got unexpected agent: %s, error: %v", string(bytes), err)
				continue
			}
			agents = append(agents, ag)
		}
		return agents, nil
	}
	return nil, nil
}
func GetLatestOnlineAgentIDs(agentIds []string, lastSeconds int) (map[string]struct{}, error) {
	q := orm.Query{
		WildcardIndex: true,
	}
	mustQ := []util.MapStr{
		{
			"term": util.MapStr{
				"metadata.name": util.MapStr{
					"value": "agent",
				},
			},
		},
		{
			"term": util.MapStr{
				"metadata.category": util.MapStr{
					"value": "instance",
				},
			},
		},
	}
	if len(agentIds) > 0 {
		mustQ = append(mustQ, util.MapStr{
			"terms": util.MapStr{
				"agent.id": agentIds,
			},
		})
	}
	queryDSL := util.MapStr{
		"sort": []util.MapStr{
			{
				"timestamp": util.MapStr{
					"order": "desc",
				},
			},
		},
		"collapse": util.MapStr{
			"field": "agent.id",
		},
		"query": util.MapStr{
			"bool": util.MapStr{
				"filter": []util.MapStr{
					{
						"range": util.MapStr{
							"timestamp": util.MapStr{
								"gte": fmt.Sprintf("now-%ds", lastSeconds),
							},
						},
					},
				},
				"must": mustQ,
			},
		},
	}
	if len(agentIds) == 0 {
		queryDSL["size"] = 2000
	} else {
		queryDSL["size"] = len(agentIds)
	}
	q.RawQuery = util.MustToJSONBytes(queryDSL)
	err, result := orm.Search(event.Event{}, &q)
	if err != nil {
		return nil, fmt.Errorf("query agent instance metric error: %w", err)
	}
	agentIDs := map[string]struct{}{}
	if len(result.Result) > 0 {
		searchRes := elastic.SearchResponse{}
		err = util.FromJSONBytes(result.Raw, &searchRes)
		if err != nil {
			return nil, err
		}
		agentIDKeyPath := []string{"agent", "id"}
		for _, hit := range searchRes.Hits.Hits {
			agentID, _ := util.GetMapValueByKeys(agentIDKeyPath, hit.Source)
			if v, ok := agentID.(string); ok {
				agentIDs[v] = struct{}{}
			}
		}
	}
	return agentIDs, nil
}
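The collapse-on-`agent.id` plus descending timestamp keeps only the freshest heartbeat event per agent inside the time window. A usage sketch, assuming heartbeat events are already being indexed; the 60-second window and agent ID are illustrative:

	// Which agents reported an instance event in the last 60 seconds?
	online, err := common.GetLatestOnlineAgentIDs(nil, 60)
	if err != nil {
		log.Error(err)
		return
	}
	if _, ok := online["agent-0001"]; ok { // agent ID is made up
		log.Info("agent-0001 is online")
	}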
func GetAgentIngestConfig() (string, *elastic.BasicAuth, error) {
	agCfg := GetAgentConfig()
	var (
		endpoint string
		ok       bool
	)
	emptyIngestClusterEndpoint := false
	if agCfg.Setup.IngestClusterEndpoint == nil {
		emptyIngestClusterEndpoint = true
	}
	if endpoint, ok = agCfg.Setup.IngestClusterEndpoint.(string); ok {
		if endpoint = strings.TrimSpace(endpoint); endpoint == "" {
			emptyIngestClusterEndpoint = true
		}
	}
	if emptyIngestClusterEndpoint {
		cfg := elastic.GetConfig(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
		endpoint = cfg.Endpoint
	}

	var basicAuth elastic.BasicAuth
	if agCfg.Setup.IngestClusterCredentialID != "" {
		cred := credential.Credential{}
		cred.ID = agCfg.Setup.IngestClusterCredentialID
		_, err := orm.Get(&cred)
		if err != nil {
			return "", nil, fmt.Errorf("query credential [%s] error: %w", cred.ID, err)
		}
		info, err := cred.Decode()
		if err != nil {
			return "", nil, fmt.Errorf("decode credential [%s] error: %w", cred.ID, err)
		}
		if basicAuth, ok = info.(elastic.BasicAuth); !ok {
			log.Debug("invalid credential: ", cred)
		}
	} else {
		cfg := elastic.GetConfig(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
		basicAuth = *cfg.BasicAuth
	}
	tpl := `elasticsearch:
  - name: default
    enabled: true
    endpoint: %s
    discovery:
      enabled: true
    basic_auth:
      username: %s
      password: $[[keystore.ingest_cluster_password]]
metrics:
  enabled: true
  queue: metrics
  network:
    enabled: true
    summary: true
    details: true
  memory:
    metrics:
      - swap
      - memory
  disk:
    metrics:
      - iops
      - usage
  cpu:
    metrics:
      - idle
      - system
      - user
      - iowait
      - load
  instance:
    enabled: true
pipeline:
  - name: logs_indexing_merge
    auto_start: true
    keep_running: true
    processor:
      - indexing_merge:
          index_name: ".infini_logs"
          elasticsearch: "default"
          input_queue: "logs"
          idle_timeout_in_seconds: 10
          output_queue:
            name: "logs_requests"
            label:
              tag: "logs"
          worker_size: 1
          bulk_size_in_mb: 10
  - name: ingest_logs
    auto_start: true
    keep_running: true
    processor:
      - bulk_indexing:
          bulk:
            compress: true
            batch_size_in_mb: 5
            batch_size_in_docs: 5000
          consumer:
            fetch_max_messages: 100
          queues:
            type: indexing_merge
            tag: "logs"
          when:
            cluster_available: ["default"]
  - name: metrics_indexing_merge
    auto_start: true
    keep_running: true
    processor:
      - indexing_merge:
          elasticsearch: "default"
          index_name: ".infini_metrics"
          input_queue: "metrics"
          output_queue:
            name: "metrics_requests"
            label:
              tag: "metrics"
          worker_size: 1
          bulk_size_in_mb: 5
  - name: ingest_metrics
    auto_start: true
    keep_running: true
    processor:
      - bulk_indexing:
          bulk:
            compress: true
            batch_size_in_mb: 5
            batch_size_in_docs: 5000
          consumer:
            fetch_max_messages: 100
          queues:
            type: indexing_merge
            tag: "metrics"
          when:
            cluster_available: ["default"]`
	tpl = fmt.Sprintf(tpl, endpoint, basicAuth.Username)
	return tpl, &basicAuth, nil
}
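How the two halves fit together (a sketch; error handling elided): the YAML returned here only becomes usable once the password behind `$[[keystore.ingest_cluster_password]]` has been pushed into the agent's keystore, which is exactly the order Client.SaveIngestConfig above follows:

	cfg, auth, err := common.GetAgentIngestConfig()
	if err != nil {
		log.Error(err)
		return
	}
	// cfg embeds only a keystore reference; auth.Password must be written to
	// the agent keystore (SetKeystoreValue) before this config can authenticate.
	_ = c.SetKeystoreValue(ctx, agentBaseURL, "ingest_cluster_password", auth.Password)
	_ = c.SaveDynamicConfig(ctx, agentBaseURL, "ingest", cfg)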
@@ -1,41 +0,0 @@
/* Copyright © INFINI Ltd. All rights reserved.
 * Web: https://infinilabs.com
 * Email: hello#infini.ltd */

package common

import (
	"fmt"
	"gopkg.in/yaml.v2"
	"infini.sh/framework/core/util"
	"testing"
)

func TestTransformSettingsToConfig(t *testing.T) {
	setting := TaskSetting{
		ClusterHealth: ClusterHealthTask{
			Enabled: true,
		},
		ClusterStats: ClusterStatsTask{
			Enabled: true,
		},
		IndexStats: IndexStatsTask{
			Enabled: true,
		},
		NodeStats: NodeStatsTask{
			Enabled: true,
			NodeIDs: []string{"ddddnnnn"},
		},
	}
	pipelines, err := transformSettingsToConfig(&setting, "testxxx")
	if err != nil {
		t.Fatal(err)
	}
	buf, err := yaml.Marshal(util.MapStr{
		"pipeline": pipelines,
	})
	if err != nil {
		t.Fatal(err)
	}
	fmt.Println(string(buf))
}
@@ -5,20 +5,15 @@
 package model
 
 type AgentConfig struct {
 	Enabled bool `config:"enabled"`
-	StateManager struct {
-		Enabled bool `config:"enabled"`
-	} `config:"state_manager"`
 	Setup   *SetupConfig `config:"setup"`
 }
 
 type SetupConfig struct {
 	DownloadURL     string `config:"download_url"`
 	Version         string `config:"version"`
 	CACertFile      string `config:"ca_cert"`
 	CAKeyFile       string `config:"ca_key"`
 	ConsoleEndpoint string `config:"console_endpoint"`
-	IngestClusterEndpoint     interface{} `config:"ingest_cluster_endpoint"`
-	IngestClusterCredentialID string      `config:"ingest_cluster_credential_id"`
-	Port string `config:"port"`
+	Port            string `config:"port"`
 }
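For reference, a configuration fragment that would populate the trimmed-down structs through their `config` tags. The top-level `agent` key and every value are illustrative, not taken from a real deployment:

	// Illustrative console config fragment, held in a Go string for reference;
	// key names come from the struct tags above, values are placeholders.
	const exampleAgentConfig = `
	agent:
	  enabled: true
	  setup:
	    download_url: https://release.example.com/agent
	    version: 1.0.0
	    ca_cert: config/certs/ca.crt
	    ca_key: config/certs/ca.key
	    console_endpoint: http://127.0.0.1:9000
	    port: "8080"
	`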
@@ -1,15 +0,0 @@
/* Copyright © INFINI Ltd. All rights reserved.
 * Web: https://infinilabs.com
 * Email: hello#infini.ltd */

package model

const (
	StatusOnline  string = "online"
	StatusOffline        = "offline"
)

const (
	KVAgentIngestConfigChanged = "agent_ingest_config_changed"
	KVSyncDynamicTaskSettings  = "agent_sync_dynamic_task_settings"
)
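These KV keys act as per-agent dirty flags: syncIngestSettings in the removed state package below re-pushes the ingest config whenever the flag reads "1" and resets it to "0" on success. A sketch of the producer side, with a made-up agent ID:

	// Mark agent-0001's ingest config as changed so the next state-manager
	// sweep re-pushes it ("1" = dirty, "0" = clean, per syncIngestSettings).
	err := kv.AddValue(model.KVAgentIngestConfigChanged, []byte("agent-0001"), []byte("1"))
	if err != nil {
		log.Error(err)
	}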
@@ -1,46 +0,0 @@
/* Copyright © INFINI Ltd. All rights reserved.
 * Web: https://infinilabs.com
 * Email: hello#infini.ltd */

package model

import (
	"infini.sh/framework/core/elastic"
	"infini.sh/framework/core/util"
)

type TaskSetting struct {
	ClusterHealth *ClusterHealthTask `json:"cluster_health,omitempty"`
	ClusterStats  *ClusterStatsTask  `json:"cluster_stats,omitempty"`
	IndexStats    *IndexStatsTask    `json:"index_stats,omitempty"`
	NodeStats     *NodeStatsTask     `json:"node_stats,omitempty"`
	Logs          *LogsTask          `json:"logs,omitempty"`
}

type ClusterHealthTask struct {
	Enabled bool `json:"enabled"`
}

type ClusterStatsTask struct {
	Enabled bool `json:"enabled"`
}

type IndexStatsTask struct {
	Enabled bool `json:"enabled"`
}

type NodeStatsTask struct {
	Enabled bool     `json:"enabled"`
	NodeIDs []string `json:"node_ids,omitempty"`
}

type LogsTask struct {
	Enabled  bool   `json:"enabled"`
	LogsPath string `json:"logs_path"`
}

type ParseAgentSettingsResult struct {
	ClusterConfigs        []elastic.ElasticsearchConfig
	Pipelines             []util.MapStr
	ToDeletePipelineNames []string
}
@@ -1,351 +0,0 @@
/* Copyright © INFINI Ltd. All rights reserved.
 * Web: https://infinilabs.com
 * Email: hello#infini.ltd */

package state

import (
	"context"
	"fmt"
	"runtime"
	"runtime/debug"
	"strconv"
	"sync"
	"time"

	"github.com/buger/jsonparser"
	log "github.com/cihub/seelog"
	"gopkg.in/yaml.v2"
	"infini.sh/console/modules/agent/client"
	"infini.sh/console/modules/agent/common"
	"infini.sh/console/modules/agent/model"
	"infini.sh/framework/core/agent"
	"infini.sh/framework/core/host"
	"infini.sh/framework/core/kv"
	"infini.sh/framework/core/orm"
	"infini.sh/framework/core/util"
	"infini.sh/framework/modules/elastic"
)

var stateManager IStateManager

func GetStateManager() IStateManager {
	if stateManager == nil {
		panic("agent state manager not init")
	}
	return stateManager
}

func RegisterStateManager(sm IStateManager) {
	stateManager = sm
}

func IsEnabled() bool {
	return stateManager != nil
}

type IStateManager interface {
	GetAgent(ID string) (*agent.Instance, error)
	UpdateAgent(inst *agent.Instance, syncToES bool) (*agent.Instance, error)
	GetTaskAgent(clusterID string) (*agent.Instance, error)
	DeleteAgent(agentID string) error
	LoopState()
	Stop()
	GetAgentClient() client.ClientAPI
}

type StateManager struct {
	TTL           time.Duration // kv ttl
	KVKey         string
	stopC         chan struct{}
	stopCompleteC chan struct{}
	agentClient   *client.Client
	agentIds      map[string]string
	agentMutex    sync.Mutex
	workerChan    chan struct{}
}

func NewStateManager(TTL time.Duration, kvKey string, agentIds map[string]string, agentClient *client.Client) *StateManager {
	return &StateManager{
		TTL:           TTL,
		KVKey:         kvKey,
		stopC:         make(chan struct{}),
		stopCompleteC: make(chan struct{}),
		agentClient:   agentClient,
		agentIds:      agentIds,
		workerChan:    make(chan struct{}, runtime.NumCPU()),
	}
}

func (sm *StateManager) checkAgentStatus() {
	onlineAgentIDs, err := common.GetLatestOnlineAgentIDs(nil, int(sm.TTL.Seconds()))
	if err != nil {
		log.Error(err)
		return
	}
	//add new agent to state
	sm.agentMutex.Lock()
	for agentID := range onlineAgentIDs {
		if _, ok := sm.agentIds[agentID]; !ok {
			log.Infof("status of agent [%s] changed to online", agentID)
			sm.agentIds[agentID] = model.StatusOnline
		}
	}
	sm.agentMutex.Unlock()
	for agentID, status := range sm.agentIds {
		sm.workerChan <- struct{}{}
		go func(agentID string) {
			defer func() {
				if err := recover(); err != nil {
					log.Errorf("check agent [%s] status recover from panic error: %v", agentID, err)
					debug.PrintStack()
				}
				<-sm.workerChan
			}()
			sm.syncSettings(agentID)
			sm.syncIngestSettings(agentID)
			if _, ok := onlineAgentIDs[agentID]; ok {
				host.UpdateHostAgentStatus(agentID, model.StatusOnline)
				if status == model.StatusOnline {
					return
				}
				// status change to online
				sm.agentMutex.Lock()
				sm.agentIds[agentID] = model.StatusOnline
				sm.agentMutex.Unlock()
				log.Infof("status of agent [%s] changed to online", agentID)
				return
			} else {
				// already offline
				if status == model.StatusOffline {
					return
				}
			}
			// status change to offline
			sm.agentMutex.Lock()
			sm.agentIds[agentID] = model.StatusOffline
			sm.agentMutex.Unlock()
			ag, err := sm.GetAgent(agentID)
			if err != nil {
				if err != elastic.ErrNotFound {
					log.Error(err)
				}
				return
			}
			ag.Status = model.StatusOffline
			log.Infof("agent [%s] is offline", ag.Endpoint)
			_, err = sm.UpdateAgent(ag, true)
			if err != nil {
				log.Error(err)
				return
			}
			//update host agent status
			host.UpdateHostAgentStatus(ag.ID, model.StatusOffline)
		}(agentID)
	}
}

func (sm *StateManager) getLastSyncSettingsTimestamp(agentID string) int64 {
	vbytes, err := kv.GetValue(model.KVSyncDynamicTaskSettings, []byte(agentID))
	if err != nil {
		log.Error(err)
	}
	if vbytes == nil {
		return 0
	}
	t, err := strconv.ParseInt(string(vbytes), 10, 64)
	if err != nil {
		log.Error(err)
	}
	return t
}

func (sm *StateManager) syncSettings(agentID string) {
	ag, err := sm.GetAgent(agentID)
	if err != nil {
		if err != elastic.ErrNotFound {
			log.Errorf("get agent error: %v", err)
		}
		return
	}
	newTimestamp := time.Now().UnixMilli()
	lastSyncTimestamp := sm.getLastSyncSettingsTimestamp(agentID)
	settings, err := common.GetAgentSettings(agentID, lastSyncTimestamp)
	if err != nil {
		log.Errorf("query agent settings error: %v", err)
		return
	}
	if len(settings) == 0 {
		log.Debugf("got no settings of agent [%s]", agentID)
		return
	}
	parseResult, err := common.ParseAgentSettings(settings)
	if err != nil {
		log.Errorf("parse agent settings error: %v", err)
		return
	}
	agClient := sm.GetAgentClient()
	var clusterCfgs []util.MapStr
	if len(parseResult.ClusterConfigs) > 0 {
		for _, cfg := range parseResult.ClusterConfigs {
			clusterCfg := util.MapStr{
				"name":     cfg.ID,
				"enabled":  true,
				"endpoint": cfg.Endpoint,
			}
			if cfg.BasicAuth != nil && cfg.BasicAuth.Password != "" {
				cid := cfg.ID
				if cfg.ClusterUUID != "" {
					cid = cfg.ClusterUUID
				}
				err = agClient.SetKeystoreValue(context.Background(), ag.GetEndpoint(), fmt.Sprintf("%s_password", cid), cfg.BasicAuth.Password)
				if err != nil {
					log.Errorf("set keystore value error: %v", err)
					continue
				}
				clusterCfg["basic_auth"] = util.MapStr{
					"username": cfg.BasicAuth.Username,
					"password": fmt.Sprintf("$[[keystore.%s_password]]", cid),
				}
			}
			clusterCfgs = append(clusterCfgs, clusterCfg)
		}
	}
	var dynamicCfg = util.MapStr{}
	if len(clusterCfgs) > 0 {
		dynamicCfg["elasticsearch"] = clusterCfgs
	}
	if len(parseResult.Pipelines) > 0 {
		dynamicCfg["pipeline"] = parseResult.Pipelines
	}
	cfgBytes, err := yaml.Marshal(dynamicCfg)
	if err != nil {
		log.Error("serialize config to yaml error: ", err)
		return
	}
	err = agClient.SaveDynamicConfig(context.Background(), ag.GetEndpoint(), "dynamic_task", string(cfgBytes))

	newTimestampStr := strconv.FormatInt(newTimestamp, 10)
	err = kv.AddValue(model.KVSyncDynamicTaskSettings, []byte(agentID), []byte(newTimestampStr))
	if err != nil {
		log.Error(err)
	}
}

func (sm *StateManager) syncIngestSettings(agentID string) {
	v, err := kv.GetValue(model.KVAgentIngestConfigChanged, []byte(agentID))
	if err != nil {
		log.Error(err)
	}
	if string(v) != "1" {
		return
	}
	ag, err := sm.GetAgent(agentID)
	if err != nil {
		if err != elastic.ErrNotFound {
			log.Errorf("get agent error: %v", err)
		}
		return
	}
	err = sm.agentClient.SaveIngestConfig(context.Background(), ag.GetEndpoint())
	if err == nil {
		kv.AddValue(model.KVAgentIngestConfigChanged, []byte(agentID), []byte("0"))
	}
}

func (sm *StateManager) getAvailableAgent(clusterID string) (*agent.Instance, error) {
	agents, err := common.LoadAgentsFromES(clusterID)
	if err != nil {
		return nil, err
	}
	if len(agents) == 0 {
		return nil, nil
	}
	for _, ag := range agents {
		if ag.Status == "offline" {
			continue
		}
	}
	return nil, nil
}

func (sm *StateManager) LoopState() {
	t := time.NewTicker(30 * time.Second)
	defer t.Stop()
MAINLOOP:
	for {
		select {
		case <-sm.stopC:
			sm.stopCompleteC <- struct{}{}
			close(sm.workerChan)
			break MAINLOOP
		case <-t.C:
			sm.checkAgentStatus()
		}
	}
}

func (sm *StateManager) Stop() {
	sm.stopC <- struct{}{}
	<-sm.stopCompleteC
}

func (sm *StateManager) GetAgent(ID string) (*agent.Instance, error) {
	buf, err := kv.GetValue(sm.KVKey, []byte(ID))
	if err != nil {
		return nil, err
	}
	strTime, _ := jsonparser.GetString(buf, "timestamp")
	timestamp, _ := time.Parse(time.RFC3339, strTime)
	inst := &agent.Instance{}
	inst.ID = ID
	if time.Since(timestamp) > sm.TTL {
		exists, err := orm.Get(inst)
		if err != nil {
			return nil, err
		}
		if !exists {
			return nil, fmt.Errorf("can not find agent [%s]", ID)
		}
		//inst.Timestamp = time.Now()
		err = kv.AddValue(sm.KVKey, []byte(ID), util.MustToJSONBytes(inst))
		if err != nil {
			log.Errorf("save agent [%s] to kv error: %v", ID, err)
		}
		return inst, nil
	}
	err = util.FromJSONBytes(buf, inst)
	return inst, err
}

func (sm *StateManager) UpdateAgent(inst *agent.Instance, syncToES bool) (*agent.Instance, error) {
	//inst.Timestamp = time.Now()
	err := kv.AddValue(sm.KVKey, []byte(inst.ID), util.MustToJSONBytes(inst))
	if syncToES {
		ctx := orm.Context{
			Refresh: "wait_for",
		}
		err = orm.Update(&ctx, inst)
		if err != nil {
			return nil, err
		}
	}
	return inst, err
}

func (sm *StateManager) GetTaskAgent(clusterID string) (*agent.Instance, error) {
	return nil, nil
}

func (sm *StateManager) DeleteAgent(agentID string) error {
	sm.agentMutex.Lock()
	delete(sm.agentIds, agentID)
	sm.agentMutex.Unlock()
	log.Infof("delete agent [%s] from state", agentID)

	return kv.DeleteKey(sm.KVKey, []byte(agentID))
}

func (sm *StateManager) GetAgentClient() client.ClientAPI {
	return sm.agentClient
}
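A wiring sketch for the removed manager, assuming the Client/HttpExecutor types from the client package above; the TTL, KV key, and empty seed map are illustrative:

	agentClient := &client.Client{Executor: &client.HttpExecutor{}}
	sm := state.NewStateManager(60*time.Second, "agent_instance", map[string]string{}, agentClient)
	state.RegisterStateManager(sm)
	go sm.LoopState() // sweeps agent status every 30s via checkAgentStatus
	// ...
	sm.Stop()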
@@ -7,11 +7,11 @@ package common
 import (
 	"infini.sh/console/model"
 	"infini.sh/framework/core/credential"
-	"infini.sh/framework/core/elastic"
+	model2 "infini.sh/framework/core/model"
 	"infini.sh/framework/core/orm"
 )
 
-func GetBasicAuth(srv *model.EmailServer) (basicAuth elastic.BasicAuth, err error) {
+func GetBasicAuth(srv *model.EmailServer) (basicAuth model2.BasicAuth, err error) {
 	if srv.Auth != nil && srv.Auth.Username != "" {
 		basicAuth = *srv.Auth
 		return
@@ -28,7 +28,7 @@ func GetBasicAuth(srv *model.EmailServer) (basicAuth elastic.BasicAuth, err error) {
 	if err != nil {
 		return
 	}
-	if auth, ok := dv.(elastic.BasicAuth); ok {
+	if auth, ok := dv.(model2.BasicAuth); ok {
 		basicAuth = auth
 	}
 }
@@ -5,14 +5,7 @@
 package gateway
 
 import (
-	"crypto/tls"
-	log "github.com/cihub/seelog"
 	"infini.sh/framework/core/api"
-	"infini.sh/framework/core/api/rbac/enum"
-	"net"
-	"net/http"
-	"net/url"
-	"time"
 )
 
 type GatewayAPI struct {
@@ -20,46 +13,48 @@ type GatewayAPI struct {
 }
 
 func InitAPI() {
-	gateway:=GatewayAPI{}
-	api.HandleAPIMethod(api.POST, "/gateway/instance/try_connect", gateway.RequireLogin(gateway.tryConnect))
-	api.HandleAPIMethod(api.GET, "/gateway/instance/:instance_id", gateway.RequirePermission(gateway.getInstance, enum.PermissionGatewayInstanceRead))
-	api.HandleAPIMethod(api.POST, "/gateway/instance", gateway.RequirePermission(gateway.createInstance, enum.PermissionGatewayInstanceWrite))
-	api.HandleAPIMethod(api.PUT, "/gateway/instance/:instance_id", gateway.RequirePermission(gateway.updateInstance, enum.PermissionGatewayInstanceWrite))
-	api.HandleAPIMethod(api.DELETE, "/gateway/instance/:instance_id", gateway.RequirePermission(gateway.deleteInstance, enum.PermissionGatewayInstanceWrite))
-	api.HandleAPIMethod(api.GET, "/gateway/instance/_search", gateway.RequirePermission(gateway.searchInstance, enum.PermissionGatewayInstanceRead))
-	api.HandleAPIMethod(api.POST, "/gateway/instance/status", gateway.RequirePermission(gateway.getInstanceStatus, enum.PermissionGatewayInstanceRead))
-
-	api.HandleAPIMethod(api.POST, "/gateway/instance/:instance_id/_proxy", gateway.RequirePermission(gateway.proxy, enum.PermissionGatewayInstanceRead))
-
-	api.HandleAPIFunc("/ws_proxy", func(w http.ResponseWriter, req *http.Request) {
-		log.Debug(req.RequestURI)
-		endpoint := req.URL.Query().Get("endpoint")
-		path := req.URL.Query().Get("path")
-		var tlsConfig = &tls.Config{
-			InsecureSkipVerify: true,
-		}
-		target, err := url.Parse(endpoint)
-		if err != nil {
-			log.Error(err)
-			return
-		}
-		newURL, err := url.Parse(path)
-		if err != nil {
-			log.Error(err)
-			return
-		}
-		req.URL.Path = newURL.Path
-		req.URL.RawPath = newURL.RawPath
-		req.URL.RawQuery = ""
-		req.RequestURI = req.URL.RequestURI()
-		req.Header.Set("HOST", target.Host)
-		req.Host = target.Host
-		wsProxy := NewSingleHostReverseProxy(target)
-		wsProxy.Dial = (&net.Dialer{
-			Timeout:   30 * time.Second,
-			KeepAlive: 30 * time.Second,
-		}).Dial
-		wsProxy.TLSClientConfig = tlsConfig
-		wsProxy.ServeHTTP(w, req)
-	})
+	//gateway:=GatewayAPI{}
+	//api.HandleAPIMethod(api.POST, "/gateway/instance/try_connect", gateway.RequireLogin(gateway.tryConnect))
+	//api.HandleAPIMethod(api.GET, "/gateway/instance/:instance_id", gateway.RequirePermission(gateway.getInstance, enum.PermissionGatewayInstanceRead))
+	//api.HandleAPIMethod(api.POST, "/gateway/instance", gateway.RequirePermission(gateway.createInstance, enum.PermissionGatewayInstanceWrite))
+	//api.HandleAPIMethod(api.PUT, "/gateway/instance/:instance_id", gateway.RequirePermission(gateway.updateInstance, enum.PermissionGatewayInstanceWrite))
+	//api.HandleAPIMethod(api.DELETE, "/gateway/instance/:instance_id", gateway.RequirePermission(gateway.deleteInstance, enum.PermissionGatewayInstanceWrite))
+	//api.HandleAPIMethod(api.GET, "/gateway/instance/_search", gateway.RequirePermission(gateway.searchInstance, enum.PermissionGatewayInstanceRead))
+	//api.HandleAPIMethod(api.POST, "/gateway/instance/status", gateway.RequirePermission(gateway.getInstanceStatus, enum.PermissionGatewayInstanceRead))
+	//
+	//api.HandleAPIMethod(api.POST, "/gateway/instance/:instance_id/_proxy", gateway.RequirePermission(gateway.proxy, enum.PermissionGatewayInstanceRead))
+	//
+	//api.HandleAPIMethod(api.GET, "/_platform/nodes", gateway.getExecutionNodes)
+	//
+	//api.HandleAPIFunc("/ws_proxy", func(w http.ResponseWriter, req *http.Request) {
+	//	log.Debug(req.RequestURI)
+	//	endpoint := req.URL.Query().Get("endpoint")
+	//	path := req.URL.Query().Get("path")
+	//	var tlsConfig = &tls.Config{
+	//		InsecureSkipVerify: true,
+	//	}
+	//	target, err := url.Parse(endpoint)
+	//	if err != nil {
+	//		log.Error(err)
+	//		return
+	//	}
+	//	newURL, err := url.Parse(path)
+	//	if err != nil {
+	//		log.Error(err)
+	//		return
+	//	}
+	//	req.URL.Path = newURL.Path
+	//	req.URL.RawPath = newURL.RawPath
+	//	req.URL.RawQuery = ""
+	//	req.RequestURI = req.URL.RequestURI()
+	//	req.Header.Set("HOST", target.Host)
+	//	req.Host = target.Host
+	//	wsProxy := NewSingleHostReverseProxy(target)
+	//	wsProxy.Dial = (&net.Dialer{
+	//		Timeout:   30 * time.Second,
+	//		KeepAlive: 30 * time.Second,
+	//	}).Dial
+	//	wsProxy.TLSClientConfig = tlsConfig
+	//	wsProxy.ServeHTTP(w, req)
+	//})
 }
@@ -1,428 +1,455 @@
-/* Copyright © INFINI Ltd. All rights reserved.
- * web: https://infinilabs.com
- * mail: hello#infini.ltd */
-
+///* Copyright © INFINI Ltd. All rights reserved.
+// * web: https://infinilabs.com
+// * mail: hello#infini.ltd */
+//
 package gateway
-
-import (
-	"fmt"
-	log "github.com/cihub/seelog"
-	"github.com/segmentio/encoding/json"
-	"infini.sh/console/model"
-	"infini.sh/framework/core/agent"
-	httprouter "infini.sh/framework/core/api/router"
-	"infini.sh/framework/core/orm"
-	"infini.sh/framework/core/proxy"
-	"infini.sh/framework/core/task"
-	"infini.sh/framework/core/util"
-	"infini.sh/framework/modules/elastic"
-	"net/http"
-	"strconv"
-	"strings"
-)
-
-func (h *GatewayAPI) createInstance(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	var obj = &model.Instance{}
-	err := h.DecodeJSON(req, obj)
-	if err != nil {
-		h.WriteError(w, err.Error(), http.StatusInternalServerError)
-		log.Error(err)
-		return
-	}
-
-	res, err := h.doConnect(obj.Endpoint, obj.BasicAuth)
-	if err != nil {
-		h.WriteError(w, err.Error(), http.StatusInternalServerError)
-		log.Error(err)
-		return
-	}
-	obj.ID = res.ID
-
-	exists, err := orm.Get(obj)
-	if err != nil && err != elastic.ErrNotFound {
-		h.WriteError(w, err.Error(), http.StatusInternalServerError)
-		log.Error(err)
-		return
-	}
-	if exists {
-		h.WriteError(w, "gateway instance already registered", http.StatusInternalServerError)
-		return
-	}
-	err = orm.Create(nil, obj)
-	if err != nil {
-		h.WriteError(w, err.Error(), http.StatusInternalServerError)
-		log.Error(err)
-		return
-	}
-
-	h.WriteJSON(w, util.MapStr{
-		"_id":    obj.ID,
-		"result": "created",
-	}, 200)
-
-}
-
-func (h *GatewayAPI) getInstance(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	id := ps.MustGetParameter("instance_id")
-
-	obj := model.Instance{}
-	obj.ID = id
-
-	exists, err := orm.Get(&obj)
-	if !exists || err != nil {
-		h.WriteJSON(w, util.MapStr{
-			"_id":   id,
-			"found": false,
-		}, http.StatusNotFound)
-		return
-	}
-	if err != nil {
-		h.WriteError(w, err.Error(), http.StatusInternalServerError)
-		log.Error(err)
-		return
-	}
-
-	h.WriteJSON(w, util.MapStr{
-		"found":   true,
-		"_id":     id,
-		"_source": obj,
-	}, 200)
-}
-
-func (h *GatewayAPI) updateInstance(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	id := ps.MustGetParameter("instance_id")
-	obj := model.Instance{}
-
-	obj.ID = id
-	exists, err := orm.Get(&obj)
-	if !exists || err != nil {
-		h.WriteJSON(w, util.MapStr{
-			"_id":    id,
-			"result": "not_found",
-		}, http.StatusNotFound)
-		return
-	}
-
-	id = obj.ID
-	create := obj.Created
-	obj = model.Instance{}
-	err = h.DecodeJSON(req, &obj)
-	if err != nil {
-		h.WriteError(w, err.Error(), http.StatusInternalServerError)
-		log.Error(err)
-		return
-	}
-
-	//protect
-	obj.ID = id
-	obj.Created = create
-	err = orm.Update(nil, &obj)
-	if err != nil {
-		h.WriteError(w, err.Error(), http.StatusInternalServerError)
-		log.Error(err)
-		return
-	}
-
-	h.WriteJSON(w, util.MapStr{
-		"_id":    obj.ID,
-		"result": "updated",
-	}, 200)
-}
-
-func (h *GatewayAPI) deleteInstance(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	id := ps.MustGetParameter("instance_id")
-
-	obj := model.Instance{}
-	obj.ID = id
-
-	exists, err := orm.Get(&obj)
-	if !exists || err != nil {
-		h.WriteJSON(w, util.MapStr{
-			"_id":    id,
-			"result": "not_found",
-		}, http.StatusNotFound)
-		return
-	}
-
-	//check reference
-	query := util.MapStr{
-		"size": 1,
-		"query": util.MapStr{
-			"bool": util.MapStr{
-				"must": []util.MapStr{
-					{
-						"term": util.MapStr{
-							"metadata.labels.permit_nodes.id": util.MapStr{
-								"value": id,
-							},
-						},
-					},
-					{
-						"terms": util.MapStr{
-							"metadata.type": []string{"cluster_migration", "cluster_comparison"},
-						},
-					},
-				},
-				"must_not": []util.MapStr{
-					{
-						"terms": util.MapStr{
-							"status": []string{task.StatusError, task.StatusComplete},
-						},
-					},
-				},
-			},
-		},
-	}
-	q := &orm.Query{
-		RawQuery: util.MustToJSONBytes(query),
-	}
-	err, result := orm.Search(task.Task{}, q)
-	if err != nil {
-		h.WriteError(w, err.Error(), http.StatusInternalServerError)
-		log.Error(err)
-		return
-	}
-	if len(result.Result) > 0 {
-		var taskId interface{}
-		if m, ok := result.Result[0].(map[string]interface{}); ok {
-			taskId = m["id"]
-		}
-		h.WriteError(w, fmt.Sprintf("failed to delete gateway instance [%s] since it is used by task [%v]", id, taskId), http.StatusInternalServerError)
-		return
-	}
-
-	err = orm.Delete(nil, &obj)
-	if err != nil {
-		h.WriteError(w, err.Error(), http.StatusInternalServerError)
-		log.Error(err)
-		return
-	}
-
-	h.WriteJSON(w, util.MapStr{
-		"_id":    obj.ID,
-		"result": "deleted",
-	}, 200)
-}
-
-func (h *GatewayAPI) searchInstance(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	var (
-		keyword = h.GetParameterOrDefault(req, "keyword", "")
-		strSize = h.GetParameterOrDefault(req, "size", "20")
-		strFrom = h.GetParameterOrDefault(req, "from", "0")
-		sort    = h.GetParameterOrDefault(req, "sort", "created:desc")
-	)
-	mustQ := []interface{}{}
-	if keyword != "" {
-		mustQ = append(mustQ, util.MapStr{
-			"query_string": util.MapStr{"default_field": "*", "query": fmt.Sprintf("*%s*", keyword)},
-		})
-	}
-	size, _ := strconv.Atoi(strSize)
-	if size <= 0 {
-		size = 20
-	}
-	from, _ := strconv.Atoi(strFrom)
-	if from < 0 {
-		from = 0
-	}
-	var (
-		sortField     string
-		sortDirection string
-	)
-	sortParts := strings.Split(sort, ":")
-	sortField = sortParts[0]
-	if len(sortParts) >= 2 {
-		sortDirection = sortParts[1]
-	}
-	if sortDirection == "" {
-		sortDirection = "asc"
-	}
-	query := util.MapStr{
-		"size": size,
-		"from": from,
-		"query": util.MapStr{
-			"bool": util.MapStr{
-				"must": mustQ,
-			},
-		},
-		"sort": []util.MapStr{
-			{
-				sortField: util.MapStr{
-					"order": sortDirection,
-				},
-			},
-		},
-	}
-
-	q := orm.Query{
-		RawQuery: util.MustToJSONBytes(query),
-	}
-
-	err, res := orm.Search(&model.Instance{}, &q)
+//
+//import (
+//	"fmt"
+//	log "github.com/cihub/seelog"
+//	"github.com/segmentio/encoding/json"
+//	"infini.sh/console/model"
+//	httprouter "infini.sh/framework/core/api/router"
+//	elastic2 "infini.sh/framework/core/elastic"
+//	"infini.sh/framework/core/model"
+//	"infini.sh/framework/core/orm"
+//	"infini.sh/framework/core/proxy"
+//	"infini.sh/framework/core/task"
+//	"infini.sh/framework/core/util"
+//	"infini.sh/framework/modules/elastic"
+//	"net/http"
+//	"net/url"
+//	"strconv"
+//	"strings"
+//	"time"
+//)
+//
+//func (h *GatewayAPI) getInstance(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
+//	id := ps.MustGetParameter("instance_id")
+//
+//	obj := model.Instance{}
+//	obj.ID = id
+//
+//	exists, err := orm.Get(&obj)
+//	if !exists || err != nil {
+//		h.WriteJSON(w, util.MapStr{
+//			"_id":   id,
+//			"found": false,
+//		}, http.StatusNotFound)
+//		return
+//	}
+//	if err != nil {
+//		h.WriteError(w, err.Error(), http.StatusInternalServerError)
+//		log.Error(err)
+//		return
+//	}
+//
+//	h.WriteJSON(w, util.MapStr{
+//		"found":   true,
+//		"_id":     id,
+//		"_source": obj,
+//	}, 200)
+//}
+//
+//func (h *GatewayAPI) updateInstance(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
+//	id := ps.MustGetParameter("instance_id")
+//	obj := model.Instance{}
+//
+//	obj.ID = id
+//	exists, err := orm.Get(&obj)
+//	if !exists || err != nil {
+//		h.WriteJSON(w, util.MapStr{
+//			"_id":    id,
+//			"result": "not_found",
+//		}, http.StatusNotFound)
+//		return
+//	}
+//
+//	id = obj.ID
+//	create := obj.Created
+//	obj = model.Instance{}
+//	err = h.DecodeJSON(req, &obj)
+//	if err != nil {
+//		h.WriteError(w, err.Error(), http.StatusInternalServerError)
+//		log.Error(err)
+//		return
+//	}
+//
+//	//protect
+//	obj.ID = id
+//	obj.Created = create
+//	err = orm.Update(nil, &obj)
+//	if err != nil {
+//		h.WriteError(w, err.Error(), http.StatusInternalServerError)
+//		log.Error(err)
+//		return
+//	}
+//
+//	h.WriteJSON(w, util.MapStr{
+//		"_id":    obj.ID,
+//		"result": "updated",
+//	}, 200)
+//}
+//
+//func (h *GatewayAPI) deleteInstance(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
+//	id := ps.MustGetParameter("instance_id")
+//
+//	obj := model.Instance{}
+//	obj.ID = id
+//
+//	exists, err := orm.Get(&obj)
+//	if !exists || err != nil {
+//		h.WriteJSON(w, util.MapStr{
+//			"_id":    id,
+//			"result": "not_found",
+//		}, http.StatusNotFound)
+//		return
+//	}
+//
+//	//check reference
+//	query := util.MapStr{
+//		"size": 1,
+//		"query": util.MapStr{
+//			"bool": util.MapStr{
+//				"must": []util.MapStr{
+//					{
+//						"term": util.MapStr{
+//							"metadata.labels.permit_nodes.id": util.MapStr{
+//								"value": id,
+//							},
+//						},
+//					},
+//					{
+//						"terms": util.MapStr{
+//							"metadata.type": []string{"cluster_migration", "cluster_comparison"},
+//						},
+//					},
+//				},
+//				"must_not": []util.MapStr{
+//					{
+//						"terms": util.MapStr{
+//							"status": []string{task.StatusError, task.StatusComplete},
+//						},
+//					},
+//				},
+//			},
+//		},
+//	}
+//	q := &orm.Query{
+//		RawQuery: util.MustToJSONBytes(query),
+//	}
+//	err, result := orm.Search(task.Task{}, q)
+//	if err != nil {
+//		h.WriteError(w, err.Error(), http.StatusInternalServerError)
+//		log.Error(err)
+//		return
+//	}
+//	if len(result.Result) > 0 {
+//		var taskId interface{}
+//		if m, ok := result.Result[0].(map[string]interface{}); ok {
+//			taskId = m["id"]
+//		}
+//		h.WriteError(w, fmt.Sprintf("failed to delete gateway instance [%s] since it is used by task [%v]", id, taskId), http.StatusInternalServerError)
+//		return
+//	}
+//
+//	err = orm.Delete(nil, &obj)
+//	if err != nil {
+//		h.WriteError(w, err.Error(), http.StatusInternalServerError)
+//		log.Error(err)
+//		return
+//	}
+//
+//	h.WriteJSON(w, util.MapStr{
+//		"_id":    obj.ID,
+//		"result": "deleted",
+//	}, 200)
+//}
+//
+//func (h *GatewayAPI) searchInstance(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
+//
+//	var (
+//		keyword     = h.GetParameterOrDefault(req, "keyword", "")
+//		queryDSL    = `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
+//		strSize     = h.GetParameterOrDefault(req, "size", "20")
+//		strFrom     = h.GetParameterOrDefault(req, "from", "0")
+//		mustBuilder = &strings.Builder{}
+//	)
+//	if keyword != "" {
+//		mustBuilder.WriteString(fmt.Sprintf(`{"query_string":{"default_field":"*","query": "%s"}}`, keyword))
+//	}
+//	size, _ := strconv.Atoi(strSize)
+//	if size <= 0 {
+//		size = 20
+//	}
+//	from, _ := strconv.Atoi(strFrom)
+//	if from < 0 {
+//		from = 0
+//	}
+//
+//	q := orm.Query{}
+//	queryDSL = fmt.Sprintf(queryDSL, mustBuilder.String(), size, from)
+//	q.RawQuery = []byte(queryDSL)
+//
+//	err, res := orm.Search(&model.Instance{}, &q)
+//	if err != nil {
+//		log.Error(err)
+//		h.WriteError(w, err.Error(), http.StatusInternalServerError)
+//		return
+//	}
+//	h.Write(w, res.Raw)
+//}
+//
+//func (h *GatewayAPI) getInstanceStatus(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
+//	var instanceIDs = []string{}
+//	err := h.DecodeJSON(req, &instanceIDs)
+//	if err != nil {
+//		log.Error(err)
+//		h.WriteError(w, err.Error(), http.StatusInternalServerError)
+//		return
+//	}
+//	if len(instanceIDs) == 0 {
+//		h.WriteJSON(w, util.MapStr{}, http.StatusOK)
+//		return
+//	}
+//	q := orm.Query{}
+//	queryDSL := util.MapStr{
+//		"query": util.MapStr{
+//			"terms": util.MapStr{
+//				"_id": instanceIDs,
+//			},
+//		},
+//	}
+//	q.RawQuery = util.MustToJSONBytes(queryDSL)
+//
+//	err, res := orm.Search(&model.Instance{}, &q)
+//	if err != nil {
+//		log.Error(err)
+//		h.WriteError(w, err.Error(), http.StatusInternalServerError)
+//		return
+//	}
+//	result := util.MapStr{}
+//	for _, item := range res.Result {
+//		instance := util.MapStr(item.(map[string]interface{}))
+//		if err != nil {
+//			log.Error(err)
+//			continue
+//		}
+//		endpoint, _ := instance.GetValue("endpoint")
+//		username, _ := instance.GetValue("basic_auth.username")
+//		if username == nil {
+//			username = ""
+//		}
+//		password, _ := instance.GetValue("basic_auth.password")
+//		if password == nil {
+//			password = ""
+//		}
+//		gid, _ := instance.GetValue("id")
+//		res, err := proxy.DoProxyRequest(&proxy.Request{
+//			Endpoint: endpoint.(string),
+//			Method:   http.MethodGet,
+//			Path:     "/stats",
+//			BasicAuth: instance.BasicAuth{
+//				Username: username.(string),
+//				Password: password.(string),
+//			},
+//		})
+//		if err != nil {
+//			log.Error(err)
+//			result[gid.(string)] = util.MapStr{}
+//			continue
+//		}
+//		var resMap = util.MapStr{}
+//		err = util.FromJSONBytes(res.Body, &resMap)
+//		if err != nil {
+//			result[gid.(string)] = util.MapStr{}
+//			log.Errorf("get stats of %v error: %v", endpoint, err)
+//			continue
+//		}
+//
+//		result[gid.(string)] = resMap
+//	}
+//	h.WriteJSON(w, result, http.StatusOK)
+//}
+//func (h *GatewayAPI) proxy(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
+//	var (
+//		method = h.Get(req, "method", "GET")
+//		path   = h.Get(req, "path", "")
+//	)
+//	instanceID := ps.MustGetParameter("instance_id")
+//
|
// obj := model.Instance{}
|
||||||
if err != nil {
|
// obj.ID = instanceID
|
||||||
log.Error(err)
|
//
|
||||||
h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
// exists, err := orm.Get(&obj)
|
||||||
return
|
// if err != nil {
|
||||||
}
|
// h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
||||||
h.Write(w, res.Raw)
|
// log.Error(err)
|
||||||
}
|
// return
|
||||||
|
// }
|
||||||
func (h *GatewayAPI) getInstanceStatus(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
|
// if !exists {
|
||||||
var instanceIDs = []string{}
|
// h.WriteJSON(w, util.MapStr{
|
||||||
err := h.DecodeJSON(req, &instanceIDs)
|
// "error": "gateway instance not found",
|
||||||
if err != nil {
|
// }, http.StatusNotFound)
|
||||||
log.Error(err)
|
// return
|
||||||
h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
// }
|
||||||
return
|
// res, err := proxy.DoProxyRequest(&proxy.Request{
|
||||||
}
|
// Method: method,
|
||||||
if len(instanceIDs) == 0 {
|
// Endpoint: obj.Endpoint,
|
||||||
h.WriteJSON(w, util.MapStr{}, http.StatusOK)
|
// Path: path,
|
||||||
return
|
// Body: req.Body,
|
||||||
}
|
// BasicAuth: obj.BasicAuth,
|
||||||
q := orm.Query{}
|
// ContentLength: int(req.ContentLength),
|
||||||
queryDSL := util.MapStr{
|
// })
|
||||||
"query": util.MapStr{
|
// if err != nil {
|
||||||
"terms": util.MapStr{
|
// h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
||||||
"_id": instanceIDs,
|
// log.Error(err)
|
||||||
},
|
// return
|
||||||
},
|
// }
|
||||||
}
|
// h.WriteHeader(w, res.StatusCode)
|
||||||
q.RawQuery = util.MustToJSONBytes(queryDSL)
|
// h.Write(w, res.Body)
|
||||||
|
//}
|
||||||
err, res := orm.Search(&model.Instance{}, &q)
|
//
|
||||||
if err != nil {
|
//type GatewayConnectResponse struct {
|
||||||
log.Error(err)
|
// ID string `json:"id"`
|
||||||
h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
// Name string `json:"name"`
|
||||||
return
|
// Tagline string `json:"tagline"`
|
||||||
}
|
// Version struct {
|
||||||
result := util.MapStr{}
|
// BuildDate string `json:"build_date"`
|
||||||
for _, item := range res.Result {
|
// BuildHash string `json:"build_hash"`
|
||||||
instance := util.MapStr(item.(map[string]interface{}))
|
// EOLDate string `json:"eol_date"`
|
||||||
if err != nil {
|
// Number string `json:"number"`
|
||||||
log.Error(err)
|
// } `json:"version"`
|
||||||
continue
|
//
|
||||||
}
|
//}
|
||||||
endpoint, _ := instance.GetValue("endpoint")
|
//func (h *GatewayAPI) doConnect(endpoint string, basicAuth model.BasicAuth) (*GatewayConnectResponse, error) {
|
||||||
username, _ := instance.GetValue("basic_auth.username")
|
// res, err := proxy.DoProxyRequest(&proxy.Request{
|
||||||
if username == nil {
|
// Method: http.MethodGet,
|
||||||
username = ""
|
// Endpoint: endpoint,
|
||||||
}
|
// Path: "/_info",
|
||||||
password, _ := instance.GetValue("basic_auth.password")
|
// BasicAuth: basicAuth,
|
||||||
if password == nil {
|
// })
|
||||||
password = ""
|
// if err != nil {
|
||||||
}
|
// return nil, err
|
||||||
gid, _ := instance.GetValue("id")
|
// }
|
||||||
res, err := proxy.DoProxyRequest(&proxy.Request{
|
// if res.StatusCode == http.StatusNotFound {
|
||||||
Endpoint: endpoint.(string),
|
// return nil, fmt.Errorf("unknow gateway version")
|
||||||
Method: http.MethodGet,
|
// }
|
||||||
Path: "/stats",
|
// b := res.Body
|
||||||
BasicAuth: agent.BasicAuth{
|
// gres := &GatewayConnectResponse{}
|
||||||
Username: username.(string),
|
// err = json.Unmarshal(b, gres)
|
||||||
Password: password.(string),
|
// return gres, err
|
||||||
},
|
//
|
||||||
})
|
//}
|
||||||
if err != nil {
|
//
|
||||||
log.Error(err)
|
//func (h *GatewayAPI) tryConnect(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
|
||||||
result[gid.(string)] = util.MapStr{}
|
// var reqBody = struct {
|
||||||
continue
|
// Endpoint string `json:"endpoint"`
|
||||||
}
|
// BasicAuth model.BasicAuth
|
||||||
var resMap = util.MapStr{}
|
// }{}
|
||||||
err = util.FromJSONBytes(res.Body, &resMap)
|
// err := h.DecodeJSON(req, &reqBody)
|
||||||
if err != nil {
|
// if err != nil {
|
||||||
result[gid.(string)] = util.MapStr{}
|
// h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
||||||
log.Errorf("get stats of %v error: %v", endpoint, err)
|
// return
|
||||||
continue
|
// }
|
||||||
}
|
// connectRes, err := h.doConnect(reqBody.Endpoint, reqBody.BasicAuth)
|
||||||
|
// if err != nil {
|
||||||
result[gid.(string)] = resMap
|
// h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
||||||
}
|
// return
|
||||||
h.WriteJSON(w, result, http.StatusOK)
|
// }
|
||||||
}
|
// h.WriteJSON(w, connectRes, http.StatusOK)
|
||||||
func (h *GatewayAPI) proxy(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
|
//}
|
||||||
var (
|
//
|
||||||
method = h.Get(req, "method", "GET")
|
//func (h *GatewayAPI) getExecutionNodes(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
|
||||||
path = h.Get(req, "path", "")
|
// var (
|
||||||
)
|
// keyword = h.GetParameterOrDefault(req, "keyword", "")
|
||||||
instanceID := ps.MustGetParameter("instance_id")
|
// strSize = h.GetParameterOrDefault(req, "size", "10")
|
||||||
|
// strFrom = h.GetParameterOrDefault(req, "from", "0")
|
||||||
obj := model.Instance{}
|
// )
|
||||||
obj.ID = instanceID
|
// size, _ := strconv.Atoi(strSize)
|
||||||
|
// if size <= 0 {
|
||||||
exists, err := orm.Get(&obj)
|
// size = 10
|
||||||
if err != nil {
|
// }
|
||||||
h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
// from, _ := strconv.Atoi(strFrom)
|
||||||
log.Error(err)
|
// if from < 0 {
|
||||||
return
|
// from = 0
|
||||||
}
|
// }
|
||||||
if !exists {
|
// gatewayIndexName := orm.GetIndexName(model.Instance{})
|
||||||
h.WriteJSON(w, util.MapStr{
|
//
|
||||||
"error": "gateway instance not found",
|
// query := util.MapStr{
|
||||||
}, http.StatusNotFound)
|
// "size": size,
|
||||||
return
|
// "from": from,
|
||||||
}
|
// "sort": []util.MapStr{
|
||||||
res, err := proxy.DoProxyRequest(&proxy.Request{
|
// {
|
||||||
Method: method,
|
// "created": util.MapStr{
|
||||||
Endpoint: obj.Endpoint,
|
// "order": "desc",
|
||||||
Path: path,
|
// },
|
||||||
Body: req.Body,
|
// },
|
||||||
BasicAuth: obj.BasicAuth,
|
// },
|
||||||
ContentLength: int(req.ContentLength),
|
// }
|
||||||
})
|
// if keyword != "" {
|
||||||
if err != nil {
|
// query["query"] = util.MapStr{
|
||||||
h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
// "bool": util.MapStr{
|
||||||
log.Error(err)
|
// "must": []util.MapStr{
|
||||||
return
|
// {
|
||||||
}
|
// "prefix": util.MapStr{
|
||||||
h.WriteHeader(w, res.StatusCode)
|
// "name": util.MapStr{
|
||||||
h.Write(w, res.Body)
|
// "value": keyword,
|
||||||
}
|
// },
|
||||||
|
// },
|
||||||
type GatewayConnectResponse struct {
|
// },
|
||||||
ID string `json:"id"`
|
// },
|
||||||
Name string `json:"name"`
|
// },
|
||||||
Tagline string `json:"tagline"`
|
// }
|
||||||
Version struct {
|
// }
|
||||||
BuildDate string `json:"build_date"`
|
// q := orm.Query{
|
||||||
BuildHash string `json:"build_hash"`
|
// IndexName: gatewayIndexName,
|
||||||
EOLDate string `json:"eol_date"`
|
// RawQuery: util.MustToJSONBytes(query),
|
||||||
Number string `json:"number"`
|
// }
|
||||||
} `json:"version"`
|
// err, result := orm.Search(nil, &q)
|
||||||
|
// if err != nil {
|
||||||
}
|
// h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
||||||
func (h *GatewayAPI) doConnect(endpoint string, basicAuth agent.BasicAuth) (*GatewayConnectResponse, error) {
|
// return
|
||||||
res, err := proxy.DoProxyRequest(&proxy.Request{
|
// }
|
||||||
Method: http.MethodGet,
|
//
|
||||||
Endpoint: endpoint,
|
// searchRes := elastic2.SearchResponse{}
|
||||||
Path: "/_framework/api/_info",
|
// err = util.FromJSONBytes(result.Raw, &searchRes)
|
||||||
BasicAuth: basicAuth,
|
// if err != nil||searchRes.ESError!=nil {
|
||||||
})
|
// msg:=fmt.Sprintf("%v,%v",err,searchRes.ESError)
|
||||||
if err != nil {
|
// h.WriteError(w, msg, http.StatusInternalServerError)
|
||||||
return nil, err
|
// return
|
||||||
}
|
// }
|
||||||
if res.StatusCode == http.StatusNotFound {
|
// var nodes = []util.MapStr{}
|
||||||
return nil, fmt.Errorf("unknow gateway version")
|
//
|
||||||
}
|
// for _, hit := range searchRes.Hits.Hits {
|
||||||
b := res.Body
|
// buf := util.MustToJSONBytes(hit.Source)
|
||||||
gres := &GatewayConnectResponse{}
|
// inst := model.Instance{}
|
||||||
err = json.Unmarshal(b, gres)
|
// err = util.FromJSONBytes(buf, &inst)
|
||||||
return gres, err
|
// if err != nil {
|
||||||
|
// log.Error(err)
|
||||||
}
|
// continue
|
||||||
|
// }
|
||||||
func (h *GatewayAPI) tryConnect(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
|
// node := util.MapStr{
|
||||||
var reqBody = struct {
|
// "id": inst.ID,
|
||||||
Endpoint string `json:"endpoint"`
|
// "name": inst.Name,
|
||||||
BasicAuth agent.BasicAuth
|
// "available": false,
|
||||||
}{}
|
// "type": "gateway",
|
||||||
err := h.DecodeJSON(req, &reqBody)
|
// }
|
||||||
if err != nil {
|
// ul, err := url.Parse(inst.Endpoint)
|
||||||
h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
// if err != nil {
|
||||||
return
|
// log.Error(err)
|
||||||
}
|
// continue
|
||||||
connectRes, err := h.doConnect(reqBody.Endpoint, reqBody.BasicAuth)
|
// }
|
||||||
if err != nil {
|
// node["host"] = ul.Host
|
||||||
h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
// err = inst.TryConnectWithTimeout(time.Second)
|
||||||
return
|
// if err != nil {
|
||||||
}
|
// log.Error(err)
|
||||||
h.WriteJSON(w, connectRes, http.StatusOK)
|
// }else{
|
||||||
}
|
// node["available"] = true
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// nodes = append(nodes, node)
|
||||||
|
// }
|
||||||
|
// h.WriteJSON(w, nodes, http.StatusOK)
|
||||||
|
//}
|
||||||
|
|
|
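For reference, the permit-nodes check in deleteInstance above boils down to a single Elasticsearch query. The standalone sketch below renders the equivalent JSON with stdlib maps; "some-instance-id" stands in for the instance being deleted, and the literal strings "error" and "complete" are assumed values for task.StatusError and task.StatusComplete, which this diff does not show.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Equivalent of the util.MapStr query built in deleteInstance above.
	query := map[string]interface{}{
		"size": 1,
		"query": map[string]interface{}{
			"bool": map[string]interface{}{
				"must": []interface{}{
					map[string]interface{}{"term": map[string]interface{}{
						"metadata.labels.permit_nodes.id": map[string]interface{}{"value": "some-instance-id"},
					}},
					map[string]interface{}{"terms": map[string]interface{}{
						"metadata.type": []string{"cluster_migration", "cluster_comparison"},
					}},
				},
				// Finished or failed tasks no longer hold a reference.
				"must_not": []interface{}{
					map[string]interface{}{"terms": map[string]interface{}{
						"status": []string{"error", "complete"},
					}},
				},
			},
		},
	}
	b, _ := json.MarshalIndent(query, "", "  ")
	fmt.Println(string(b))
}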
@@ -1,186 +0,0 @@
/* Copyright © INFINI Ltd. All rights reserved.
 * Web: https://infinilabs.com
 * Email: hello#infini.ltd */

package gateway

import (
	"crypto/tls"
	"io"
	log "github.com/cihub/seelog"
	"net"
	"net/http"
	"net/url"
	"strings"
)

// ReverseProxy is a WebSocket reverse proxy. It will not work with a regular
// HTTP request, so it is the caller's responsibility to ensure the incoming
// request is a WebSocket request.
type ReverseProxy struct {
	// Director must be a function which modifies
	// the request into a new request to be sent
	// using Transport. Its response is then copied
	// back to the original client unmodified.
	Director func(*http.Request)

	// Dial specifies the dial function for dialing the proxied
	// server over tcp.
	// If Dial is nil, net.Dial is used.
	Dial func(network, addr string) (net.Conn, error)

	// TLSClientConfig specifies the TLS configuration to use for 'wss'.
	// If nil, the default configuration is used.
	TLSClientConfig *tls.Config

	// ErrorLog specifies an optional logger for errors
	// that occur when attempting to proxy the request.
	// If nil, logging goes to os.Stderr via the log package's
	// standard logger.
	ErrorLog *log.LoggerInterface
}

// stolen from net/http/httputil. singleJoiningSlash ensures that the route
// '/a/' joined with '/b' becomes '/a/b'.
func singleJoiningSlash(a, b string) string {
	aslash := strings.HasSuffix(a, "/")
	bslash := strings.HasPrefix(b, "/")
	switch {
	case aslash && bslash:
		return a + b[1:]
	case !aslash && !bslash:
		return a + "/" + b
	}
	return a + b
}

// NewSingleHostReverseProxy returns a new websocket ReverseProxy. The path
// rewrites follow the same rules as the httputil.ReverseProxy. If the target
// url has the path '/foo' and the incoming request '/bar', the request path
// will be updated to '/foo/bar' before forwarding.
// Scheme should specify if 'ws' or 'wss' should be used.
func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy {
	targetQuery := target.RawQuery
	director := func(req *http.Request) {
		req.URL.Scheme = target.Scheme
		req.URL.Host = target.Host
		req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
		if targetQuery == "" || req.URL.RawQuery == "" {
			req.URL.RawQuery = targetQuery + req.URL.RawQuery
		} else {
			req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
		}
	}
	return &ReverseProxy{Director: director}
}

// ServeHTTP implements the http.Handler interface.
func (p *ReverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	logFunc := log.Errorf

	if !IsWebSocketRequest(r) {
		http.Error(w, "Cannot handle non-WebSocket requests", 500)
		logFunc("Received a request that was not a WebSocket request")
		return
	}

	outreq := new(http.Request)
	// shallow copying
	*outreq = *r
	p.Director(outreq)
	host := outreq.URL.Host

	if clientIP, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
		// If we aren't the first proxy, retain prior
		// X-Forwarded-For information as a comma+space
		// separated list and fold multiple headers into one.
		if prior, ok := outreq.Header["X-Forwarded-For"]; ok {
			clientIP = strings.Join(prior, ", ") + ", " + clientIP
		}
		outreq.Header.Set("X-Forwarded-For", clientIP)
	}

	dial := p.Dial
	if dial == nil {
		dial = net.Dial
	}

	// if host does not specify a port, use the default http port
	if !strings.Contains(host, ":") {
		if outreq.URL.Scheme == "wss" {
			host = host + ":443"
		} else {
			host = host + ":80"
		}
	}

	if outreq.URL.Scheme == "wss" {
		var tlsConfig *tls.Config
		if p.TLSClientConfig == nil {
			tlsConfig = &tls.Config{}
		} else {
			tlsConfig = p.TLSClientConfig
		}
		dial = func(network, address string) (net.Conn, error) {
			return tls.Dial("tcp", host, tlsConfig)
		}
	}

	d, err := dial("tcp", host)
	if err != nil {
		http.Error(w, "Error forwarding request.", 500)
		logFunc("Error dialing websocket backend %s: %v", outreq.URL, err)
		return
	}
	// All requests generated by the http package implement this interface.
	hj, ok := w.(http.Hijacker)
	if !ok {
		http.Error(w, "Not a hijacker?", 500)
		return
	}
	// Hijack() tells the http package not to do anything else with the connection.
	// After, it becomes this function's job to manage it. `nc` is of type *net.Conn.
	nc, _, err := hj.Hijack()
	if err != nil {
		logFunc("Hijack error: %v", err)
		return
	}
	defer nc.Close() // must close the underlying net connection after hijacking
	defer d.Close()

	// write the modified incoming request to the dialed connection
	err = outreq.Write(d)
	if err != nil {
		logFunc("Error copying request to target: %v", err)
		return
	}
	errc := make(chan error, 2)
	cp := func(dst io.Writer, src io.Reader) {
		_, err := io.Copy(dst, src)
		errc <- err
	}
	go cp(d, nc)
	go cp(nc, d)
	<-errc
}

// IsWebSocketRequest returns a boolean indicating whether the request has the
// headers of a WebSocket handshake request.
func IsWebSocketRequest(r *http.Request) bool {
	contains := func(key, val string) bool {
		vv := strings.Split(r.Header.Get(key), ",")
		for _, v := range vv {
			if val == strings.ToLower(strings.TrimSpace(v)) {
				return true
			}
		}
		return false
	}
	if !contains("Connection", "upgrade") {
		return false
	}
	if !contains("Upgrade", "websocket") {
		return false
	}
	return true
}
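The removed file above only defines the proxy type itself. For context, a minimal usage sketch, assuming it lives in the same gateway package and a backend address chosen purely for illustration:

package gateway

import (
	"net/http"
	"net/url"
)

// StartWSProxy is a hypothetical helper showing how the proxy above would be
// wired up: parse the backend URL, build the proxy, and serve it. It only
// accepts WebSocket handshake requests and tunnels bytes in both directions.
func StartWSProxy() error {
	target, err := url.Parse("ws://localhost:2900/ws") // assumed backend
	if err != nil {
		return err
	}
	return http.ListenAndServe(":8080", NewSingleHostReverseProxy(target))
}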
@@ -5,18 +5,16 @@
 package license
 
 import (
-	"net/http"
-
 	"infini.sh/framework/core/api"
 	httprouter "infini.sh/framework/core/api/router"
 	"infini.sh/framework/core/util"
 	"infini.sh/license"
+	"net/http"
 )
 
 type LicenseAPI struct {
 	api.Handler
 }
 
 func InitAPI() {
 	handler := LicenseAPI{}
 	api.HandleAPIMethod(api.POST, "/_license/request_trial", handler.RequestTrialLicense)
@@ -30,36 +28,36 @@ func (handler *LicenseAPI) RequestTrialLicense(w http.ResponseWriter, req *http.
 	}
 
 	v := license.TrialRequest{}
-	err = util.FromJSONBytes(body, &v)
+	err=util.FromJSONBytes(body, &v)
 	if err != nil {
 		handler.Error500(w, err.Error())
 		return
 	}
 
 	//TODO implement config for the api endpoint
-	request := util.NewPostRequest("https://api.infini.cloud/_license/request_trial", util.MustToJSONBytes(v))
-	response, err := util.ExecuteRequest(request)
-	if err != nil {
-		handler.WriteError(w, err.Error(), response.StatusCode)
+	request:=util.NewPostRequest("https://api.infini.cloud/_license/request_trial", util.MustToJSONBytes(v))
+	response,err:=util.ExecuteRequest(request)
+	if err!=nil{
+		handler.WriteError(w,err.Error(),response.StatusCode)
 		return
 	}
 
-	r := license.TrialResponse{}
-	err = util.FromJSONBytes(response.Body, &r)
+	r:=license.TrialResponse{}
+	err=util.FromJSONBytes(response.Body, &r)
 	if err != nil {
 		handler.Error500(w, err.Error())
 		return
 	}
 
-	if r.License != "" {
+	if r.License!=""{
 		ok := license.ApplyLicense(r.License)
 		if ok {
 			license.PersistLicense(r.License)
-		} else {
-			r.License = ""
+		}else{
+			r.License=""
 		}
 	}
 
 	w.WriteHeader(response.StatusCode)
 	w.Write(util.MustToJSONBytes(r))
 }
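A quick way to exercise the handler above is to post to the console's own trial route registered in InitAPI. In this sketch the console address is assumed and the body fields are illustrative only, since the license.TrialRequest schema is not shown in this diff:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Assumed local console address; the route itself comes from InitAPI above.
	body := []byte(`{"email":"user@example.com"}`) // illustrative fields only
	res, err := http.Post("http://localhost:9000/_license/request_trial", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println("status:", res.Status)
}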
@@ -6,6 +6,7 @@ import (
 	"encoding/hex"
 	"fmt"
 	"infini.sh/framework/core/kv"
+	"infini.sh/framework/core/model"
 	"io"
 	"net/http"
 	uri2 "net/url"
@@ -88,7 +89,7 @@ func (module *Module) Start() error {
 			log.Error(err)
 			return
 		}
-		if basicAuth, ok := bv.(elastic.BasicAuth); ok {
+		if basicAuth, ok := bv.(model.BasicAuth); ok {
 			err = keystore.SetValue("SYSTEM_CLUSTER_PASS", []byte(basicAuth.Password))
 			if err != nil {
 				log.Error(err)
@@ -203,7 +204,7 @@ func (module *Module) validate(w http.ResponseWriter, r *http.Request, ps httpro
 	}
 	cfg1 = elastic1.ORMConfig{}
 	exist, err := env.ParseConfig("elastic.orm", &cfg1)
-	if exist && err != nil {
+	if exist && err != nil &&global.Env().SystemConfig.Configs.PanicOnConfigError{
 		panic(err)
 	}
 
@@ -272,7 +273,7 @@ func (module *Module) initTempClient(r *http.Request) (error, elastic.API, Setup
 		Enabled: true,
 		Reserved: true,
 		Endpoint: request.Cluster.Endpoint,
-		BasicAuth: &elastic.BasicAuth{
+		BasicAuth: &model.BasicAuth{
 			Username: request.Cluster.Username,
 			Password: request.Cluster.Password,
 		},
@@ -516,10 +517,6 @@ func (module *Module) initialize(w http.ResponseWriter, r *http.Request, ps http
 		}
 	}
 
-	if err != nil {
-		panic(err)
-	}
-
 	//process the indices
 	elastic2.InitSchema()
 	//init security
@@ -592,9 +589,9 @@ func (module *Module) initialize(w http.ResponseWriter, r *http.Request, ps http
 	//save to local file
 	file := path.Join(global.Env().GetConfigDir(), "system_config.yml")
 	_, err = util.FilePutContent(file, fmt.Sprintf("configs.template:\n  - name: \"system\"\n    path: ./config/system_config.tpl\n    variable:\n      "+
-		"CLUSTER_ID: %v\n      CLUSTER_ENDPINT: \"%v\"\n      "+
+		"CLUSTER_ID: %v\n      CLUSTER_ENDPOINT: \"%v\"\n      "+
 		"CLUSTER_USER: \"%v\"\n      CLUSTER_VER: \"%v\"\n      CLUSTER_DISTRIBUTION: \"%v\"\n      INDEX_PREFIX: \"%v\"",
-		GlobalSystemElasticsearchID, cfg.Endpoint, cfg.BasicAuth.Username, cfg.Version, cfg.Distribution, cfg1.IndexPrefix))
+		GlobalSystemElasticsearchID, cfg.GetAnyEndpoint(), cfg.BasicAuth.Username, cfg.Version, cfg.Distribution, cfg1.IndexPrefix))
 	if err != nil {
 		panic(err)
 	}
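To see what the corrected CLUSTER_ENDPOINT template actually writes out, the snippet below mirrors the FilePutContent format string with placeholder values. The exact indentation of the original string was lost in rendering, so the spacing here is a best-effort reconstruction, and all argument values are assumptions:

package main

import "fmt"

func main() {
	// Mirrors the fmt.Sprintf in initialize above; prints the rendered
	// system_config.yml contents to stdout.
	out := fmt.Sprintf("configs.template:\n  - name: \"system\"\n    path: ./config/system_config.tpl\n    variable:\n      "+
		"CLUSTER_ID: %v\n      CLUSTER_ENDPOINT: \"%v\"\n      "+
		"CLUSTER_USER: \"%v\"\n      CLUSTER_VER: \"%v\"\n      CLUSTER_DISTRIBUTION: \"%v\"\n      INDEX_PREFIX: \"%v\"",
		"infini_default_system_cluster", "http://localhost:9200", "admin", "1.6.0", "easysearch", ".infini_")
	fmt.Println(out)
}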
@@ -3,6 +3,7 @@ package task_manager
 import (
 	"errors"
 	"fmt"
+	model2 "infini.sh/console/model"
 	migration_model "infini.sh/console/plugin/task_manager/model"
 	"infini.sh/framework/core/global"
 	"net/http"
@@ -12,7 +13,6 @@ import (
 
 	log "github.com/cihub/seelog"
 
-	"infini.sh/console/model"
 	migration_util "infini.sh/console/plugin/task_manager/util"
 
 	httprouter "infini.sh/framework/core/api/router"
@@ -679,7 +679,7 @@ func (h *APIHandler) getChildPipelineInfosFromGateway(pipelineTaskIDs map[string
 	var err error
 
 	for instID, taskIDs := range pipelineTaskIDs {
-		inst := &model.Instance{}
+		inst := &model2.TaskWorker{}
 		inst.ID = instID
 		_, err = orm.Get(inst)
 		if err != nil {
@@ -2,13 +2,12 @@ package model
 
 import (
 	"errors"
-
 	"infini.sh/console/model"
 )
 
 type Scheduler interface {
-	GetPreferenceInstance(config ExecutionConfig) (instance *model.Instance, err error)
-	GetInstance(instanceID string) (instance *model.Instance, err error)
+	GetPreferenceInstance(config ExecutionConfig) (instance *model.TaskWorker, err error)
+	GetInstance(instanceID string) (instance *model.TaskWorker, err error)
 	IncrInstanceJobs(instanceID string)
 	DecrInstanceJobs(instanceID string)
 	RefreshInstanceJobsFromES() error
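The rename above touches every Scheduler call site, so here is a minimal, self-contained sketch of what a caller looks like after the change. The TaskWorker and scheduler shapes below are mocks that mirror the renamed interface, not the real console types:

package main

import "fmt"

// TaskWorker stands in for infini.sh/console/model.TaskWorker, and scheduler
// for the plugin's Scheduler interface after the Instance -> TaskWorker rename.
type TaskWorker struct{ ID string }

type scheduler interface {
	GetInstance(instanceID string) (*TaskWorker, error)
	IncrInstanceJobs(instanceID string)
}

// pickWorker resolves a worker by ID and records one more job against it.
func pickWorker(s scheduler, id string) (*TaskWorker, error) {
	inst, err := s.GetInstance(id)
	if err != nil {
		return nil, err
	}
	s.IncrInstanceJobs(inst.ID)
	return inst, nil
}

type fakeScheduler struct{}

func (fakeScheduler) GetInstance(id string) (*TaskWorker, error) { return &TaskWorker{ID: id}, nil }
func (fakeScheduler) IncrInstanceJobs(id string)                 {}

func main() {
	w, _ := pickWorker(fakeScheduler{}, "worker-1")
	fmt.Println("picked:", w.ID)
}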
@@ -3,13 +3,13 @@ package pipeline_task
 import (
 	"errors"
 	"fmt"
+	"infini.sh/console/model"
 	"strconv"
 	"strings"
 	"time"
 
 	log "github.com/cihub/seelog"
 
-	"infini.sh/console/model"
 	migration_model "infini.sh/console/plugin/task_manager/model"
 	migration_util "infini.sh/console/plugin/task_manager/util"
 
@@ -169,7 +169,7 @@ func (p *processor) handlePendingStopPipelineTask(taskItem *task.Task) error {
 	return nil
 }
 
-func (p *processor) cleanGatewayPipeline(taskItem *task.Task) (instance *model.Instance, err error) {
+func (p *processor) cleanGatewayPipeline(taskItem *task.Task) (instance *model.TaskWorker, err error) {
 	instance, err = p.getPipelineExecutionInstance(taskItem)
 	if err != nil {
 		log.Errorf("failed to get execution instance for task [%s], err: %v", taskItem.ID, err)
@@ -184,7 +184,7 @@ func (p *processor) cleanGatewayPipeline(taskItem *task.Task) (instance *model.I
 	return instance, nil
 }
 
-func (p *processor) getPipelineExecutionInstance(taskItem *task.Task) (*model.Instance, error) {
+func (p *processor) getPipelineExecutionInstance(taskItem *task.Task) (*model.TaskWorker, error) {
 	instanceID, _ := util.ExtractString(taskItem.Metadata.Labels["execution_instance_id"])
 	instance, err := p.scheduler.GetInstance(instanceID)
 	if err != nil {
@@ -53,7 +53,7 @@ func NewScheduler(elasticsearch, indexName string, checkInstanceAvailable bool,
 	return scheduler, nil
 }
 
-func (p *scheduler) GetPreferenceInstance(config migration_model.ExecutionConfig) (*model.Instance, error) {
+func (p *scheduler) GetPreferenceInstance(config migration_model.ExecutionConfig) (*model.TaskWorker, error) {
 	var (
 		err error
 		minID string
@@ -64,7 +64,7 @@ func (p *scheduler) GetPreferenceInstance(config migration_model.ExecutionConfig
 		instanceTotal := p.getInstanceState(node.ID).Total
 		if instanceTotal < minTotal {
 			if p.CheckInstanceAvailable {
-				tempInst := model.Instance{}
+				tempInst := model.TaskWorker{}
 				tempInst.ID = node.ID
 				_, err = orm.Get(&tempInst)
 				if err != nil {
@@ -95,11 +95,11 @@ func (p *scheduler) GetPreferenceInstance(config migration_model.ExecutionConfig
 	return instance, nil
 }
 
-func (p *scheduler) GetInstance(instanceID string) (*model.Instance, error) {
+func (p *scheduler) GetInstance(instanceID string) (*model.TaskWorker, error) {
 	if instanceID == "" {
 		return nil, errors.New("invalid instanceID")
 	}
-	instance := model.Instance{}
+	instance := model.TaskWorker{}
 	instance.ID = instanceID
 
 	_, err := orm.Get(&instance)
@@ -114,7 +114,7 @@ func (p *scheduler) GetInstance(instanceID string) (*model.Instance, error) {
 	return &instance, nil
 }
 
-func (p *scheduler) initializeInstance(instance *model.Instance) error {
+func (p *scheduler) initializeInstance(instance *model.TaskWorker) error {
 	lastInitializedAt := p.getLastInitializedAt(instance.ID)
 	if time.Now().Sub(lastInitializedAt) < initializeInterval {
 		return nil
@@ -162,7 +162,7 @@ func (p *scheduler) initializeInstance(instance *model.Instance) error {
 // user could change the following configurations manually:
 // - input_queue (metrics.logging_queue)
 // - elasticsearch (elasticsearch.name)
-func (p *scheduler) createPipelineLoggingMerge(instance *model.Instance) error {
+func (p *scheduler) createPipelineLoggingMerge(instance *model.TaskWorker) error {
 	cfg := &migration_model.PipelineTaskConfig{
 		Name: "pipeline_logging_merge",
 		AutoStart: true,
@@ -194,7 +194,7 @@ func (p *scheduler) createPipelineLoggingMerge(instance *model.Instance) error {
 	return nil
 }
 
-func (p *scheduler) createIngestPipelineLogging(instance *model.Instance) error {
+func (p *scheduler) createIngestPipelineLogging(instance *model.TaskWorker) error {
 	cfg := &migration_model.PipelineTaskConfig{
 		Name: "ingest_pipeline_logging",
 		AutoStart: true,
@@ -6,8 +6,8 @@ package alerting
 
 import (
 	"fmt"
-	"infini.sh/framework/core/config"
 	config2 "infini.sh/console/config"
+	"infini.sh/framework/core/config"
 	"infini.sh/framework/core/env"
 	"infini.sh/framework/core/global"
 	"infini.sh/framework/core/kv"
@@ -53,6 +53,6 @@ func GetInnerConsoleEndpoint() (string, error){
 	if !ok {
 		return "", fmt.Errorf("web config not exists")
 	}
-	endpoint := fmt.Sprintf("%s://%s", appConfig.GetSchema(), appConfig.Network.GetPublishAddr())
+	endpoint := appConfig.GetEndpoint()
 	return endpoint, nil
 }