服务监控类
- 安装agent,上传agent目录下的文件。
- 修改agent.properties 配置,修改标红字体部分。
#配置 |
---|
name=target-service #服务名称 |
### http server |
# When the enabled value = false, agent will not start the http server |
# You can use -Deaseagent.server.enabled=[true|false] to override. |
easeagent.server.enabled=true #启用agent服务设置 |
# http server port. You can use -Deaseagent.server.port=[port] to override. |
easeagent.server.port=9900 #同台服务上起多个实例修改为不同的端口,确认端口不被占用 |
# Enable health/readiness |
easeagent.health.readiness.enabled=true |
#globalCanaryHeaders.serviceHeaders.mesh-app-backend.0=X-canary |
### |
### output server |
### |
observability.outputServer.bootstrapServer=127.0.0.1:9092 #kafka-IP端口,集群使用逗号分隔。 |
observability.outputServer.timeout=10000 |
observability.outputServer.enabled=true |
### |
### metrics |
### |
observability.metrics.enabled=true |
# metrics access |
observability.metrics.access.enabled=true |
observability.metrics.access.interval=30 |
observability.metrics.access.topic=application-log |
observability.metrics.access.appendType=kafka |
# metrics request |
observability.metrics.request.enabled=true |
observability.metrics.request.interval=30 |
observability.metrics.request.topic=application-meter |
observability.metrics.request.appendType=kafka |
# metrics jdbc statement |
observability.metrics.jdbcStatement.enabled=true |
observability.metrics.jdbcStatement.interval=30 |
observability.metrics.jdbcStatement.topic=application-meter |
observability.metrics.jdbcStatement.appendType=kafka |
# metrics jdbc connection |
observability.metrics.jdbcConnection.enabled=true |
observability.metrics.jdbcConnection.interval=30 |
observability.metrics.jdbcConnection.topic=application-meter |
observability.metrics.jdbcConnection.appendType=kafka |
# metrics rabbit |
observability.metrics.rabbit.enabled=true |
observability.metrics.rabbit.interval=30 |
observability.metrics.rabbit.topic=platform-meter |
observability.metrics.rabbit.appendType=kafka |
# metrics kafka |
observability.metrics.kafka.enabled=true |
observability.metrics.kafka.interval=30 |
observability.metrics.kafka.topic=platform-meter |
observability.metrics.kafka.appendType=kafka |
# metrics redis |
observability.metrics.redis.enabled=true |
observability.metrics.redis.interval=30 |
observability.metrics.redis.topic=application-meter |
observability.metrics.redis.appendType=kafka |
# metrics jvmGc |
observability.metrics.jvmGc.enabled=true |
observability.metrics.jvmGc.interval=30 |
observability.metrics.jvmGc.topic=platform-meter |
observability.metrics.jvmGc.appendType=kafka |
# metrics jvmMemory |
observability.metrics.jvmMemory.enabled=true |
observability.metrics.jvmMemory.interval=30 |
observability.metrics.jvmMemory.topic=platform-meter |
observability.metrics.jvmMemory.appendType=kafka |
# metrics md5Dictionary |
observability.metrics.md5Dictionary.enabled=true |
# 5 minutes |
observability.metrics.md5Dictionary.interval=300 |
observability.metrics.md5Dictionary.topic=application-meter |
observability.metrics.md5Dictionary.appendType=kafka |
### |
### tracings |
### |
observability.tracings.enabled=true #开启tracing设置 |
observability.tracings.sampledByQPS=100 |
observability.tracings.output.enabled=true |
observability.tracings.output.topic=log-tracing |
# 1000000 - 100 |
observability.tracings.output.messageMaxBytes=999900 |
observability.tracings.output.reportThread=1 |
observability.tracings.output.queuedMaxSpans=1000 |
observability.tracings.output.queuedMaxSize=1000000 |
observability.tracings.output.messageTimeout=1000 |
# tracings request |
observability.tracings.request.enabled=true |
# tracings remoteInvoke |
observability.tracings.remoteInvoke.enabled=true |
# tracings kafka |
observability.tracings.kafka.enabled=true #开启tracing必须提供kafka |
observability.tracings.kafka.servicePrefix=kafka |
# tracings jdbc |
observability.tracings.jdbc.enabled=true |
# tracings redis |
observability.tracings.redis.enabled=true |
observability.tracings.redis.servicePrefix=redis |
# tracings rabbit |
observability.tracings.rabbit.enabled=true |
observability.tracings.rabbit.servicePrefix=rabbitmq |
- 启动服务
java -javaagent:/{agent-path}/metricagent.jar=/{agent-path}/agent.properties -Dmetricagent.name={agent-name} |
---|
-jar /{agent-path}/agent-demo-0.0.1-SNAPSHOT.jar |
- {agent-name} :为name配置的服务名 |
- {agent-path} :为agent文件的路径 |
中间件监控类
中间件类型采集到kafka,topic统一为telegraf。
- 安装telegraf. 上传‘中间件采集’目录下的 telegraf,telegraf.conf文件。
- 修改配置telegraf.conf, 修改标红字体部分配置。
#配置 |
---|
[agent] |
interval = "30s" |
round_interval = true |
metric_batch_size = 1000 |
metric_buffer_limit = 10000 |
collection_jitter = "0s" |
flush_interval = "10s" |
flush_jitter = "5s" |
precision = "" |
hostname = "demo-telegraf" #实际主机名或IP |
omit_hostname = false |
[global_tags] |
tags = "_metrics" |
[[inputs.easedba_mysql]] |
servers = ["mysql:mysql@tcp(mysql:3306)/?tls=false"] |
gather_global_statuses = true |
gather_innodb = true |
gather_connection_statuses = true |
gather_db_sizes = true |
gather_replication = false |
gather_snapshot = true |
[inputs.easedba_mysql.tags] |
category = "infrastructure" |
system = "easeservice-mysql" |
cluster_name = "km_mysql" #集群名 |
instance = "demo-single" #实例名 |
host_ipv4 = "172.31.25.82" #主机IP |
service = "demo-telegrafservice-agent-for-mysql" #服务名 |
deploy_mode = "host" |
type = "mysqlserver" |
index = "mysqlserver" |
[[inputs.redis]] |
servers = ["tcp://redis:6379"] #redis服务IP端口 |
password = "123456" #redis密码 |
[inputs.redis.tags] |
host_name = "demo-single" |
cluster_name = "km_redis" |
system = "easeservice-redis" |
instance = "demo-single" |
category = "infrastructure" |
host_ipv4 = "172.31.25.82" |
service = "demo-telegrafservice-agent-for-redis" |
deploy_mode = "host" |
index = "redis" |
type = "redis" |
[[inputs.elasticsearch]] |
servers = ["http://elasticsearch:9200"] |
## Timeout for HTTP requests to the elastic search server(s) |
http_timeout = "30s" |
## When local is true (the default), the node will read only its own stats. |
## Set local to false when you want to read the node stats from all nodes |
## of the cluster. |
local = true |
## Set cluster_health to true when you want to obtain cluster health stats |
cluster_health = false #非集群模式 |
## Adjust cluster_health_level when you want to obtain detailed health stats |
## The options are |
## - indices (default) |
## - cluster |
# cluster_health_level = "indices" |
## Set cluster_stats to true when you want to obtain cluster stats. |
cluster_stats = false |
## Only gather cluster_stats from the master node. To work this require local = true |
cluster_stats_only_from_master = true |
## Indices to collect; can be one or more indices names or _all |
## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date. |
# indices_include = ["_all"] |
## One of "shards", "cluster", "indices" |
## Currently only "shards" is implemented |
# indices_level = "shards" |
## node_stats is a list of sub-stats that you want to have gathered. Valid options |
## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", |
## "breaker". Per default, all stats are gathered. |
node_stats = ["indices", "thread_pool", "jvm", "http", "os", "fs", "transport"] |
## HTTP Basic Authentication username and password. |
# username = "" |
# password = "" |
## Optional TLS Config |
# tls_ca = "/etc/telegraf/ca.pem" |
# tls_cert = "/etc/telegraf/cert.pem" |
# tls_key = "/etc/telegraf/key.pem" |
## Use TLS but skip chain & host verification |
# insecure_skip_verify = false |
## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix. |
## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and sort them by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most recent indices. |
# num_most_recent_indices = 0 |
[inputs.elasticsearch.tags] |
system = "easeservice-km05-es" |
instance = "demo-km5" |
category = "infrastructure" |
host_ipv4 = "172.31.25.82" |
service = "demo-telegrafservice-agent-for-es" |
deploy_mode = "host" |
tags = "_metrics" |
index = "elasticsearch" |
type = "elasticsearch" |
[[outputs.kafka]] |
## URLs of kafka brokers |
brokers = ["kafka:9092"] |
## Kafka topic for producer messages |
topic = "telegraf" |
data_format = "json" |
[outputs.kafka.tagdrop] |
service = ["demo-telegrafservice-agent"] |
- 启动telegraf。
chmod +x telegraf |
---|
./telegraf --config telegraf.conf |