This is a quick trial of integrating Grafana Agent metrics with VictoriaMetrics. It covers metrics only; logs and traces will be covered in a later post.

Environment setup

  • docker-compose file
version: "3"
services:
  postgres:
    image: postgres:12
    volumes:
      - ./db_data:/var/lib/postgresql/data
    ports:
      - 5432:5432
    environment:
      POSTGRES_PASSWORD: postgrespassword
  agent:
    image: grafana/agent:v0.28.1
    ports:
      - 12345:12345
    entrypoint:
      - /bin/agent
      - -config.file=/etc/agent-config/agent.yaml
      - -metrics.wal-directory=/tmp/agent/wal
      - -enable-features=integrations-next
      - -config.expand-env
      - -config.enable-read-api
    volumes:
      - ./agent.yaml:/etc/agent-config/agent.yaml
      - ./agent-data:/etc/agent/data
  prometheus:
    image: prom/prometheus
    ports:
      - 9090:9090
    volumes:
      - ./promdata:/prometheus
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
  victoriametrics:
    image: victoriametrics/victoria-metrics
    volumes:
      - ./victoriametrics:/victoriametrics
    ports:
      - 8428:8428
    command:
      - '-storageDataPath=/victoriametrics'
      - '-retentionPeriod=1'
  grafana:
    image: grafana/grafana
    ports:
      - 3000:3000
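
Before starting anything, Compose itself can validate and render the file (a quick optional check; with -q it only reports errors):

# validate the compose file and print the fully resolved version
docker-compose config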
Configuration
prometheus.yml

global:
  scrape_interval: 1s
  evaluation_interval: 1s

remote_write:
  - url: "http://victoriametrics:8428/api/v1/write"

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['prometheus:9090']

  - job_name: 'victoriametrics'
    static_configs:
      - targets: ['victoriametrics:8428']
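
Prometheus here scrapes itself and VictoriaMetrics, and remote-writes everything to VictoriaMetrics. Once the stack is running, one way to confirm the remote write is landing is to query VictoriaMetrics' Prometheus-compatible API from the host (a hypothetical check using the 8428 port mapping from the compose file):

# query VictoriaMetrics for the "up" series that Prometheus scraped and forwarded
curl -G 'http://localhost:8428/api/v1/query' --data-urlencode 'query=up{job="prometheus"}'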
agent.yaml
This enables the node_exporter and process exporter integrations, and writes the collected metrics to VictoriaMetrics.


# This file serves as an example agent configuration to interact with the
# docker compose environment.
#
# You should pass the following command line flags when running the agent
# locally and using this file:
#
#   -enable-features=integrations-next -config.expand-env -config.enable-read-api
#
# -enable-features=integrations-next is required as the file is configured for
# the integrations revamp.
#
# -config.expand-env is required to expand environment variables. Environment
# variables are used when running the agent inside of docker-compose to connect
# to the other services. When running the agent externally, the expressions will
# default to the appropriate values of the exposed ports.
#
# -config.enable-read-api is optional, but allows you to invoke the /-/config
# endpoint to examine the generated config.

server:
  log_level: debug

metrics:
  global:
    scrape_interval: 10s
    remote_write:
      - url: http://victoriametrics:8428/api/v1/write
  configs:
    - name: default
      scrape_configs:
        - job_name: avalanche
          static_configs:
            - targets: ['${AVALANCHE_HOST:-localhost:9001}']

#
# Integrations
#
# Uncomment individual integrations below to enable them. Some integrations are
# enabled by default.
#

integrations:
  node_exporter: {}

  ## process
  process:
    process_names:
      - name: "{{.Comm}}"
        cmdline:
          - '.+'

  ## mysql (requires docker-compose mysql profile)
  # mysql_configs:
  #   - data_source_name: root@(${MYSQL_HOST:-localhost:3306})/

  ## postgres (requires docker-compose postgres profile)
  postgres_configs:
    - data_source_names:
        - postgresql://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable

  ## redis (requires docker-compose redis profile)
  # redis_configs:
  #   - redis_addr: ${REDIS_HOST:-localhost:6379}

  ## dnsmasq (requires docker-compose dnsmasq profile)
  # dnsmasq_configs:
  #   - dnsmasq_address: ${DNSMASQ_HOST:-localhost:30053}
  #     leases_path: /tmp/dnsmasq-leases/dnsmasq.leases

  ## memcached (requires docker-compose memcached profile)
  # memcached_configs:
  #   - memcached_address: ${MEMCACHED_HOST:-localhost:11211}
  #     timeout: 10s

  ## statsd
  # statsd: {}

  ## consul (requires docker-compose consul profile)
  # consul_configs:
  #   - server: http://${CONSUL_HOST:-localhost:8500}

  ## elasticsearch (requires docker-compose elasticsearch profile)
  # elasticsearch_configs:
  #   - address: http://${ELASTICSEARCH_HOST:-localhost:9200}

  ## kafka (requires docker-compose kafka profile)
  # kafka_configs:
  #   - kafka_uris: [${KAFKA_HOST:-localhost:9093}]

  ## github (requires docker-compose github profile)
  # github_configs:
  #   - repositories:
  #       - grafana/agent

  ## mongodb (requires docker-compose mongodb profile)
  # mongodb_configs:
  #   - mongodb_uri: mongodb://${MONGODB_HOST:-mongodb:27017}
  #     relabel_configs:
  #       - source_labels: [__address__]
  #         target_label: service_name
  #         replacement: 'mongodb'
  #       - source_labels: [__address__]
  #         target_label: mongodb_cluster
  #         replacement: 'mongodb-cluster'

  ## cadvisor
  # cadvisor:
  #   disabled_metrics:
  #     - disk
  #   enabled_metrics:
  #     - percpu
Startup & results

  • Start
docker-compose up -d
  • Results
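
Once the containers are up, a quick host-side sanity check (hypothetical commands, reusing the ports mapped above) confirms that the agent is shipping integration metrics into VictoriaMetrics:

# list the running services
docker-compose ps
# node_load1 comes from the node_exporter integration
curl 'http://localhost:8428/api/v1/query?query=node_load1'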

Prometheus

VictoriaMetrics info

Grafana results (you need to add VictoriaMetrics as a Prometheus data source and set up dashboards; the dashboards here cover node_exporter and the process exporter)
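
Adding the data source can also be scripted against Grafana's HTTP API instead of clicking through the UI (a sketch assuming the default admin:admin credentials and the port mappings above; VictoriaMetrics speaks the Prometheus query API, so the data source type is just prometheus):

# create a Prometheus-type data source pointing at VictoriaMetrics
curl -s -u admin:admin -H 'Content-Type: application/json' \
  -d '{"name":"VictoriaMetrics","type":"prometheus","access":"proxy","url":"http://victoriametrics:8428"}' \
  http://localhost:3000/api/datasources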

Notes

Thanks to Grafana Agent's all-in-one design, host metrics can be collected with nothing more than configuration, which saves installing quite a few separate exporters. At the same time, VictoriaMetrics' multi-tenancy makes it easy to isolate data and manage metrics securely across different environments. I have previously written a comparison of vmagent and Grafana Agent, which can be used as a reference.
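
As an illustration of the multi-tenant point: the cluster version of VictoriaMetrics accepts tenant-scoped remote write URLs through vminsert, so each environment's agent can point at its own tenant. A sketch of what the agent's remote_write could look like (hypothetical vminsert address and tenant/account ID 1; not part of the single-node setup above):

metrics:
  global:
    remote_write:
      # cluster version URL format: /insert/<accountID>/prometheus/api/v1/write
      - url: http://vminsert:8480/insert/1/prometheus/api/v1/write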

References

https://grafana.com/docs/agent/latest/configuration/metrics-config/
https://grafana.com/docs/agent/latest/configuration/logs-config/
https://grafana.com/docs/agent/latest/configuration/traces-config/
https://grafana.com/docs/agent/latest/configuration/dynamic-config/
https://grafana.com/docs/agent/latest/configuration/integrations/
https://grafana.com/docs/agent/latest/configuration/create-config-file/
https://grafana.com/docs/agent/latest/configuration/
https://docs.gomplate.ca/datasources/