# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file; simply prepend
# them with $. For strings, the variable must be within quotes (ie, "$STR_VAR");
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
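# For illustration (hypothetical variable names, not defaults shipped with telegraf):
#   database = "$INFLUX_DB"     # string value: variable goes inside quotes
#   udp_payload = $UDP_PAYLOAD  # numeric value: variable stays unquoted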
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default, precision will be set to the same timestamp order as the
## collection interval, with the maximum being 1s.
## Precision will NOT be used for service inputs, such as logparser and statsd.
## Valid values are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Logging configuration:
## Run telegraf with debug log messages.
debug = false
## Run telegraf in quiet mode (error log messages only).
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = ""
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
## The full HTTP or UDP endpoint URL for your InfluxDB instance.
## Multiple urls can be specified as part of the same cluster,
## this means that only ONE of the urls will be written to in each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
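## e.g. two hypothetical cluster member URLs (illustrative hostnames, not defaults);
## only one of them would be written to in each interval:
# urls = ["http://influxdb-1.example.com:8086", "http://influxdb-2.example.com:8086"]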
urls = ["http://localhost:8086"] # required
## The target database for metrics (telegraf will create it if it does not exist).
database = "telegraf" # required
## Retention policy to write to. Empty string writes to the default rp.
retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
write_consistency = "any"
## Write timeout (for the InfluxDB client), formatted as a string.
## If not provided, will default to 5s. 0s means no timeout (not recommended).
timeout = "5s"
# username = "telegraf"
# password = "metricsmetricsmetricsmetrics"
## Set the user agent for HTTP POSTs (can be useful for log differentiation)
# user_agent = "telegraf"
## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
# udp_payload = 512
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
# ## Amon Server Key
# server_key = "my-server-key" # required.
#
# ## Amon Instance URL
# amon_instance = "https://youramoninstance" # required
#
# ## Connection timeout.
# # timeout = "5s"
# # Configuration for the AMQP server to send metrics to
# [[outputs.amqp]]
# ## AMQP url
# url = "amqp://localhost:5672/influxdb"
# ## AMQP exchange
# exchange = "telegraf"
# ## Auth method. PLAIN and EXTERNAL are supported
# # auth_method = "PLAIN"
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
#
# ## InfluxDB retention policy
# # retention_policy = "default"
# ## InfluxDB database
# # database = "telegraf"
# ## InfluxDB precision
# # precision = "s"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
# ## Amazon REGION
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Namespace for the CloudWatch MetricDatums
# namespace = "InfluxData/Telegraf"
# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
# ## Datadog API key
# apikey = "my-secret-key" # required.
#
# ## Connection timeout.
# # timeout = "5s"
# # Send telegraf metrics to file(s)
# [[outputs.file]]
# ## Files to write to, "stdout" is a specially handled file.
# files = ["stdout", "/tmp/metrics.out"]
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Configuration for Graphite server to send metrics to
# [[outputs.graphite]]
# ## TCP endpoint for your graphite instance.
# ## If multiple endpoints are configured, output will be load balanced.
# ## Only one of the endpoints will be written to with each iteration.
# servers = ["localhost:2003"]
# ## Prefix metrics name
# prefix = ""
# ## Graphite output template
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# template = "host.tags.measurement.field"
# ## timeout in seconds for the write connection to graphite
# timeout = 2
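# ## As a hypothetical illustration of the template above, a gathered metric
# ## cpu,host=web01,dc=us1 usage_idle=99 would be written to graphite as
# ## "web01.us1.cpu.usage_idle 99 <timestamp>".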
# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
# ## Udp endpoint for your graylog instance.
# servers = ["127.0.0.1:12201", "192.168.1.1:12201"]
# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
# ## Project API Token (required)
# api_token = "API Token" # required
# ## Prefix the metrics with a given name
# prefix = ""
# ## Stats output template (Graphite formatting)
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
# template = "host.tags.measurement.field"
# ## Timeout in seconds to connect
# timeout = "2s"
# ## Display Communication to Instrumental
# debug = false
# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
# ## URLs of kafka brokers
# brokers = ["localhost:9092"]
# ## Kafka topic for producer messages
# topic = "telegraf"
# ## Telegraf tag to use as a routing key
# ## ie, if this tag exists, its value will be used as the routing key
# routing_tag = "host"
#
# ## CompressionCodec represents the various compression codecs recognized by
# ## Kafka in messages.
# ## 0 : No compression
# ## 1 : Gzip compression
# ## 2 : Snappy compression
# compression_codec = 0
#
# ## RequiredAcks is used in Produce Requests to tell the broker how many
# ## replica acknowledgements it must see before responding
# ## 0 : the producer never waits for an acknowledgement from the broker.
# ## This option provides the lowest latency but the weakest durability
# ## guarantees (some data will be lost when a server fails).
# ## 1 : the producer gets an acknowledgement after the leader replica has
# ## received the data. This option provides better durability as the
# ## client waits until the server acknowledges the request as successful
# ## (only messages that were written to the now-dead leader but not yet
# ## replicated will be lost).
# ## -1: the producer gets an acknowledgement after all in-sync replicas have
# ## received the data. This option provides the best durability, we
# ## guarantee that no messages will be lost as long as at least one in
# ## sync replica remains.
# required_acks = -1
#
# ## The total number of times to retry sending a message
# max_retry = 3
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Configuration for the AWS Kinesis output.
# [[outputs.kinesis]]
# ## Amazon REGION of kinesis endpoint.
# region = "ap-southeast-2"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Kinesis StreamName must exist prior to starting telegraf.
# streamname = "StreamName"
# ## PartitionKey as used for sharding data.
# partitionkey = "PartitionKey"
# ## format of the Data payload in the kinesis PutRecord, supported
# ## String and Custom.
# format = "string"
# ## debug will show upstream aws messages.
# debug = false
# # Configuration for Librato API to send metrics to.
# [[outputs.librato]]
# ## Librato API Docs
# ## http://dev.librato.com/v1/metrics-authentication
# ## Librato API user
# api_user = "telegraf@influxdb.com" # required.
# ## Librato API token
# api_token = "my-secret-token" # required.
# ## Debug
# # debug = false
# ## Connection timeout.
# # timeout = "5s"
# ## Output source Template (same as graphite buckets)
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
# ## This template is used in librato's source (not metric's name)
# template = "host"
#
# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
# servers = ["localhost:1883"] # required.
#
# ## MQTT outputs send metrics to this topic format
# ## "<topic_prefix>/<hostname>/<pluginname>/"
# ## ex: prefix/web01.example.com/mem
# topic_prefix = "telegraf"
#
# ## username and password to connect to the MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Send telegraf measurements to NATS
# [[outputs.nats]]
# ## URLs of NATS servers
# servers = ["nats://localhost:4222"]
# ## Optional credentials
# # username = ""
# # password = ""
# ## NATS subject for producer messages
# subject = "telegraf"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
# ## Location of nsqd instance listening on TCP
# server = "localhost:4150"
# ## NSQ topic for producer messages
# topic = "telegraf"
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
# # Configuration for OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
# ## prefix for metrics keys
# prefix = "my.specific.prefix."
#
# ## DNS name of the OpenTSDB server
# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
# ## telnet API. "http://opentsdb.example.com" will use the Http API.
# host = "opentsdb.example.com"
#
# ## Port of the OpenTSDB server
# port = 4242
#
# ## Number of data points to send to OpenTSDB in Http requests.
# ## Not used with telnet API.
# httpBatchSize = 50
#
# ## Debug true - Prints OpenTSDB communication
# debug = false
# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
# ## Address to listen on
# # listen = ":9126"
# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
# ## URL of server
# url = "localhost:5555"
# ## transport protocol to use either tcp or udp
# transport = "tcp"
# ## separator to use between input name and field name in Riemann service name
# separator = " "
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
# # Print all metrics that pass through this filter.
# [[processors.printer]]
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
# # Keep the aggregate min/max of each metric passing through.
# [[aggregators.minmax]]
# ## General Aggregator Arguments:
# ## The period on which to flush & clear the aggregator.
# period = "30s"
# ## If true, the original metric will be dropped by the
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
###############################################################################
# INPUT PLUGINS #
###############################################################################
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default, telegraf gathers stats for all mountpoints.
## Setting mountpoints will restrict the stats to the specified mountpoints.
# mount_points = ["/"]
## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
## present on /run, /var/run, /dev/shm or /dev).
ignore_fs = ["tmpfs", "devtmpfs"]
# Read metrics about disk IO by device
[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Read metrics about system load & uptime
[[inputs.system]]
# no configuration
# # Read stats from aerospike server(s)
# [[inputs.aerospike]]
# ## Aerospike servers to connect to (with port)
# ## This plugin will query all namespaces the aerospike
# ## server has configured and get stats for them.
# servers = ["localhost:3000"]
# # Read Apache status information (mod_status)
# [[inputs.apache]]
# ## An array of Apache status URI to gather stats.
# ## Default is "http://localhost/server-status?auto".
# urls = ["http://localhost/server-status?auto"]
# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
# ## Bcache sets path
# ## If not specified, then default is:
# bcachePath = "/sys/fs/bcache"
#
# ## By default, telegraf gathers stats for all bcache devices
# ## Setting devices will restrict the stats to the specified
# ## bcache devices.
# bcacheDevs = ["bcache0"]
# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
# # This is the context root used to compose the jolokia url
# context = "/jolokia/read"
# ## List of cassandra servers exposing jolokia read service
# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
# ## List of metrics collected on above servers
# ## Each metric consists of a jmx path.
# ## This will collect all heap memory usage metrics from the jvm and
# ## ReadLatency metrics for all keyspaces and tables.
# ## "type=Table" in the query works with Cassandra3.0. Older versions might
# ## need to use "type=ColumnFamily"
# metrics = [
# "/java.lang:type=Memory/HeapMemoryUsage",
# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
# ]
# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
# ## This is the recommended interval to poll. Too frequent and you will lose
# ## data points due to timeouts during rebalancing and recovery
# interval = '1m'
#
# ## All configuration values are optional, defaults are shown below
#
# ## location of ceph binary
# ceph_binary = "/usr/bin/ceph"
#
# ## directory in which to look for socket files
# socket_dir = "/var/run/ceph"
#
# ## prefix of MON and OSD socket files, used to determine socket type
# mon_prefix = "ceph-mon"
# osd_prefix = "ceph-osd"
#
# ## suffix used to identify socket files
# socket_suffix = "asok"
#
# ## Ceph user to authenticate as
# ceph_user = "client.admin"
#
# ## Ceph configuration to use to locate the cluster
# ceph_config = "/etc/ceph/ceph.conf"
#
# ## Whether to gather statistics via the admin socket
# gather_admin_socket_stats = true
#
# ## Whether to gather statistics via ceph commands
# gather_cluster_stats = true
# # Read specific statistics per cgroup
# [[inputs.cgroup]]
# ## Directories in which to look for files, globs are supported.
# ## Consider restricting paths to the set of cgroups you really
# ## want to monitor if you have a large number of cgroups, to avoid
# ## any cardinality issues.
# # paths = [
# # "/cgroup/memory",
# # "/cgroup/memory/child1",
# # "/cgroup/memory/child2/*",
# # ]
# ## cgroup stat fields, as file names, globs are supported.
# ## these file names are appended to each path from above.
# # files = ["memory.*usage*", "memory.limit_in_bytes"]
# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
# ## Amazon Region
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Assumed credentials via STS if role_arn is specified
# ## 2) explicit credentials from 'access_key' and 'secret_key'
# ## 3) shared profile from 'profile'
# ## 4) environment variables
# ## 5) shared credentials file
# ## 6) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
# period = "5m"
#
# ## Collection Delay (required - must account for metrics availability via CloudWatch API)
# delay = "5m"
#
# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
# ## gaps or overlap in pulled data
# interval = "5m"
#
# ## Configure the TTL for the internal cache of metrics.
# ## Defaults to 1 hr if not specified
# #cache_ttl = "10m"
#
# ## Metric Statistic Namespace (required)
# namespace = "AWS/ELB"
#
# ## Maximum requests per second. Note that the global default AWS rate limit is
# ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a
# ## maximum of 10. Optional - default value is 10.
# ratelimit = 10
#
# ## Metrics to Pull (optional)
# ## Defaults to all Metrics in Namespace if nothing is provided
# ## Refreshes Namespace available metrics every 1h
# #[[inputs.cloudwatch.metrics]]
# # names = ["Latency", "RequestCount"]
# #
# # ## Dimension filters for Metric (optional)
# # [[inputs.cloudwatch.metrics.dimensions]]
# # name = "LoadBalancerName"
# # value = "p-example"
# # Gather health check statuses from services registered in Consul
# [[inputs.consul]]
# ## Most of these values default to the ones configured at the Consul agent level.
# ## Optional Consul server address (default: "localhost")
# # address = "localhost"
# ## Optional URI scheme for the Consul server (default: "http")
# # scheme = "http"
# ## Optional ACL token used in every request (default: "")
# # token = ""
# ## Optional username used for request HTTP Basic Authentication (default: "")
# # username = ""
# ## Optional password used for HTTP Basic Authentication (default: "")
# # password = ""
# ## Optional data centre to query the health checks from (default: "")
# # datacentre = ""
# # Read metrics from one or many couchbase clusters
# [[inputs.couchbase]]
# ## specify servers via a url matching:
# ## [protocol://][:password]@address[:port]
# ## e.g.
# ## http://couchbase-0.example.com/
# ## http://admin:secret@couchbase-0.example.com:8091/
# ##
# ## If no servers are specified, then localhost is used as the host.
# ## If no protocol is specified, HTTP is used.
# ## If no port is specified, 8091 is used.
# servers = ["http://localhost:8091"]
# # Read CouchDB Stats from one or more servers
# [[inputs.couchdb]]
# ## Works with CouchDB stats endpoints out of the box
# ## Multiple HOSTs from which to read CouchDB stats:
# hosts = ["http://localhost:8086/_stats"]
# # Read metrics from one or many disque servers
# [[inputs.disque]]
# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port and password.
# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
# ## If no servers are specified, then localhost is used as the host.
# servers = ["localhost"]
# # Query given DNS server and gives statistics
# [[inputs.dns_query]]
# ## servers to query
# servers = ["8.8.8.8"] # required
#
# ## Domains or subdomains to query. "."(root) is default
# domains = ["."] # optional
#
# ## Query record type. Default is "A"
# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
# record_type = "A" # optional
#
# ## Dns server port. 53 is default
# port = 53 # optional
#
# ## Query timeout in seconds. Default is 2 seconds
# timeout = 2 # optional
# # Read metrics about docker containers
# [[inputs.docker]]
# ## Docker Endpoint
# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
# ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
# endpoint = "unix:///var/run/docker.sock"
# ## Only collect metrics for these containers, collect all if empty
# container_names = []
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
# ## Whether to report for each container per-device blkio (8:0, 8:1...) and
# ## network (eth0, eth1, ...) stats or not
# perdevice = true
# ## Whether to report for each container total blkio and network stats or not
# total = false
#
# # Read statistics from one or many dovecot servers
# [[inputs.dovecot]]
# ## specify dovecot servers via an address:port list
# ## e.g.
# ## localhost:24242
# ##
# ## If no servers are specified, then localhost is used as the host.
# servers = ["localhost:24242"]
# ## Type is one of "user", "domain", "ip", or "global"
# type = "global"
# ## Wildcard matches like "*.com". An empty string "" is same as "*"
# ## If type = "ip" filters should be <IP/network>
# filters = [""]
# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
# ## specify a list of one or more Elasticsearch servers
# servers = ["http://localhost:9200"]
#
# ## Timeout for HTTP requests to the Elasticsearch server(s)
# http_timeout = "5s"
2016-06-01 08:54:18 +02:00
# ## set local to false when you want to read the indices stats from all nodes
# ## within the cluster
# local = true
#
# ## set cluster_health to true when you want to also obtain cluster level stats
# cluster_health = false
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
# ## Commands array
# commands = [
# "/tmp/test.sh",
# "/usr/bin/mycollector --foo=bar",
# "/tmp/collect_*.sh"
# ]
#
# ## Timeout for each command to complete.
# timeout = "5s"
#
# ## measurement name suffix (for separating different commands)
# name_suffix = "_mycollector"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
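# ## As a hypothetical illustration: with data_format = "influx", a command
# ## like the /tmp/test.sh above could simply print a line of line protocol,
# ## e.g. my_metric,host=web01 value=42
# ## and telegraf would parse it like any other gathered metric.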
# # Read stats about given file(s)
# [[inputs.filestat]]
# ## Files to gather stats about.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## "/var/log/**.log" -> recursively find all .log files in /var/log
# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
# ## "/var/log/apache.log" -> just tail the apache log file
# ##
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/log/**.log"]
# ## If true, read the entire file and calculate an md5 checksum.
# md5 = false
# # Read flattened metrics from one or more GrayLog HTTP endpoints
# [[inputs.graylog]]
# ## API endpoint, currently supported API:
# ##
# ## - multiple (Ex http://<host>:12900/system/metrics/multiple)
# ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
# ##
# ## For namespace endpoint, the metrics array will be ignored for that call.
# ## Endpoint can contain namespace and multiple type calls.
# ##
# ## Please check http://[graylog-server-ip]:12900/api-browser for full list
# ## of endpoints
# servers = [
# "http://[graylog-server-ip]:12900/system/metrics/multiple",
# ]
#
# ## Metrics list
# ## List of metrics can be found on Graylog webservice documentation.
# ## Or by hitting the web service api at:
# ## http://[graylog-host]:12900/system/metrics
# metrics = [
# "jvm.cl.loaded",
# "jvm.memory.pools.Metaspace.committed"
# ]
#
# ## Username and password
# username = ""
# password = ""
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read metrics of haproxy, via socket or csv stats page
# [[inputs.haproxy]]
# ## An array of address to gather stats about. Specify an ip or hostname
# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
# ## Make sure you specify the complete path to the stats endpoint
# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
# #
# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
# ##
# ## You can also use local socket with standard wildcard globbing.
# ## Server address not starting with 'http' will be treated as a possible
# ## socket, so both examples below are valid.
# ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
# # HTTP/HTTPS request given an address a method and a timeout
# [[inputs.http_response]]
# ## Server address (default http://localhost)
# address = "http://github.com"
# ## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
# ## HTTP Request Method
# method = "GET"
# ## Whether to follow redirects from the server (defaults to false)
# follow_redirects = true
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# # Host = "github.com"
# ## Optional HTTP Request Body
# # body = '''
# # {'fake':'data'}
# # '''
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read flattened metrics from one or more JSON HTTP endpoints
# [[inputs.httpjson]]
# ## NOTE This plugin only reads numerical measurements, strings and booleans
# ## will be ignored.
#
# ## a name for the service being polled
# name = "webserver_stats"
#
# ## URL of each server in the service's cluster
# servers = [
# "http://localhost:9999/stats/",
# "http://localhost:9998/stats/",
# ]
# ## Set response_timeout (default 5 seconds)
# response_timeout = "5s"
#
# ## HTTP method to use: GET or POST (case-sensitive)
# method = "GET"
#
# ## List of tag names to extract from top-level of JSON server response
# # tag_keys = [
# # "my_tag_1",
# # "my_tag_2"
# # ]
#
# ## HTTP parameters (all values must be strings)
# [inputs.httpjson.parameters]
# event_type = "cpu_spike"
# threshold = "0.75"
#
# ## HTTP Header parameters (all values must be strings)
# # [inputs.httpjson.headers]
# # X-Auth-Token = "my-xauth-token"
# # apiVersion = "v1"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.influxdb]]
# ## Works with InfluxDB debug endpoints out of the box,
# ## but other services can use this format too.
# ## See the influxdb plugin's README for more details.
#
# ## Multiple URLs from which to read InfluxDB-formatted JSON
# ## Default is "http://localhost:8086/debug/vars".
# urls = [
# "http://localhost:8086/debug/vars"
# ]
#
# ## http request & header timeout
# timeout = "5s"
# # Read metrics from one or many bare metal servers
# [[inputs.ipmi_sensor]]
# ## specify servers via a url matching:
# ## [username[:password]@][protocol[(address)]]
# ## e.g.
# ## root:passwd@lan(127.0.0.1)
# ##
# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
# ## This is the context root used to compose the jolokia url
# ## NOTE that your jolokia security policy must allow for POST requests.
# context = "/jolokia"
#
# ## This specifies the mode used
# # mode = "proxy"
# #
# ## When in proxy mode this section is used to specify further
# ## proxy address configurations.
# ## Remember to change host address to fit your environment.
# # [inputs.jolokia.proxy]
# # host = "127.0.0.1"
# # port = "8080"
#
#
# ## List of servers exposing jolokia read service
# [[inputs.jolokia.servers]]
# name = "as-server-01"
# host = "127.0.0.1"
# port = "8080"
# # username = "myuser"
# # password = "mypassword"
#
# ## List of metrics collected on above servers
# ## Each metric consists of a name, a jmx path and either
# ## a pass or drop slice attribute.
# ## This collects all heap memory usage metrics.
# [[inputs.jolokia.metrics]]
# name = "heap_memory_usage"
# mbean = "java.lang:type=Memory"
# attribute = "HeapMemoryUsage"
#
# ## This collects thread count metrics.
# [[inputs.jolokia.metrics]]
# name = "thread_count"
# mbean = "java.lang:type=Threading"
# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
#
# ## This collects loaded/unloaded class count metrics.
# [[inputs.jolokia.metrics]]
# name = "class_count"
# mbean = "java.lang:type=ClassLoading"
# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
# # Read metrics from the kubernetes kubelet api
# [[inputs.kubernetes]]
# ## URL for the kubelet
# url = "http://1.1.1.1:10255"
#
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
#
# ## Optional SSL Config
# # ssl_ca = /path/to/cafile
# # ssl_cert = /path/to/certfile
# # ssl_key = /path/to/keyfile
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
# ## An array of URI to gather stats about LeoFS.
# ## Specify an ip or hostname with port. ie 127.0.0.1:4020
# servers = ["127.0.0.1:4021"]
# # Read metrics from local Lustre service on OST, MDS
# [[inputs.lustre2]]
# ## An array of /proc globs to search for Lustre stats
# ## If not specified, the default will work on Lustre 2.5.x
# ##
# # ost_procfiles = [
# # "/proc/fs/lustre/obdfilter/*/stats",
# # "/proc/fs/lustre/osd-ldiskfs/*/stats",
# # "/proc/fs/lustre/obdfilter/*/job_stats",
# # ]
# # mds_procfiles = [
# # "/proc/fs/lustre/mdt/*/md_stats",
# # "/proc/fs/lustre/mdt/*/job_stats",
# # ]
# # Gathers metrics from the /3.0/reports MailChimp API
# [[inputs.mailchimp]]
# ## MailChimp API key
# ## get from https://admin.mailchimp.com/account/api/
# api_key = "" # required
# ## Reports for campaigns sent more than days_old ago will not be collected.
# ## 0 means collect all.
# days_old = 0
# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
# # campaign_id = ""
# # Read metrics from one or many memcached servers
# [[inputs.memcached]]
# ## An array of address to gather stats about. Specify an ip or hostname
# ## with optional port. ie localhost, 10.0.0.1:11211, etc.
# servers = ["localhost:11211"]
# # unix_sockets = ["/var/run/memcached.sock"]
# # Telegraf plugin for gathering metrics from N Mesos masters
# [[inputs.mesos]]
# ## Timeout, in ms.
# timeout = 100
# ## A list of Mesos masters.
# masters = ["localhost:5050"]
# ## Master metrics groups to be collected, by default, all enabled.
# master_collections = [
# "resources",
# "master",
# "system",
# "agents",
# "frameworks",
# "tasks",
# "messages",
# "evqueue",
# "registrar",
# ]
# ## A list of Mesos slaves, default is []
# # slaves = []
# ## Slave metrics groups to be collected, by default, all enabled.
# # slave_collections = [
# # "resources",
# # "agent",
# # "system",
# # "executors",
# # "tasks",
# # "messages",
# # ]
# # Read metrics from one or many MongoDB servers
# [[inputs.mongodb]]
# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port and password. ie,
# ## mongodb://user:auth_key@10.10.3.30:27017,
# ## mongodb://10.10.3.33:18832,
# ## 10.0.0.1:10000, etc.
# servers = ["127.0.0.1:27017"]
# gather_perdb_stats = false
# # Read metrics from one or many mysql servers
# [[inputs.mysql]]
# ## specify servers via a url matching:
# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
# ## e.g.
# ## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false
# ## db_user@tcp(127.0.0.1:3306)/?tls=false
# #
# ## If no servers are specified, then localhost is used as the host.
# servers = ["tcp(127.0.0.1:3306)/"]
# ## the limits for metrics from perf_events_statements
# perf_events_statements_digest_text_limit = 120
# perf_events_statements_limit = 250
# perf_events_statements_time_limit = 86400
# #
# ## if the list is empty, then metrics are gathered from all database tables
# table_schema_databases = []
# #
# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided in the list above
# gather_table_schema = false
# #
# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
# gather_process_list = true
# #
# ## gather auto_increment columns and max values from information schema
# gather_info_schema_auto_inc = true
# #
# ## gather metrics from SHOW SLAVE STATUS command output
# gather_slave_status = true
# #
# ## gather metrics from SHOW BINARY LOGS command output
# gather_binary_logs = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
# gather_table_io_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
# gather_table_lock_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
# gather_index_io_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
# gather_event_waits = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
# gather_file_events_stats = false
# #
# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
# gather_perf_events_statements = false
# #
# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
# interval_slow = "30m"
# # Read metrics about network interface usage
# [[inputs.net]]
# ## By default, telegraf gathers stats from any up interface (excluding loopback)
# ## Setting interfaces will tell it to gather these explicit interfaces,
# ## regardless of status.
# ##
# # interfaces = ["eth0"]
# # TCP or UDP 'ping' given url and collect response time in seconds
# [[inputs.net_response]]
# ## Protocol, must be "tcp" or "udp"
# protocol = "tcp"
# ## Server address (default localhost)
# address = "github.com:80"
# ## Set timeout
# timeout = "1s"
#
# ## Optional string sent to the server
# # send = "ssh"
# ## Optional expected string in answer
# # expect = "ssh"
# ## Set read timeout (only used if expecting a response)
# read_timeout = "1s"
# # Read TCP metrics such as established, time wait and sockets counts.
# [[inputs.netstat]]
# # no configuration
# # Read Nginx's basic status information (ngx_http_stub_status_module)
# [[inputs.nginx]]
# ## An array of Nginx stub_status URI to gather stats.
# urls = ["http://localhost/status"]
# # Read NSQ topic and channel statistics.
# [[inputs.nsq]]
# ## An array of NSQD HTTP API endpoints
# endpoints = ["http://localhost:4151"]
# # Collect kernel snmp counters and network interface statistics
# [[inputs.nstat]]
# ## file paths for proc files. If empty default paths will be used:
# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
# ## These can also be overridden with env variables, see README.
# proc_net_netstat = "/proc/net/netstat"
# proc_net_snmp = "/proc/net/snmp"
# proc_net_snmp6 = "/proc/net/snmp6"
# ## dump metrics with 0 values too
# dump_zeros = true
# # Get standard NTP query metrics, requires ntpq executable.
# [[inputs.ntpq]]
# ## If false, set the -n ntpq flag. Can reduce metric gather time.
# dns_lookup = true
# # Read metrics of passenger using passenger-status
# [[inputs.passenger]]
# ## Path of passenger-status.
# ##
# ## The plugin gathers metrics by parsing the XML output of passenger-status
# ## More information about the tool:
# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
# ##
# ## If no path is specified, then the plugin simply executes passenger-status,
# ## hoping it can be found in your PATH
# command = "passenger-status -v --show=xml"
# # Read metrics of phpfpm, via HTTP status page or socket
# [[inputs.phpfpm]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with optional port and path
# ##
# ## Plugin can be configured in three modes (either can be used):
# ## - http: the URL must start with http:// or https://, ie:
# ## "http://localhost/status"
# ## "http://192.168.130.1/status?full"
# ##
# ## - unixsocket: path to fpm socket, ie:
# ## "/var/run/php5-fpm.sock"
# ## or using a custom fpm status path:
# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
# ##
# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
# ## "fcgi://10.0.0.12:9000/status"
# ## "cgi://10.0.10.12:9001/status"
# ##
# ## Example of multiple gathering from local socket and remote host
# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
# urls = ["http://localhost/status"]
# # Ping given url(s) and return statistics
# [[inputs.ping]]
# ## NOTE: this plugin forks the ping command. You may need to set capabilities
# ## via setcap cap_net_raw+p /bin/ping
# #
# ## urls to ping
# urls = ["www.google.com"] # required
# ## number of pings to send per collection (ping -c <COUNT>)
# # count = 1
# ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
# # ping_interval = 1.0
# ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
# # timeout = 1.0
# ## interface to send ping from (ping -I <INTERFACE>)
# # interface = ""
# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# ##
# ## All connection parameters are optional.
# ##
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# ##
# address = "host=localhost user=postgres sslmode=disable"
#
# ## A list of databases to explicitly ignore. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'databases' option.
# # ignored_databases = ["postgres", "template0", "template1"]
#
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
# # databases = ["app_production", "testing"]
# # Read metrics from one or many postgresql servers
# [[inputs.postgresql_extensible]]
# ## specify address via a url matching:
# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
# #
# ## All connection parameters are optional. #
# ## Without the dbname parameter, the driver will default to a database
# ## with the same name as the user. This dbname is just for instantiating a
# ## connection with the server and doesn't restrict the databases we are trying
# ## to grab metrics for.
# #
# address = "host=localhost user=postgres sslmode=disable"
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered.
# ## databases = ["app_production", "testing"]
# #
# # outputaddress = "db01"
# ## A custom name for the database that will be used as the "server" tag in the
# ## measurement output. If not specified, a default one generated from
# ## the connection address is used.
# #
# ## Define the toml config where the sql queries are stored
# ## New queries can be added. If withdbname is set to true and no databases
# ## are defined in the 'databases' field, the sql query is ended by an
# ## 'is not null' in order to make the query succeed.
# ## Example :
# ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
# ## because the databases variable was set to ['postgres', 'pgbench'] and
# ## withdbname was true. Be careful: if withdbname is set to false, you must
# ## not define the where clause (ie, with the dbname). The tagvalue
# ## field is used to define custom tags (separated by commas).
# ## The optional "measurement" value can be used to override the default
# ## output measurement name ("postgresql").
# #
# ## Structure :
# ## [[inputs.postgresql_extensible.query]]
# ## sqlquery string
# ## version string
# ## withdbname boolean
# ## tagvalue string (comma separated)
# ## measurement string
# [[inputs.postgresql_extensible.query]]
# sqlquery="SELECT * FROM pg_stat_database"
# version=901
# withdbname=false
# tagvalue=""
# measurement=""
# [[inputs.postgresql_extensible.query]]
# sqlquery="SELECT * FROM pg_stat_bgwriter"
# version=901
# withdbname=false
# tagvalue="postgresql.stats"
# # Read metrics from one or many PowerDNS servers
# [[inputs.powerdns]]
# ## An array of sockets to gather stats about.
# ## Specify a path to unix socket.
# unix_sockets = ["/var/run/pdns.controlsocket"]
# # Monitor process cpu and memory usage
# [[inputs.procstat]]
# ## Must specify one of: pid_file, exe, or pattern
# ## PID file to monitor process
# pid_file = "/var/run/nginx.pid"
# ## executable name (ie, pgrep <exe>)
# # exe = "nginx"
# ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
# # pattern = "nginx"
# ## user as argument for pgrep (ie, pgrep -u <user>)
# # user = "nginx"
#
# ## override for process_name
# ## This is optional; default is sourced from /proc/<pid>/status
# # process_name = "bar"
# ## Field name prefix
# prefix = ""
# ## comment this out if you want raw cpu_time stats
# fielddrop = ["cpu_time_*"]
# # Read metrics from one or many prometheus clients
# [[inputs.prometheus]]
# ## An array of urls to scrape metrics from.
# urls = ["http://localhost:9100/metrics"]
#
# ## Use bearer token for authorization
# # bearer_token = /path/to/bearer/token
#
# ## Optional SSL Config
# # ssl_ca = /path/to/cafile
# # ssl_cert = /path/to/certfile
# # ssl_key = /path/to/keyfile
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
# # Reads last_run_summary.yaml file and converts to measurements
# [[inputs.puppetagent]]
# ## Location of puppet last run summary file
# location = "/var/lib/puppet/state/last_run_summary.yaml"
# # Read metrics from one or many RabbitMQ servers via the management API
# [[inputs.rabbitmq]]
# # url = "http://localhost:15672"
# # name = "rmq-server-1" # optional tag
# # username = "guest"
# # password = "guest"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## A list of nodes to pull metrics about. If not specified, metrics for
# ## all nodes are gathered.
# # nodes = ["rabbit@node1", "rabbit@node2"]
# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
# [[inputs.raindrops]]
# ## An array of raindrops middleware URI to gather stats.
# urls = ["http://localhost:8080/_raindrops"]
# # Read metrics from one or many redis servers
# [[inputs.redis]]
# ## specify servers via a url matching:
# ## [protocol://][:password]@address[:port]
# ## e.g.
# ## tcp://localhost:6379
# ## tcp://:password@192.168.99.100
# ## unix:///var/run/redis.sock
# ##
# ## If no servers are specified, then localhost is used as the host.
# ## If no port is specified, 6379 is used
# servers = ["tcp://localhost:6379"]
# # Read metrics from one or many RethinkDB servers
# [[inputs.rethinkdb]]
# ## An array of URI to gather stats about. Specify an ip or hostname
# ## with optional port and password. ie,
# ## rethinkdb://user:auth_key@10.10.3.30:28105,
# ## rethinkdb://10.10.3.33:18832,
# ## 10.0.0.1:10000, etc.
# servers = ["127.0.0.1:28015"]
# # Read metrics one or many Riak servers
# [[inputs.riak]]
# # Specify a list of one or more riak http servers
# servers = ["http://localhost:8098"]
# # Retrieves SNMP values from remote agents
# [[inputs.snmp]]
# agents = [ "127.0.0.1:161" ]
# ## Timeout for each SNMP query.
# timeout = "5s"
# ## Number of retries to attempt within timeout.
# retries = 3
# ## SNMP version, values can be 1, 2, or 3
# version = 2
#
# ## SNMP community string.
# community = "public"
#
# ## The GETBULK max-repetitions parameter
# max_repetitions = 10
#
# ## SNMPv3 auth parameters
# #sec_name = "myuser"
# #auth_protocol = "md5" # Values: "MD5", "SHA", ""
# #auth_password = "pass"
# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
# #context_name = ""
# #priv_protocol = "" # Values: "DES", "AES", ""
# #priv_password = ""
#
# ## measurement name
# name = "system"
# [[inputs.snmp.field]]
# name = "hostname"
# oid = ".1.0.0.1.1"
# [[inputs.snmp.field]]
# name = "uptime"
# oid = ".1.0.0.1.2"
# [[inputs.snmp.field]]
# name = "load"
# oid = ".1.0.0.1.3"
# [[inputs.snmp.field]]
# oid = "HOST-RESOURCES-MIB::hrMemorySize"
#
# [[inputs.snmp.table]]
# ## measurement name
# name = "remote_servers"
# inherit_tags = [ "hostname" ]
# [[inputs.snmp.table.field]]
# name = "server"
# oid = ".1.0.0.0.1.0"
# is_tag = true
# [[inputs.snmp.table.field]]
# name = "connections"
# oid = ".1.0.0.0.1.1"
# [[inputs.snmp.table.field]]
# name = "latency"
# oid = ".1.0.0.0.1.2"
#
# [[inputs.snmp.table]]
# ## auto populate table's fields using the MIB
# oid = "HOST-RESOURCES-MIB::hrNetworkTable"
# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
# [[inputs.snmp_legacy]]
# ## Use 'oids.txt' file to translate oids to names
# ## To generate 'oids.txt' you need to run:
# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
# ## Or if you have another MIB folder with custom MIBs
# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
# snmptranslate_file = "/tmp/oids.txt"
# [[inputs.snmp.host]]
# address = "192.168.2.2:161"
# # SNMP community
# community = "public" # default public
# # SNMP version (1, 2 or 3)
# # Version 3 not supported yet
# version = 2 # default 2
# # SNMP response timeout
# timeout = 2.0 # default 2.0
# # SNMP request retries
# retries = 2 # default 2
# # Which get/bulk do you want to collect for this host
# collect = ["mybulk", "sysservices", "sysdescr"]
# # Simple list of OIDs to get, in addition to "collect"
# get_oids = []
#
# [[inputs.snmp_legacy.host]]
# address = "192.168.2.3:161"
# community = "public"
# version = 2
# timeout = 2.0
# retries = 2
# collect = ["mybulk"]
# get_oids = [
# "ifNumber",
# ".1.3.6.1.2.1.1.3.0",
# ]
#
# [[inputs.snmp_legacy.get]]
# name = "ifnumber"
# oid = "ifNumber"
#
# [[inputs.snmp_legacy.get]]
# name = "interface_speed"
# oid = "ifSpeed"
# instance = "0"
#
# [[inputs.snmp_legacy.get]]
# name = "sysuptime"
# oid = ".1.3.6.1.2.1.1.3.0"
# unit = "second"
#
# [[inputs.snmp_legacy.bulk]]
# name = "mybulk"
# max_repetition = 127
# oid = ".1.3.6.1.2.1.1"
#
# [[inputs.snmp_legacy.bulk]]
# name = "ifoutoctets"
# max_repetition = 127
# oid = "ifOutOctets"
#
# [[inputs.snmp_legacy.host]]
# address = "192.168.2.13:161"
# #address = "127.0.0.1:161"
# community = "public"
# version = 2
# timeout = 2.0
# retries = 2
# #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
# collect = ["sysuptime" ]
# [[inputs.snmp_legacy.host.table]]
# name = "iftable3"
# include_instances = ["enp5s0", "eth1"]
#
# # SNMP TABLEs
# # table without mapping nor subtables
# [[inputs.snmp_legacy.table]]
# name = "iftable1"
# oid = ".1.3.6.1.2.1.31.1.1.1"
#
# # table without mapping but with subtables
# [[inputs.snmp_legacy.table]]
# name = "iftable2"
# oid = ".1.3.6.1.2.1.31.1.1.1"
# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
#
# # table with mapping but without subtables
# [[inputs.snmp_legacy.table]]
# name = "iftable3"
# oid = ".1.3.6.1.2.1.31.1.1.1"
# # if empty, get all instances
# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# # if empty, get all subtables
#
# # table with both mapping and subtables
# [[inputs.snmp_legacy.table]]
# name = "iftable4"
# oid = ".1.3.6.1.2.1.31.1.1.1"
# # if empty, get all instances
# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
# # if empty, get all subtables
# # sub_tables entries need not be "real" subtables
# sub_tables = [".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]

# # Read metrics from Microsoft SQL Server
# [[inputs.sqlserver]]
# ## Specify instances to monitor with a list of connection strings.
# ## All connection parameters are optional.
# ## By default, the host is localhost, listening on default port, TCP 1433.
# ## For Windows, the user is the currently running AD user (SSO).
# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
# ## parameters.
# # servers = [
# # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
# # ]

# # Inserts sine and cosine waves for demonstration purposes
# [[inputs.trig]]
# ## Set the amplitude
# amplitude = 10.0

# # Read Twemproxy stats data
# [[inputs.twemproxy]]
# ## Twemproxy stats address and port (no scheme)
# addr = "localhost:22222"
# ## Monitor pool name
# pools = ["redis_pool", "mc_pool"]

# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
# ## The default location of the varnishstat binary can be overridden with:
# binary = "/usr/bin/varnishstat"
#
# ## By default, telegraf gathers stats for 3 metric points.
# ## Setting stats will override the defaults shown below.
# ## Glob matching can be used, ie, stats = ["MAIN.*"]
# ## stats may also be set to ["*"], which will collect all stats
# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]

# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
# [[inputs.zfs]]
# ## ZFS kstat path. Ignored on FreeBSD
# ## If not specified, then default is:
# # kstatPath = "/proc/spl/kstat/zfs"
#
# ## By default, telegraf gathers all zfs stats
# ## If not specified, then default is:
# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
#
# ## By default, don't gather zpool stats
# # poolMetrics = false

# # Reads 'mntr' stats from one or many zookeeper servers
# [[inputs.zookeeper]]
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
#
# ## If no servers are specified, then localhost is used as the host.
# ## If no port is specified, 2181 is used
# servers = [":2181"]

###############################################################################
# SERVICE INPUT PLUGINS #
###############################################################################

# # Influx HTTP write listener
# [[inputs.http_listener]]
# ## Address and port to host HTTP listener on
# service_address = ":8186"
#
# ## maximum duration before timing out read of the request
# read_timeout = "10s"
# ## maximum duration before timing out write of the response
# write_timeout = "10s"
#
# ## Maximum allowed http request body size in bytes.
# ## 0 means to use the default of 536,870,912 bytes (512 mebibytes)
# max_body_size = 0
#
# ## Maximum line size allowed to be sent in bytes.
# ## 0 means to use the default of 65536 bytes (64 kibibytes)
# max_line_size = 0
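#
# ## Once running, the listener accepts InfluxDB-style line protocol on
# ## its /write endpoint; a quick smoke test (assuming the default
# ## address above):
# ##   curl -i -XPOST 'http://localhost:8186/write' \
# ##     --data-binary 'cpu_load,host=server01 value=0.64'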

# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
# ## topic(s) to consume
# topics = ["telegraf"]
# ## an array of Zookeeper connection strings
# zookeeper_peers = ["localhost:2181"]
# ## Zookeeper Chroot
# zookeeper_chroot = ""
# ## the name of the consumer group
# consumer_group = "telegraf_metrics_consumers"
# ## Offset (must be either "oldest" or "newest")
# offset = "oldest"
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"

# # Stream and parse log file(s).
# [[inputs.logparser]]
# ## Log files to parse.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## /var/log/**.log -> recursively find all .log files in /var/log
# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
# ## /var/log/apache.log -> only tail the apache log file
# files = ["/var/log/apache/access.log"]
# ## Read file from beginning.
# from_beginning = false
#
# ## Parse logstash-style "grok" patterns:
# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
# [inputs.logparser.grok]
# ## This is a list of patterns to check the given log file(s) for.
# ## Note that adding patterns here increases processing time. The most
# ## efficient configuration is to have one pattern per logparser.
# ## Other common built-in patterns are:
# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
# patterns = ["%{COMBINED_LOG_FORMAT}"]
# ## Name of the measurement to output.
# measurement = "apache_access_log"
# ## Full path(s) to custom pattern files.
# custom_pattern_files = []
# ## Custom patterns can also be defined here. Put one pattern per line.
# custom_patterns = '''
# '''
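# ## e.g. a hypothetical extra pattern, one "NAME regex" pair per line:
# # custom_patterns = '''
# #   POSTFIX_QUEUEID [0-9A-F]{10,11}
# # '''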

# # Read metrics from MQTT topic(s)
# [[inputs.mqtt_consumer]]
# servers = ["localhost:1883"]
# ## MQTT QoS, must be 0, 1, or 2
# qos = 0
#
# ## Topics to subscribe to
# topics = [
# "telegraf/host01/cpu",
# "telegraf/+/mem",
# "sensors/#",
# ]
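# ## ('+' matches exactly one topic level, '#' matches all remaining
# ## levels, so "sensors/#" subscribes to everything under sensors/.)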
#
# ## If true, messages that can't be delivered while the subscriber is offline
# ## will be delivered when it comes back (such as on service restart).
# ## NOTE: if true, client_id MUST be set.
# persistent_session = false
# ## If empty, a random client ID will be generated.
# client_id = ""
#
# ## username and password to connect to the MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
# ## Optional SSL Config
# # ssl_ca = "/etc/telegraf/ca.pem"
# # ssl_cert = "/etc/telegraf/cert.pem"
# # ssl_key = "/etc/telegraf/key.pem"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"

# # Read metrics from NATS subject(s)
# [[inputs.nats_consumer]]
# ## urls of NATS servers
# # servers = ["nats://localhost:4222"]
# ## Use Transport Layer Security
# # secure = false
# ## subject(s) to consume
# # subjects = ["telegraf"]
# ## name a queue group
# # queue_group = "telegraf_consumers"
#
# ## Sets the limits for pending msgs and bytes for each subscription
# ## These shouldn't need to be adjusted except in very high throughput scenarios
# # pending_message_limit = 65536
# # pending_bytes_limit = 67108864
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"

# # Read NSQ topic for metrics.
# [[inputs.nsq_consumer]]
# ## A string representing the NSQD TCP endpoint
# server = "localhost:4150"
# topic = "telegraf"
# channel = "consumer"
# max_in_flight = 100
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"

# # Statsd Server
# [[inputs.statsd]]
# ## Address and port to host UDP listener on
# service_address = ":8125"
# ## Delete gauges every interval (default=false)
# delete_gauges = false
# ## Delete counters every interval (default=false)
# delete_counters = false
# ## Delete sets every interval (default=false)
# delete_sets = false
# ## Delete timings & histograms every interval (default=true)
# delete_timings = true
# ## Percentiles to calculate for timing & histogram stats
# percentiles = [90]
#
# ## separator to use between elements of a statsd metric
# metric_separator = "_"
#
# ## Parses tags in the datadog statsd format
# ## http://docs.datadoghq.com/guides/dogstatsd/
# parse_data_dog_tags = false
#
# ## Statsd data translation templates, more info can be read here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
# # templates = [
# # "cpu.* measurement*"
# # ]
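# ## e.g. with the template above and the default metric_separator, a
# ## bucket named "cpu.load.5m" would map to the measurement
# ## "cpu_load_5m" (a sketch of graphite-style template matching).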
#
# ## Number of UDP messages allowed to queue up. Once filled,
# ## the statsd server will start dropping packets.
# allowed_pending_messages = 10000
#
# ## Number of timing/histogram values to track per-measurement in the
# ## calculation of percentiles. Raising this limit increases the accuracy
# ## of percentiles but also increases the memory usage and cpu time.
# percentile_limit = 1000

# # Stream a log file, like the tail -f command
# [[inputs.tail]]
# ## files to tail.
# ## These accept standard unix glob matching rules, but with the addition of
# ## ** as a "super asterisk". ie:
# ## "/var/log/**.log" -> recursively find all .log files in /var/log
# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
# ## "/var/log/apache.log" -> just tail the apache log file
# ##
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/mymetrics.out"]
# ## Read file from beginning.
# from_beginning = false
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"

# # Generic TCP listener
# [[inputs.tcp_listener]]
# ## Address and port to host TCP listener on
# # service_address = ":8094"
#
# ## Number of TCP messages allowed to queue up. Once filled, the
# ## TCP listener will start dropping packets.
# # allowed_pending_messages = 10000
#
# ## Maximum number of concurrent TCP connections to allow
# # max_tcp_connections = 250
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"

# # Generic UDP listener
# [[inputs.udp_listener]]
# ## Address and port to host UDP listener on
# # service_address = ":8092"
#
# ## Number of UDP messages allowed to queue up. Once filled, the
# ## UDP listener will start dropping packets.
# # allowed_pending_messages = 10000
#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"

# # A Webhooks Event collector
# [[inputs.webhooks]]
# ## Address and port to host Webhook listener on
# service_address = ":1619"
#
# [inputs.webhooks.filestack]
# path = "/filestack"
#
# [inputs.webhooks.github]
# path = "/github"
#
# [inputs.webhooks.mandrill]
# path = "/mandrill"
#
# [inputs.webhooks.rollbar]
# path = "/rollbar"