# kafka-pixy default configuration (220 lines, 7.9 KiB, YAML)
# Source: mirror of https://github.com/vimagick/dockerfiles.git (synced 2024-12-23)
# TCP address that gRPC API server should listen on.
grpc_addr: 0.0.0.0:19091

# TCP address that RESTful API server should listen on.
tcp_addr: 0.0.0.0:19092

# Unix domain socket address that RESTful API server should listen on.
# Listening on a unix domain socket is disabled by default.
# unix_addr: "/var/run/kafka-pixy.sock"

# A map of cluster names to respective proxy configurations. The first proxy
# in the map is considered to be `default`. It is used in API calls that do not
# specify cluster name explicitly.
proxies:

  # Name of a Kafka+ZooKeeper cluster. The only requirement to the name is that
  # it should be unique in this config file. The value of this parameter is
  # a configuration of a proxy to access the cluster.
  default:

    # Unique ID that identifies a Kafka-Pixy instance in both ZooKeeper and
    # Kafka. It is automatically generated by default and it is recommended to
    # leave it like that.
    # client_id: AUTOGENERATED

    # Kafka parameters section.
    kafka:

      # List of seed Kafka peers that Kafka-Pixy should access to resolve the
      # Kafka cluster topology.
      seed_peers:
        - kafka:9092

      # Version of the Kafka cluster. Supported versions are 0.10.2.1 - 2.0.0
      version: 2.0.0

      # Enable TLS when connecting to the Kafka cluster
      tls: false

      # The filepath to the CA root certificate
      # ca_certificate_file:

      # The filepath to the client certificate
      # client_certificate_file:

      # The filepath to the client certificate key
      # client_key_file:

      # Disable hostname verification
      # insecure: false

    # Networking parameters section. These all pass through to sarama's
    # `config.Net` field.
    net:

      # How long to wait for the initial connection.
      dial_timeout: 30s

      # How long to wait for a response.
      read_timeout: 30s

      # How long to wait for a transmit.
      write_timeout: 30s

    # ZooKeeper parameters section.
    zoo_keeper:

      # List of seed ZooKeeper peers that Kafka-Pixy should access to resolve the
      # ZooKeeper cluster topology.
      seed_peers:
        - zookeeper:2181

      # A root directory in ZooKeeper to store consumers data.
      # chroot: ""

      # ZooKeeper session timeout has to be a minimum of 2 times the tickTime
      # (as set in the server configuration) and a maximum of 20 times the
      # tickTime. The default ZooKeeper tickTime is 2 seconds.
      #
      # See http://zookeeper.apache.org/doc/trunk/zookeeperProgrammers.html#ch_zkSessions
      session_timeout: 15s

    # Producer parameters section.
    producer:

      # Size of all buffered channels created by the producer module.
      channel_buffer_size: 4096

      # The maximum permitted size of a message (defaults to 1000000). Should be
      # set equal to or smaller than the broker's `message.max.bytes`.
      max_message_bytes: 1000000

      # The type of compression to use on messages. Allowed values are:
      # none, gzip, snappy, and lz4.
      compression: none

      # The best-effort number of bytes needed to trigger a flush.
      flush_bytes: 1048576

      # The best-effort frequency of flushes.
      flush_frequency: 500ms

      # How long to wait for the cluster to settle between retries.
      retry_backoff: 10s

      # The total number of times to retry sending a message before giving up.
      retry_max: 6

      # The level of acknowledgement reliability needed from the broker.
      # Allowed values are:
      #  * no_response:    the broker doesn't send any response, the TCP ACK
      #                    is all you get.
      #  * wait_for_local: the broker responds as soon as it commits to the
      #                    local disk.
      #  * wait_for_all:   the broker waits for all in-sync replicas to commit
      #                    before responding.
      required_acks: wait_for_all

      # Period of time that Kafka-Pixy should keep trying to submit buffered
      # messages to Kafka. It is recommended to make it large enough to survive
      # a ZooKeeper leader election in your setup.
      shutdown_timeout: 30s

      # How to assign incoming messages to a Kafka partition. Defaults to using
      # a hash of the specified message key, or random if the key is
      # unspecified. Allowed values are:
      #  * hash:       for messages with a key, take the FNV-1a hash of the
      #                bytes, modulus the number of partitions; otherwise use a
      #                random partition.
      #  * random:     all messages are published to a random partition.
      #  * roundrobin: iterate over partitions sequentially
      partitioner: hash

      # The timeout to specify on individual produce requests to the broker. The
      # broker will wait for replication to complete up to this duration before
      # returning an error.
      timeout: 10s

    # Consumer parameters section.
    consumer:

      # If set, Kafka-Pixy will not configure a consumer, and any attempts to
      # call the consumer APIs will return an error.
      disabled: false

      # Period of time that Kafka-Pixy should wait for an acknowledgement
      # before retrying.
      ack_timeout: 5m

      # Size of all buffered channels created by the consumer module.
      channel_buffer_size: 64

      # The number of bytes of messages to attempt to fetch for each
      # topic-partition in each fetch request. These bytes will be read into
      # memory for each partition, so this helps control the memory used by
      # the consumer. The fetch request size must be at least as large as
      # the maximum message size the server allows or else it is possible
      # for the producer to send messages larger than the consumer can fetch.
      fetch_max_bytes: 1048576

      # The maximum amount of time the server will block before answering
      # the fetch request if there isn't data immediately available.
      fetch_max_wait: 250ms

      # Consume request will wait at most this long until for a message from a
      # topic to become available before expiring.
      long_polling_timeout: 3s

      # The maximum number of unacknowledged messages allowed for a particular
      # group-topic-partition at a time. When this number is reached subsequent
      # consume requests will return long polling timeout errors, until some of
      # the pending messages are acknowledged.
      max_pending_messages: 300

      # The maximum number of retries Kafka-Pixy will make to offer an
      # unack message. Messages that exceeded the number of retries are
      # discarded by Kafka-Pixy and acknowledged in Kafka. Zero retries
      # means that messages will be offered just once.
      #
      # If you want Kafka-Pixy to retry indefinitely, then set this
      # parameter to -1.
      max_retries: -1

      # How frequently to commit offsets to Kafka.
      offsets_commit_interval: 500ms

      # If a request to a Kafka-Pixy fails for any reason, then it should wait this
      # long before retrying.
      retry_backoff: 500ms

      # Period of time that Kafka-Pixy should keep a subscription for a
      # topic by a group in absence of requests to from the consumer group.
      subscription_timeout: 15s

# Configuration for securely accessing the gRPC and web servers
tls:

  # Path to the server certificate file.
  # Required if using gRPC SSL/TLS or HTTPS.
  # certificate_path: /usr/local/etc/server.crt

  # Path to the server certificate key file.
  # Required if using gRPC SSL/TLS or HTTPS.
  # key_path: /usr/local/etc/server.key

# A list of defined loggers, multiple loggers are allowed and each log line will be sent to every logger defined.
logging:

  # Logs to stdout in human readable format
  - name: console
    severity: info

  # # Logs to stdout in a JSON format
  # - name: json
  #   severity: info

  # # Logs to kafka topic in JSON format
  # - name: udplog
  #   severity: error

  # # Logs to syslog
  # - name: syslog
  #   severity: debug