server.properties.erb
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=<%= @broker_id %>
############################# Socket Server Settings #############################
# The port the socket server listens on
port=<%= @ports[0] %>
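# (@ports is presumably the list of ports assigned to this node by the cookbook;
# the broker listens on the first entry.)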
# Hostname the broker will bind to and advertise to producers and consumers.
# If not set, the server will bind to all interfaces and advertise the value returned
# from java.net.InetAddress.getCanonicalHostName().
host.name=<%= @hostname %>
# The number of threads handling network requests
num.network.threads=16
# The number of threads doing disk I/O
num.io.threads=4
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=8388608
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=8388608
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
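# (For reference: 8388608 bytes = 8 MiB; 104857600 bytes = 100 MiB.)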
############################# Log Basics #############################
# A comma-separated list of directories under which to store log files
log.dirs=/kafka/kafka-logs
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but also mean more files.
num.partitions=48
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
log.flush.interval.messages=100000
# The maximum amount of time a message can sit in a log before we force a flush
log.flush.interval.ms=10000
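# The frequency in ms at which the log flusher checks whether any log needs to be flushed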
log.flush.scheduler.interval.ms=10000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion
log.retention.hours=36
# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
#log.retention.bytes=1073741824
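# (1073741824 bytes = 1 GiB. Left commented out here, so only the time-based
# retention policy above applies.)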
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
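# (268435456 bytes = 256 MiB.)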
log.segment.bytes=268435456
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.cleanup.interval.mins=1
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma-separated list of host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the URLs to specify the
# root directory for all kafka znodes.
zookeeper.connect=<%= @zk_servers %>/kafka-<%= @cluster %>
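# Example rendering with hypothetical values @zk_servers = "zk1:2181,zk2:2181,zk3:2181"
# and @cluster = "prod":
#   zookeeper.connect=zk1:2181,zk2:2181,zk3:2181/kafka-prod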
# Zookeeper session, connection, and sync timeouts in ms
zookeeper.session.timeout.ms=6000
zookeeper.connection.timeout.ms=6000
zookeeper.sync.time.ms=2000
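# Clean up old log segments by deleting them outright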
log.cleanup.policy=delete
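# Graphite metrics reporting (left commented out / disabled)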
#kafka.graphite.metrics.host=graphite-di.musta.ch
#kafka.graphite.metrics.prefix=kafka.<%= @cluster %>.<%= @broker_id %>
#kafka.graphite.metrics.reporter.enabled=true
#kafka.metrics.reporters=kafka.metrics.KafkaGraphiteMetricsReporter
#kafka.metrics.polling.interval.secs=60
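############################# Replication #############################
# Migrate partition leadership off this broker before shutting down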
controlled.shutdown.enable=true
replica.fetch.wait.max.ms=5000
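# Maximum bytes fetched per partition by follower replicas; must be at least
# message.max.bytes so the largest accepted message can still be replicated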
replica.fetch.max.bytes=8388608
replica.socket.receive.buffer.bytes=8388608
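# A follower is dropped from the ISR if it falls more than replica.lag.max.messages
# behind the leader, or has not sent a fetch request for replica.lag.time.max.ms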
replica.lag.max.messages=10000
replica.lag.time.max.ms=30000
replica.high.watermark.checkpoint.interval.ms=5000
replica.socket.timeout.ms=20000
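# Number of fetcher threads used to replicate messages from each source broker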
num.replica.fetchers=3
controller.socket.timeout.ms=20000
controller.message.queue.size=10
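# Periodically return partition leadership to the preferred replicas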
auto.leader.rebalance.enable=true
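# Largest message size the broker will accept (8000000 bytes, ~7.6 MiB)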
message.max.bytes=8000000
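# Purge interval (in number of requests) of the fetch and produce request purgatories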
fetch.purgatory.purge.interval.requests=1000
producer.purgatory.purge.interval.requests=1000