docker-compose多伺服器部署kafka叢集

2023-06-21 18:01:23
  • Kafka 是一個開源的分散式事件流平臺,依賴Zookeeper或者KRaft,本文基於Zookeeper。

伺服器IP設定

本文使用三個伺服器來做叢集搭建,IP如下:

nodeName IP
node1 10.10.210.96
node2 10.10.210.97
node3 10.10.210.98

部署zookeeper

  • 工作目錄為/home/zookeeper

node1設定

目錄結構

- zookeeper
  - config
    - zoo.cfg
  - docker-compose.yml

zoo.cfg

# ZooKeeper ensemble config for node1 (10.10.210.96)
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
# fixed: was 'clientPort:2181' — use '=' like every other key for consistency
clientPort=2181
# server.N=host:quorumPort:electionPort — N must match this host's ZOO_MY_ID
server.1=127.0.0.1:2888:3888
server.2=10.10.210.97:2888:3888
server.3=10.10.210.98:2888:3888

docker-compose.yml

---
# ZooKeeper node1 (10.10.210.96) — ZOO_MY_ID must match server.1 in zoo.cfg
version: '3'
services:
  zookeeper:
    image: zookeeper:3.7.0
    restart: always
    hostname: zookeeper-node-1
    container_name: zookeeper
    ports:
      # port mappings quoted — unquoted "N:N" can hit YAML's base-60 int trap
      - "2181:2181"  # client connections
      - "2888:2888"  # quorum peer traffic
      - "3888:3888"  # leader election
      - "8080:8080"  # AdminServer
    volumes:
      - ./data:/data
      - ./datalog:/datalog
      - ./config/zoo.cfg:/conf/zoo.cfg
    environment:
      ZOO_MY_ID: "1"  # env vars are strings — quote to avoid int typing

node2設定

目錄結構

- zookeeper
  - config
    - zoo.cfg
  - docker-compose.yml

zoo.cfg

# ZooKeeper ensemble config for node2 (10.10.210.97)
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
# fixed: was 'clientPort:2181' — use '=' like every other key for consistency
clientPort=2181
# server.N=host:quorumPort:electionPort — N must match this host's ZOO_MY_ID
server.1=10.10.210.96:2888:3888
server.2=127.0.0.1:2888:3888
server.3=10.10.210.98:2888:3888

docker-compose.yml

---
# ZooKeeper node2 (10.10.210.97) — ZOO_MY_ID must match server.2 in zoo.cfg
version: '3'
services:
  zookeeper:
    image: zookeeper:3.7.0
    restart: always
    hostname: zookeeper-node-2
    container_name: zookeeper
    ports:
      # port mappings quoted — unquoted "N:N" can hit YAML's base-60 int trap
      - "2181:2181"  # client connections
      - "2888:2888"  # quorum peer traffic
      - "3888:3888"  # leader election
      - "8080:8080"  # AdminServer
    volumes:
      - ./data:/data
      - ./datalog:/datalog
      - ./config/zoo.cfg:/conf/zoo.cfg
    environment:
      ZOO_MY_ID: "2"  # env vars are strings — quote to avoid int typing

node3設定

目錄結構

- zookeeper
  - config
    - zoo.cfg
  - docker-compose.yml

zoo.cfg

# ZooKeeper ensemble config for node3 (10.10.210.98)
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
# fixed: was 'clientPort:2181' — use '=' like every other key for consistency
clientPort=2181
# server.N=host:quorumPort:electionPort — N must match this host's ZOO_MY_ID
server.1=10.10.210.96:2888:3888
server.2=10.10.210.97:2888:3888
server.3=127.0.0.1:2888:3888

docker-compose.yml

---
# ZooKeeper node3 (10.10.210.98) — ZOO_MY_ID must match server.3 in zoo.cfg
version: '3'
services:
  zookeeper:
    image: zookeeper:3.7.0
    restart: always
    hostname: zookeeper-node-3
    container_name: zookeeper
    ports:
      # port mappings quoted — unquoted "N:N" can hit YAML's base-60 int trap
      - "2181:2181"  # client connections
      - "2888:2888"  # quorum peer traffic
      - "3888:3888"  # leader election
      - "8080:8080"  # AdminServer
    volumes:
      - ./data:/data
      - ./datalog:/datalog
      - ./config/zoo.cfg:/conf/zoo.cfg
    environment:
      ZOO_MY_ID: "3"  # env vars are strings — quote to avoid int typing
  • 在對應伺服器的/home/zookeeper執行 docker-compose up -d 啟動三個Zookeeper服務,通過docker-compose logs -f觀察啟動紀錄檔
  • ZOO_MY_ID 對應zookeeper的id,多臺伺服器需設定不同,對應zoo.cfg的server.1,其中.1 就是對應的ZOO_MY_ID
  • zoo.cfg設定資訊具體可參考 Zookeeper部署和管理指南

部署kafka

  • 工作目錄為/home/kafka

node1設定

目錄結構

- kafka
  - docker-compose.yml
  - config/server.properties

docker-compose.yml

---
# Kafka broker on node1 (10.10.210.96); broker config mounted from ./config
version: '3'
services:
  kafka:
    image: bitnami/kafka:3.0.0
    restart: always
    hostname: kafka-node-1
    container_name: kafka
    ports:
      # port mappings quoted — unquoted "N:N" can hit YAML's base-60 int trap
      - "9092:9092"  # PLAINTEXT listener
      - "9999:9999"  # JMX
    volumes:
      - ./logs:/opt/bitnami/kafka/logs
      - ./data:/bitnami/kafka/data
      - ./config/server.properties:/opt/bitnami/kafka/config/server.properties

server.properties

# Kafka broker config for node1 (10.10.210.96) — broker.id unique per node
broker.id=1
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://10.10.210.96:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/bitnami/kafka/data
num.partitions=1
num.recovery.threads.per.data.dir=1
# NOTE(review): factor 1 on a 3-broker cluster means one broker failure loses
# these internal topics — consider 3 for production
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.10.210.96:2181,10.10.210.97:2181,10.10.210.98:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
auto.create.topics.enable=true
# NOTE(review): the two settings below are consumer/producer client options,
# not broker options — verify they were intended in this file
max.partition.fetch.bytes=1048576
max.request.size=1048576
sasl.enabled.mechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-512
# fixed: removed 'sasl.mechanism.inter.broker.protocol=' — the empty value
# overrode the default (GSSAPI) with a blank mechanism; the inter-broker
# listener is PLAINTEXT, so no SASL mechanism is needed here

node2設定

目錄結構

- kafka
  - docker-compose.yml
  - config/server.properties

docker-compose.yml

---
# Kafka broker on node2 (10.10.210.97); broker config mounted from ./config
version: '3'
services:
  kafka:
    image: bitnami/kafka:3.0.0
    restart: always
    hostname: kafka-node-2
    container_name: kafka
    ports:
      # port mappings quoted — unquoted "N:N" can hit YAML's base-60 int trap
      - "9092:9092"  # PLAINTEXT listener
      - "9999:9999"  # JMX
    volumes:
      - ./logs:/opt/bitnami/kafka/logs
      - ./data:/bitnami/kafka/data
      - ./config/server.properties:/opt/bitnami/kafka/config/server.properties

server.properties

# Kafka broker config for node2 (10.10.210.97) — broker.id unique per node
broker.id=2
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://10.10.210.97:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/bitnami/kafka/data
num.partitions=1
num.recovery.threads.per.data.dir=1
# NOTE(review): factor 1 on a 3-broker cluster means one broker failure loses
# these internal topics — consider 3 for production
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.10.210.96:2181,10.10.210.97:2181,10.10.210.98:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
auto.create.topics.enable=true
# NOTE(review): the two settings below are consumer/producer client options,
# not broker options — verify they were intended in this file
max.partition.fetch.bytes=1048576
max.request.size=1048576
sasl.enabled.mechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-512
# fixed: removed 'sasl.mechanism.inter.broker.protocol=' — the empty value
# overrode the default (GSSAPI) with a blank mechanism; the inter-broker
# listener is PLAINTEXT, so no SASL mechanism is needed here

node3設定

目錄結構

- kafka
  - docker-compose.yml
  - config/server.properties

docker-compose.yml

---
# Kafka broker on node3 (10.10.210.98); broker config mounted from ./config
version: '3'
services:
  kafka:
    image: bitnami/kafka:3.0.0
    restart: always
    hostname: kafka-node-3
    container_name: kafka
    ports:
      # port mappings quoted — unquoted "N:N" can hit YAML's base-60 int trap
      - "9092:9092"  # PLAINTEXT listener
      - "9999:9999"  # JMX
    volumes:
      - ./logs:/opt/bitnami/kafka/logs
      - ./data:/bitnami/kafka/data
      - ./config/server.properties:/opt/bitnami/kafka/config/server.properties

server.properties

# Kafka broker config for node3 (10.10.210.98) — broker.id unique per node
broker.id=3
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://10.10.210.98:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/bitnami/kafka/data
num.partitions=1
num.recovery.threads.per.data.dir=1
# NOTE(review): factor 1 on a 3-broker cluster means one broker failure loses
# these internal topics — consider 3 for production
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=10.10.210.96:2181,10.10.210.97:2181,10.10.210.98:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
auto.create.topics.enable=true
# NOTE(review): the two settings below are consumer/producer client options,
# not broker options — verify they were intended in this file
max.partition.fetch.bytes=1048576
max.request.size=1048576
sasl.enabled.mechanisms=PLAIN,SCRAM-SHA-256,SCRAM-SHA-512
# fixed: removed 'sasl.mechanism.inter.broker.protocol=' — the empty value
# overrode the default (GSSAPI) with a blank mechanism; the inter-broker
# listener is PLAINTEXT, so no SASL mechanism is needed here
  • 在對應伺服器的/home/kafka執行 docker-compose up -d 啟動三個Kafka服務,通過docker-compose logs -f觀察啟動紀錄檔
  • server.properties設定資訊具體可參考 Kafka Broker Configs

kafka測試使用

  • 通過offset explorer測試連線kafka是否可用。

後記

  • 如果只需要簡單的設定,可以透過environment的方式啟動kafka,參考如下:

docker-compose.yml

---
# Single broker configured purely via environment variables (bitnami image)
version: '3'
services:
  kafka:
    image: bitnami/kafka:3.0.0
    restart: always
    hostname: kafka-node
    container_name: kafka
    ports:
      # port mappings quoted — unquoted "N:N" can hit YAML's base-60 int trap
      - "9092:9092"  # PLAINTEXT listener
      - "9999:9999"  # JMX
    environment:
      # NOTE(review): bitnami/kafka documents broker settings under KAFKA_CFG_*
      # names (e.g. KAFKA_CFG_ADVERTISED_LISTENERS); confirm the non-CFG
      # variables below are honored by this image version
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://10.10.210.96:9092
      - KAFKA_ADVERTISED_HOST_NAME=10.10.210.96
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=10.10.210.96:2181,10.10.210.97:2181,10.10.210.98:2181
      - ALLOW_PLAINTEXT_LISTENER=yes
      - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092
      - JMX_PORT=9999
    volumes:
      - ./logs:/opt/bitnami/kafka/logs
      - ./data:/bitnami/kafka/data