
How to set up and deploy zookeeper + kafka with docker swarm


This article explains how to set up and deploy zookeeper + kafka with docker swarm. The walkthrough is kept simple and clear, so follow the steps below in order.

1. Prepare the machines

Prepare three machines with IP addresses 192.168.10.51, 192.168.10.52 and 192.168.10.53, and hostnames centos51, centos52 and centos53 respectively. All three machines already have a working docker swarm environment; how to build the swarm cluster itself is covered in a separate article on setting up a docker swarm cluster.
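The compose files below all reference an overlay network named swarm-net as an external network, so it has to exist before any stack is deployed. A minimal sketch, run once on the swarm manager node (the --attachable flag is my own addition so that ad-hoc test containers can join the network later; the original setup does not state how swarm-net was created):

# check that all three nodes have joined the swarm (run on the manager)
docker node ls

# create the overlay network that the stacks declare as "external"
docker network create --driver overlay --attachable swarm-net

# confirm it exists
docker network ls | grep swarm-net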

2. Prepare the images

Pull the zookeeper, kafka and kafka manager images from https://hub.docker.com/:

docker pull zookeeper:3.6.1
docker pull wurstmeister/kafka:2.12-2.5.0
docker pull kafkamanager/kafka-manager:3.0.0.4
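Because step 7 deploys with --resolve-image=never, each image must already be present locally on every node, not only on the manager (the kafka manager image is strictly only needed on centos51, but pulling it everywhere is harmless). A hedged sketch for pulling on all three machines, assuming password-less ssh from the manager to each node, which this article does not otherwise set up:

for host in 192.168.10.51 192.168.10.52 192.168.10.53; do
  ssh root@$host "docker pull zookeeper:3.6.1 && \
                  docker pull wurstmeister/kafka:2.12-2.5.0 && \
                  docker pull kafkamanager/kafka-manager:3.0.0.4"
done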

3. Prepare the zookeeper compose file

File name: docker-stack-zookeeper.yml

version: "3.2"
services:
# zookeeper services
  zookeeper-server-a:
    hostname: zookeeper-server-a
    image: zookeeper:3.6.1
    ports:
      - "12181:2181"
    networks:
      swarm-net:
        aliases:
          - zookeeper-server-a
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=zookeeper-server-a:2888:3888;2181 server.2=zookeeper-server-b:2888:3888;2181 server.3=zookeeper-server-c:2888:3888;2181
    volumes:
      - /data/kafka_cluster/zookeeper/data:/data
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos51]
      resources:
        limits:
#          cpus: '1'
          memory: 1GB
        reservations:
#          cpus: '0.2'
          memory: 512M

  zookeeper-server-b:
    hostname: zookeeper-server-b
    image: zookeeper:3.6.1
    ports:
      - "22181:2181"
    networks:
      swarm-net:
        aliases:
          - zookeeper-server-b
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zookeeper-server-a:2888:3888;2181 server.2=zookeeper-server-b:2888:3888;2181 server.3=zookeeper-server-c:2888:3888;2181
    volumes:
      - /data/kafka_cluster/zookeeper/data:/data
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos52]
      resources:
        limits:
#          cpus: '1'
          memory: 1GB
        reservations:
#          cpus: '0.2'
          memory: 512M

  zookeeper-server-c:
    hostname: zookeeper-server-c
    image: zookeeper:3.6.1
    ports:
      - "32181:2181"
    networks:
      swarm-net:
        aliases:
          - zookeeper-server-c
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zookeeper-server-a:2888:3888;2181 server.2=zookeeper-server-b:2888:3888;2181 server.3=zookeeper-server-c:2888:3888;2181
    volumes:
      - /data/kafka_cluster/zookeeper/data:/data
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos53]
      resources:
        limits:
#          cpus: '1'
          memory: 1GB
        reservations:
#          cpus: '0.2'
          memory: 512M
networks:
  swarm-net:
    external:
      name: swarm-net
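Each ZooKeeper service publishes its client port on the host (12181, 22181 and 32181), so the ensemble can be checked once the stack from step 7 is up. A small sketch, assuming nc (netcat) is installed on the machine you run it from; srvr is in ZooKeeper's default four-letter-word whitelist:

# one node should report "Mode: leader", the other two "Mode: follower"
echo srvr | nc 192.168.10.51 12181 | grep Mode
echo srvr | nc 192.168.10.52 22181 | grep Mode
echo srvr | nc 192.168.10.53 32181 | grep Mode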

4. Prepare the kafka compose file

File name: docker-stack-kafka.yml

version: "3.2"
services:
# kafka services
  kafka-server-a:
    hostname: kafka-server-a
    image: wurstmeister/kafka:2.12-2.5.0
    ports:
      - "19092:9092"
    networks:
      swarm-net:
        aliases:
          - kafka-server-a
    environment:
      - TZ=CST-8
      - KAFKA_ADVERTISED_HOST_NAME=kafka-server-a
      - HOST_IP=kafka-server-a
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper-server-a:2181,zookeeper-server-b:2181,zookeeper-server-c:2181
      - KAFKA_BROKER_ID=0
      - KAFKA_HEAP_OPTS=-Xmx512M -Xms16M
    volumes:
      - /data/kafka_cluster/kafka/data:/kafka/kafka-logs-kafka-server-a
      - /data/kafka_cluster/kafka/logs:/opt/kafka/logs
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos51]
      resources:
        limits:
#          cpus: '1'
          memory: 1GB
        reservations:
#          cpus: '0.2'
          memory: 512M

  kafka-server-b:
    hostname: kafka-server-b
    image: wurstmeister/kafka:2.12-2.5.0
    ports:
      - "29092:9092"
    networks:
      swarm-net:
        aliases:
          - kafka-server-b
    environment:
      - TZ=CST-8
      - KAFKA_ADVERTISED_HOST_NAME=kafka-server-b
      - HOST_IP=kafka-server-b
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper-server-a:2181,zookeeper-server-b:2181,zookeeper-server-c:2181
      - KAFKA_BROKER_ID=1
      - KAFKA_HEAP_OPTS=-Xmx512M -Xms16M
    volumes:
      - /data/kafka_cluster/kafka/data:/kafka/kafka-logs-kafka-server-b
      - /data/kafka_cluster/kafka/logs:/opt/kafka/logs
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos52]
      resources:
        limits:
#          cpus: '1'
          memory: 1GB
        reservations:
#          cpus: '0.2'
          memory: 512M

  kafka-server-c:
    hostname: kafka-server-c
    image: wurstmeister/kafka:2.12-2.5.0
    ports:
      - "39092:9092"
    networks:
      swarm-net:
        aliases:
          - kafka-server-c
    environment:
      - TZ=CST-8
      - KAFKA_ADVERTISED_HOST_NAME=kafka-server-c
      - HOST_IP=kafka-server-c
      - KAFKA_ADVERTISED_PORT=9092
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper-server-a:2181,zookeeper-server-b:2181,zookeeper-server-c:2181
      - KAFKA_BROKER_ID=2
      - KAFKA_HEAP_OPTS=-Xmx512M -Xms16M
    volumes:
      - /data/kafka_cluster/kafka/data:/kafka/kafka-logs-kafka-server-c
      - /data/kafka_cluster/kafka/logs:/opt/kafka/logs
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos53]
      resources:
        limits:
#          cpus: '1'
          memory: 1GB
        reservations:
#          cpus: '0.2'
          memory: 512M
networks:
  swarm-net:
    external:
      name: swarm-net
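Since KAFKA_ADVERTISED_HOST_NAME is set to each service's network alias, the brokers advertise addresses that only resolve inside the swarm-net overlay network, so test clients should run on that network too. A hedged smoke test for after the stack from step 7 is deployed, assuming swarm-net was created with --attachable as in step 1 (the topic name test-topic is only an example):

# run a throwaway container attached to the same overlay network
docker run -it --rm --network swarm-net wurstmeister/kafka:2.12-2.5.0 bash

# inside the container: create a replicated topic and list it
/opt/kafka/bin/kafka-topics.sh --create --topic test-topic \
  --partitions 3 --replication-factor 3 \
  --bootstrap-server kafka-server-a:9092
/opt/kafka/bin/kafka-topics.sh --list --bootstrap-server kafka-server-a:9092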

5. Prepare the kafka manager compose file

File name: docker-stack-kafka-manager.yml

version: "3.2"
services:
# kafka manager service
  kafka-manager:
    hostname: kafka-manager
    image: kafkamanager/kafka-manager:3.0.0.4
    ports:
      - "19000:9000"
    networks:
      swarm-net:
        aliases:
          - kafka-manager
    environment:
      - ZK_HOSTS=zookeeper-server-a:2181,zookeeper-server-b:2181,zookeeper-server-c:2181
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints: [node.hostname == centos51]
      resources:
        limits:
#          cpus: '1'
          memory: 1GB
        reservations:
#          cpus: '0.2'
          memory: 512M

networks:
  swarm-net:
    external:
      name: swarm-net
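Once deployed in step 7, kafka manager is reachable on port 19000 of centos51. A quick reachability check, plus the ZooKeeper string to use when adding the cluster in the web UI (the exact UI labels of kafka-manager 3.0.0.4 are an assumption here):

# should return an HTTP response once the service is up
curl -I http://192.168.10.51:19000

# in the web UI: Cluster -> Add Cluster, and use this as the Zookeeper hosts value:
# zookeeper-server-a:2181,zookeeper-server-b:2181,zookeeper-server-c:2181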

6. Create the mapped host directories on all three machines

mkdir -p {/data/kafka_cluster/zookeeper/data,/data/kafka_cluster/kafka/data,/data/kafka_cluster/kafka/logs}
chmod -R 777 /data/kafka_cluster/
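These directories are bind-mounted by the compose files, so they have to exist on every node before the stacks start. A sketch for running the same commands on all three machines from one place, again assuming ssh access to the nodes:

for host in 192.168.10.51 192.168.10.52 192.168.10.53; do
  ssh root@$host "mkdir -p /data/kafka_cluster/zookeeper/data \
                           /data/kafka_cluster/kafka/data \
                           /data/kafka_cluster/kafka/logs && \
                  chmod -R 777 /data/kafka_cluster/"
done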

7. Run the compose files

Run the commands strictly in this order, and make sure each stack has come up successfully before running the next one.

docker stack deploy -c docker-stack-zookeeper.yml zoo --resolve-image=never --with-registry-auth
docker stack deploy -c docker-stack-kafka.yml kafka --resolve-image=never --with-registry-auth
docker stack deploy -c docker-stack-kafka-manager.yml kafka_manager --resolve-image=never --with-registry-auth
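One way to confirm that a stack is fully up before deploying the next one (the service names below follow from the stack names used above):

# every service should show REPLICAS 1/1 before moving on
docker stack services zoo
docker stack services kafka
docker stack services kafka_manager

# if a task keeps restarting, inspect its error message
docker stack ps zoo --no-trunc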

Thank you for reading. That concludes "how to set up and deploy zookeeper + kafka with docker swarm". After working through this article you should have a much better grasp of the topic, but the details are best confirmed by running the deployment yourself.
