Quick docker-compose setup for Redis, a Redis cluster, ActiveMQ, Elasticsearch (head, ik, Kibana, Logstash), MongoDB, ZooKeeper, and more

Published 2023-06-05 11:45:04 | Author: 叮伱格斐呃

Basic operations

Deploy and start the containers (run from the directory containing docker-compose.yml):

docker-compose up -d

 

Stop the containers (likewise, from the same directory):

# Stop the services
docker-compose stop

# Stop and remove containers and networks (add -v to also remove volumes, --rmi all for images)
docker-compose down

 

Check the status of all containers in the current docker-compose project:

docker-compose ps

 

Viewing logs

# Enter a running container
docker exec -it 3377274e5854 /bin/bash

# View the last 50 log lines (works for exited containers too)
docker logs --tail 50 8f9ed73c0e9d

# Follow logs in real time
docker logs -f 69207e4a8d15
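The compose CLI can also aggregate logs by service name instead of container ID, which is often more convenient for the stacks below; a small sketch (service names are whatever docker-compose.yml defines):

# follow the last 100 lines from one service
docker-compose logs -f --tail 100 redis_a

# interleave logs from every service in the project
docker-compose logs -f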

 

Deployment

1. Single-node Redis

vim docker-compose.yml

version: '3'
services:
  redis_a:
    image: redis:6.2.7       # image
    restart: always          # restart with the Docker daemon
    container_name: redis_a
    command: redis-server /etc/redis/redis.conf --requirepass 123456 --masterauth 123456
    volumes:  # mounts
    - /opt/docker/redis_a/data/redis_data:/data   # host path : container path
    - /opt/docker/redis_a/conf/redis.conf:/etc/redis/redis.conf
    - /etc/localtime:/etc/localtime:ro  # sync container time with the host
    network_mode: "host"     # use host networking
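Note that /opt/docker/redis_a/conf/redis.conf must exist on the host before starting (a file like the redis1.conf shown in the next section works). After docker-compose up -d, a quick smoke test, assuming the password 123456 from the compose file:

# -a passes the requirepass password; expect PONG back
docker exec -it redis_a redis-cli -a 123456 ping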

 

2. Redis master-replica cluster + Sentinel

vim docker-compose.yml

version: '3'
services:
  master:
    image: redis:6.2.7       # image
    restart: always          # restart with the Docker daemon
    container_name: redis-master
    command: redis-server /etc/redis/redis1.conf --requirepass redis135246 --masterauth redis135246
    volumes:
    - /opt/docker/redis/data/redis_data1:/data
    - /opt/docker/redis/conf1:/etc/redis
    - /etc/localtime:/etc/localtime:ro
    network_mode: "host"
  slave1:
    image: redis:6.2.7                
    restart: always
    container_name: redis-slave-1
    volumes:
    - /opt/docker/redis/data/redis_data2:/data
    - /opt/docker/redis/conf2:/etc/redis
    - /etc/localtime:/etc/localtime:ro
    command: redis-server /etc/redis/redis2.conf --slaveof 192.168.3.221 6379 --requirepass redis135246 --masterauth redis135246 
    depends_on:
    - master
    network_mode: "host"
  slave2:
    image: redis:6.2.7                
    restart: always
    container_name: redis-slave-2
    volumes:
    - /opt/docker/redis/data/redis_data3:/data
    - /opt/docker/redis/conf3:/etc/redis
    - /etc/localtime:/etc/localtime:ro
    command: redis-server /etc/redis/redis3.conf --slaveof 192.168.3.221 6379 --requirepass redis135246 --masterauth redis135246
    depends_on:
    - master
    network_mode: "host"
  sentinel1:
    image: redis:6.2.7       
    restart: always
    container_name: redis-sentinel-1
    command: redis-sentinel /usr/local/etc/redis/sentinel1.conf
    volumes:
    - /opt/docker/redis/conf1:/usr/local/etc/redis
    - /etc/localtime:/etc/localtime:ro
    network_mode: "host"
    depends_on:
    - master
    - slave1
    - slave2
  sentinel2:
    image: redis:6.2.7                
    restart: always
    container_name: redis-sentinel-2          
    command: redis-sentinel /usr/local/etc/redis/sentinel2.conf
    volumes:
    - /opt/docker/redis/conf2:/usr/local/etc/redis
    - /etc/localtime:/etc/localtime:ro
    network_mode: "host"
    depends_on:
    - master
    - slave1
    - slave2
  sentinel3:
    image: redis:6.2.7                
    restart: always
    container_name: redis-sentinel-3          
    command: redis-sentinel /usr/local/etc/redis/sentinel3.conf
    volumes:
    - /opt/docker/redis/conf3:/usr/local/etc/redis
    - /etc/localtime:/etc/localtime:ro
    network_mode: "host"
    depends_on:
    - master
    - slave1
    - slave2

 

Create the corresponding directories and config files

 

vim redis1.conf

bind 0.0.0.0
port 6379
protected-mode no
slave-read-only no
logfile "/data/redis_6379.log"

vim sentinel1.conf

port 26379
dir "/tmp"
logfile "/tmp/sentinel1.log"
protected-mode no
sentinel monitor mymaster 192.168.3.221 6379 2
sentinel auth-pass mymaster redis135246
sentinel failover-timeout mymaster 10000
sentinel deny-scripts-reconfig yes
# Generated by CONFIG REWRITE
user default on nopass ~* &* +@all
sentinel myid 67fe464a17621cb3ba851024637f910f67db4775
sentinel config-epoch mymaster 0
sentinel leader-epoch mymaster 0
sentinel current-epoch 0
sentinel known-replica mymaster 192.168.3.221 6381
sentinel known-replica mymaster 192.168.3.221 6380
sentinel known-sentinel mymaster 192.168.3.221 26381 2098f9b8d4a6c70c5c3a0db0975506bd9a71b26a
sentinel known-sentinel mymaster 192.168.3.221 26380 49fb1b1921e5d12c6fc16b6a57570ec0419c6df4

(Everything from "# Generated by CONFIG REWRITE" down is written back by Sentinel itself at runtime; a fresh config only needs the lines above it. The same applies to sentinel2.conf and sentinel3.conf below.)

vim redis2.conf

bind 0.0.0.0
port 6380
protected-mode no
slave-read-only no
logfile "/data/redis_6380.log"

vim sentinel2.conf

port 26380
dir "/tmp"
logfile "/tmp/sentinel2.log"
protected-mode no
sentinel monitor mymaster 192.168.3.221 6379 2
sentinel auth-pass mymaster redis135246
sentinel failover-timeout mymaster 10000
sentinel deny-scripts-reconfig yes
# Generated by CONFIG REWRITE
user default on nopass ~* &* +@all
sentinel myid 49fb1b1921e5d12c6fc16b6a57570ec0419c6df4
sentinel config-epoch mymaster 0
sentinel leader-epoch mymaster 0
sentinel current-epoch 0
sentinel known-replica mymaster 192.168.3.221 6381
sentinel known-replica mymaster 192.168.3.221 6380
sentinel known-sentinel mymaster 192.168.3.221 26381 2098f9b8d4a6c70c5c3a0db0975506bd9a71b26a
sentinel known-sentinel mymaster 192.168.3.221 26379 67fe464a17621cb3ba851024637f910f67db4775

vim redis3.conf

bind 0.0.0.0
port 6381
protected-mode no
slave-read-only no
logfile "/data/redis_6381.log"

vim sentinel3.conf

port 26381
dir "/tmp"
logfile "/tmp/sentinel3.log"
protected-mode no
sentinel monitor mymaster 192.168.3.221 6379 2
sentinel auth-pass mymaster redis135246
sentinel failover-timeout mymaster 10000
sentinel deny-scripts-reconfig yes
# Generated by CONFIG REWRITE
user default on nopass ~* &* +@all
sentinel myid 2098f9b8d4a6c70c5c3a0db0975506bd9a71b26a
sentinel config-epoch mymaster 0
sentinel leader-epoch mymaster 0
sentinel current-epoch 0
sentinel known-replica mymaster 192.168.3.221 6380
sentinel known-replica mymaster 192.168.3.221 6381
sentinel known-sentinel mymaster 192.168.3.221 26379 67fe464a17621cb3ba851024637f910f67db4775
sentinel known-sentinel mymaster 192.168.3.221 26380 49fb1b1921e5d12c6fc16b6a57570ec0419c6df4

 

Deploy and start the containers (run from the directory containing docker-compose.yml):

docker-compose up -d
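Once up, it is worth confirming that replication and Sentinel are healthy; a quick check using the passwords and names from the files above:

# the master should report role:master and connected_slaves:2
docker exec -it redis-master redis-cli -a redis135246 info replication

# ask a sentinel which master it is tracking
docker exec -it redis-sentinel-1 redis-cli -p 26379 sentinel get-master-addr-by-name mymaster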

 

 

3. ActiveMQ cluster

Create the files and directories referenced in docker-compose.yml

 

mkdir -p cluster/conf master/conf slave/conf data/activemq logs/activemq

vim docker-compose.yml

version: '3'
services:
  cluster:
    image: docker.io/webcenter/activemq
    ports:
      - 61616:61616
      - 61613:61613
      - 8161:8161
    restart: always
    volumes:
      - /opt/docker/activemq/data/activemq:/opt/activemq/data
      - /opt/docker/activemq/logs/activemq:/var/logs/activemq
      - /opt/docker/activemq/cluster/conf/activemq.xml:/opt/activemq/conf/activemq.xml
      - /etc/localtime:/etc/localtime:ro
    environment:
      - ACTIVEMQ_ADMIN_LOGIN=admin
      - ACTIVEMQ_ADMIN_PASSWORD=admin
      - ACTIVEMQ_CONFIG_MINMEMORY=512
      - ACTIVEMQ_CONFIG_MAXMEMORY=2048
    network_mode: "host"

#networks:
#  default:
#    external:
#      name: unreal-network

  master:
    image: docker.io/webcenter/activemq
    ports:
      - 61617:61616
      - 61613:61613
      - 8161:8161
    restart: always
    volumes:
      - /opt/docker/activemq/data/activemq:/opt/activemq/data
      - /opt/docker/activemq/logs/activemq:/var/log/activemq
      - /opt/docker/activemq/master/conf/activemq.xml:/opt/activemq/conf/activemq.xml
      - /etc/localtime:/etc/localtime:ro
    environment:
      - ACTIVEMQ_ADMIN_LOGIN=admin
      - ACTIVEMQ_ADMIN_PASSWORD=admin
      - ACTIVEMQ_CONFIG_MINMEMORY=512
      - ACTIVEMQ_CONFIG_MAXMEMORY=2048
    network_mode: "host"

  slave:
    image: docker.io/webcenter/activemq
    ports:
      - 61618:61616
      - 61613:61613
      - 8161:8161
    restart: always
    volumes:
      - /opt/docker/activemq/data/activemq:/opt/activemq/data
      - /opt/docker/activemq/logs/activemq:/var/logs/activemq
      - /opt/docker/activemq/slave/conf/activemq.xml:/opt/activemq/conf/activemq.xml
      - /etc/localtime:/etc/localtime:ro
    environment:
      - ACTIVEMQ_ADMIN_LOGIN=admin
      - ACTIVEMQ_ADMIN_PASSWORD=admin
      - ACTIVEMQ_CONFIG_MINMEMORY=512
      - ACTIVEMQ_CONFIG_MAXMEMORY=2048
    network_mode: "host"

 

# For a config template, first run a standalone activemq container, copy /opt/activemq/conf/activemq.xml out of it, and then change only the key settings shown below, leaving the rest untouched. Note that with network_mode: "host" the ports: mappings above are ignored, which is why each broker's openwire connector must listen on its own port (61616/61617/61618).

vim cluster/conf/activemq.xml

        <transportConnectors>
            <transportConnector name="openwire" uri="tcp://0.0.0.0:61616?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="amqp" uri="amqp://0.0.0.0:5672?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="stomp" uri="stomp://0.0.0.0:61613?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="mqtt" uri="mqtt://0.0.0.0:1883?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="ws" uri="ws://0.0.0.0:61614?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
        </transportConnectors>
        <networkConnectors>
                <networkConnector uri="static:(tcp://192.168.3.221:61617,tcp://192.168.3.221:61618)" duplex="true" />
        </networkConnectors>


vim master/conf/activemq.xml

        <transportConnectors>
            <transportConnector name="openwire" uri="tcp://0.0.0.0:61617?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="amqp" uri="amqp://0.0.0.0:5672?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="stomp" uri="stomp://0.0.0.0:61613?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="mqtt" uri="mqtt://0.0.0.0:1883?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="ws" uri="ws://0.0.0.0:61614?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
        </transportConnectors>
        <networkConnectors>
                <networkConnector uri="static:(tcp:192.168.3.221:61616)" duplex="true" />
        </networkConnectors>


vim slave/conf/activemq.xml

        <transportConnectors>
            <transportConnector name="openwire" uri="tcp://0.0.0.0:61618?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="amqp" uri="amqp://0.0.0.0:5672?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="stomp" uri="stomp://0.0.0.0:61613?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="mqtt" uri="mqtt://0.0.0.0:1883?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
            <transportConnector name="ws" uri="ws://0.0.0.0:61614?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
        </transportConnectors>
        <networkConnectors>
                <networkConnector uri="static:(tcp://192.168.3.221:61616)" duplex="true" />
        </networkConnectors>

 

Deploy and start

docker-compose up -d
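A quick way to confirm a broker is up is its web console on port 8161, using the admin credentials from the environment section; a minimal probe:

# HEAD request against the admin console (admin/admin as configured above)
curl -I -u admin:admin http://192.168.3.221:8161/admin/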

 

 

4. MongoDB (single node)

# Create the corresponding directories
mkdir data log

 

vim docker-compose.yml

version: "3"
services:
  mongodb:
    image: mongo
    container_name: mongodb
    restart: always
    ports:
      - 27017:27017
    volumes:
      - /opt/docker/mongodb/data:/data/db
      - /opt/docker/mongodb/log:/var/log/mongodb
      - /etc/localtime:/etc/localtime:ro
#    command: --wiredTigerCacheSizeGB 4 --auth   # cap the cache size; require authentication
    environment:
      - MONGO_INITDB_ROOT_USERNAME=admin
      - MONGO_INITDB_ROOT_PASSWORD=admin
#  mongo-express:   # web admin UI for MongoDB
#    image: mongo-express
#    container_name: mongo-express
#    restart: always
#    ports:
#      - 8081:8081
#    environment:
#      - ME_CONFIG_MONGODB_ADMINUSERNAME=admin
#      - ME_CONFIG_MONGODB_ADMINPASSWORD=admin
#      - ME_CONFIG_MONGODB_SERVER=mongodb
#    network_mode: "host"
#networks:
#  default:
#    name: mongodb_network

 

Deploy and start

docker-compose up -d
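A minimal connectivity check, assuming the root credentials above (recent mongo images ship mongosh; older ones use the mongo shell instead):

# expect { ok: 1 } in the output
docker exec -it mongodb mongosh -u admin -p admin --eval 'db.runCommand({ ping: 1 })'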

 

 

5. ZooKeeper cluster

# Create the corresponding directories
mkdir -p node01/data node02/data node03/data

 

vim docker-compose.yml

version: '3'
services:
    zoo1:
        image: zookeeper:3.4.13
        restart: always
        container_name: zoo1
        ports:
            - "2181:2181"
        volumes:
            - /opt/docker/zookeeper/node01/data/:/data
            - /opt/docker/zookeeper/node01/data/datalog:/datalog
            - /opt/docker/zookeeper/node01/data/logs:/logs
            - /etc/localtime:/etc/localtime:ro
        environment:
            ZOO_MY_ID: 1
            ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888

    zoo2:
        image: zookeeper:3.4.13
        restart: always
        container_name: zoo2
        ports:
            - "2182:2181"
        volumes:
            - /opt/docker/zookeeper/node02/data/:/data
            - /opt/docker/zookeeper/node02/data/datalog:/datalog
            - /opt/docker/zookeeper/node02/data/logs:/logs
            - /etc/localtime:/etc/localtime:ro
        environment:
            ZOO_MY_ID: 2
            ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
 
    zoo3:
        image: zookeeper:3.4.13
        restart: always
        container_name: zoo3
        ports:
            - "2183:2181"
        volumes:
            - /opt/docker/zookeeper/node03/data/:/data
            - /opt/docker/zookeeper/node03/data/datalog:/datalog
            - /opt/docker/zookeeper/node03/data/logs:/logs
            - /etc/localtime:/etc/localtime:ro
        environment:
            ZOO_MY_ID: 3
            ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888

 

Deploy and start

docker-compose up -d
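To verify the ensemble elected a leader, query each node's status; zkServer.sh is on the PATH inside the official zookeeper image:

# one node should report Mode: leader, the other two Mode: follower
for n in zoo1 zoo2 zoo3; do docker exec "$n" zkServer.sh status; done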

 

 

6. Elasticsearch, Logstash, Kibana, head, ik

# Create directories and files
mkdir -p config  data  kibana/config  logs  logstash/config  logstash/pipeline  plugins

 

vim docker-compose.yaml

version: '3.1'
services:
  elasticsearch:
    image: elasticsearch:7.9.3
    container_name: elasticsearch
    environment:
      - "discovery.type=single-node"
      - "bootstrap.memory_lock=true"
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    volumes:
      - /opt/docker/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /opt/docker/elasticsearch/data:/usr/share/elasticsearch/data 
      - /opt/docker/elasticsearch/logs:/usr/share/elasticsearch/logs
      - /opt/docker/elasticsearch/plugins:/usr/share/elasticsearch/plugins
      - /etc/localtime:/etc/localtime:ro
    ulimits:
      memlock:
        soft: -1
        hard: -1
    ports:
      - 9200:9200
      - 9300:9300
    restart: always
    network_mode: "host"


  logstash:
    image: docker.io/uselagoon/logstash-7  # couldn't find the exact matching version, so using a 7.x build
    container_name: logstash
    ports:
      - "5045:5045"
   # environment:
      #XPACK_MONITORING_ENABLED: "false"
      #LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    volumes:
      - /opt/docker/elasticsearch/logstash/pipeline/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
      - /opt/docker/elasticsearch/logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - /etc/localtime:/etc/localtime:ro
    network_mode: "host"
    restart: always
    depends_on:
      - elasticsearch


  kibana:
    image: docker.elastic.co/kibana/kibana:7.9.3
    container_name: kibana
    depends_on:
      - elasticsearch
    volumes:
      - /opt/docker/elasticsearch/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
      - /etc/localtime:/etc/localtime:ro
    environment:
      - "ELASTICSEARCH_URL=http://192.168.3.221:9200"
      - "I18N_LOCALE=zh-CN"
    ports:
      - 5601:5601
    restart: always
    network_mode: "host"

  es-head:
    image: docker.io/alivv/elasticsearch-head
    container_name: elasticsearch-head
    ports:
      - "9100:9100"
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime:ro
    network_mode: "host"
    depends_on:   # start only after es is up
      - elasticsearch

 

Config files

vim config/elasticsearch.yml

http.port: 9200
http.host: 0.0.0.0
path.logs: /usr/share/elasticsearch/logs
path.data: /usr/share/elasticsearch/data
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
xpack.security.enabled: true
xpack.license.self_generated.type: basic
xpack.security.transport.ssl.enabled: true
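With xpack.security.enabled: true, the built-in users have no usable passwords until they are initialized; a sketch of the usual 7.x bootstrap, assuming the elastic password used later in this post (vantop2023):

# run once inside the container to set built-in user passwords
docker exec -it elasticsearch bin/elasticsearch-setup-passwords interactive

# then verify authenticated access
curl -u elastic:vantop2023 http://192.168.3.221:9200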


vim logstash/config/logstash.yml

http.host: "0.0.0.0"


vim logstash/pipeline/logstash.conf

input {
    beats {
        port => "5044"
    }
}
filter {
    grok {
        match => { "message" => "%{COMBINEDAPACHELOG}"}
    }
    geoip {
        source => "clientip"
    }
}
output {
    stdout { codec => rubydebug }
    elasticsearch {
        hosts => ["192.168.3.221:9200"]
        user => "elastic"
    password => "vantop2023"
    }
}
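Note that the beats input above listens on 5044 while the compose file maps 5045; under network_mode: "host" the ports: mapping is ignored anyway, so 5044 is the effective listener. Once running, Logstash's monitoring API (port 9600 by default) gives a quick health check:

# node info from the Logstash monitoring API
curl -s http://localhost:9600/?pretty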


vim kibana/config/kibana.yml

server.name: kibana
# host address; 0.0.0.0 listens on all interfaces
server.host: "0.0.0.0"
# URL Kibana uses to reach es
elasticsearch.hosts: [ "http://192.168.3.221:9200" ]
elasticsearch.username: 'elastic'
elasticsearch.password: 'vantop2023'
# show the login page
xpack.monitoring.ui.container.elasticsearch.enabled: true
# UI language
i18n.locale: "zh-CN"
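Finally, once the whole stack is up, Kibana's status API is a convenient last check (then log in at http://192.168.3.221:5601 with the elastic user):

# returns JSON with the overall status of Kibana and its plugins
curl -s http://192.168.3.221:5601/api/status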