Docker下安裝zookeeper(單機和集群)

主頁 > 知識庫 > Docker下安裝zookeeper(單機和集群)

Docker下安裝zookeeper(單機和集群)

熱門標簽:網站上插入地圖標注內容 重慶營銷外呼系統排名 地圖標注企業名稱侵權案件 400電話辦理哪家好廠商 工廠位置地圖標注 地圖標注需要現場嗎 鶴壁電銷外呼系統怎么安裝 企業400電話辦理哪正規 繽客網注冊時地圖標注出不來

啟動Docker后,先看一下我們有哪些選擇。

有官方的當然選擇官方啦~

下載:

[root@localhost admin]# docker pull zookeeper
Using default tag: latest
Trying to pull repository docker.io/library/zookeeper ...
latest: Pulling from docker.io/library/zookeeper
1ab2bdfe9778: Already exists
7aaf9a088d61: Pull complete
80a55c9c9fe8: Pull complete
a0086b0e6eec: Pull complete
4165e7457cad: Pull complete
bcba13bcf3a1: Pull complete
41c03a109e47: Pull complete
4d5281c6b0d4: Pull complete
Digest: sha256:175d6bb1471e1e37a48bfa41a9da047c80fade60fd585eae3a0e08a4ce1d39ed
Status: Downloaded newer image for docker.io/zookeeper:latest

查看鏡像詳情

[root@localhost admin]# docker images
REPOSITORY          TAG         IMAGE ID      CREATED       SIZE
192.168.192.128:443/hello-2  latest       0c24558dd388    42 hours ago    660 MB
192.168.192.128:443/hello   latest       a3ba3d430bed    42 hours ago    660 MB
docker.io/nginx        latest       5a3221f0137b    13 days ago     126 MB
docker.io/zookeeper      latest       3487af26dee9    13 days ago     225 MB
docker.io/registry      latest       f32a97de94e1    5 months ago    25.8 MB
docker.io/mongo        latest       8bf72137439e    12 months ago    380 MB
docker.io/influxdb      latest       34de2bdc2d7f    12 months ago    213 MB
docker.io/centos       latest       5182e96772bf    12 months ago    200 MB
docker.io/grafana/grafana   latest       3e16e05be9a3    13 months ago    245 MB
docker.io/hello-world     latest       2cb0d9787c4d    13 months ago    1.85 kB
docker.io/java        latest       d23bdf5b1b1b    2 years ago     643 MB
[root@localhost admin]# docker inspect 3487af26dee9
[
  {
    "Id": "sha256:3487af26dee9ef9eacee9a97521bc4f0243bef0b285247258c32f4a03cab92c5",
    "RepoTags": [
      "docker.io/zookeeper:latest"
    ],
    "RepoDigests": [
      "docker.io/zookeeper@sha256:175d6bb1471e1e37a48bfa41a9da047c80fade60fd585eae3a0e08a4ce1d39ed"
    ],
    "Parent": "",
    "Comment": "",
    "Created": "2019-08-15T06:10:50.178554969Z",
    "Container": "9a38467115f1952161d6075135d5c5287967282b834cfe68183339c810f9652b",
    "ContainerConfig": {
      "Hostname": "9a38467115f1",
      "Domainname": "",
      "User": "",
      "AttachStdin": false,
      "AttachStdout": false,
      "AttachStderr": false,
      "ExposedPorts": {
        "2181/tcp": {},
        "2888/tcp": {},
        "3888/tcp": {},
        "8080/tcp": {}
      },
      "Tty": false,
      "OpenStdin": false,
      "StdinOnce": false,
      "Env": [
        "PATH=/usr/local/openjdk-8/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/apache-zookeeper-3.5.5-bin/bin",
        "LANG=C.UTF-8",
        "JAVA_HOME=/usr/local/openjdk-8",
        "JAVA_VERSION=8u222",
        "JAVA_BASE_URL=https://github.com/AdoptOpenJDK/openjdk8-upstream-binaries/releases/download/jdk8u222-b10/OpenJDK8U-jre_",
        "JAVA_URL_VERSION=8u222b10",
        "ZOO_CONF_DIR=/conf",
        "ZOO_DATA_DIR=/data",
        "ZOO_DATA_LOG_DIR=/datalog",
        "ZOO_LOG_DIR=/logs",
        "ZOO_TICK_TIME=2000",
        "ZOO_INIT_LIMIT=5",
        "ZOO_SYNC_LIMIT=2",
        "ZOO_AUTOPURGE_PURGEINTERVAL=0",
        "ZOO_AUTOPURGE_SNAPRETAINCOUNT=3",
        "ZOO_MAX_CLIENT_CNXNS=60",
        "ZOO_STANDALONE_ENABLED=true",
        "ZOO_ADMINSERVER_ENABLED=true",
        "ZOOCFGDIR=/conf"
      ],
      "Cmd": [
        "/bin/sh",
        "-c",
        "#(nop) ",
        "CMD [\"zkServer.sh\" \"start-foreground\"]"
      ],
      "ArgsEscaped": true,
      "Image": "sha256:20bf3cc1bd5b5766b79da5265e94007d0802ce241df1636d0f63e211a79a0e3e",
      "Volumes": {
        "/data": {},
        "/datalog": {},
        "/logs": {}
      },
      "WorkingDir": "/apache-zookeeper-3.5.5-bin",
      "Entrypoint": [
        "/docker-entrypoint.sh"
      ],
      "OnBuild": null,
      "Labels": {}
    },
    "DockerVersion": "18.06.1-ce",
    "Author": "",
    "Config": {
      "Hostname": "",
      "Domainname": "",
      "User": "",
      "AttachStdin": false,
      "AttachStdout": false,
      "AttachStderr": false,
      "ExposedPorts": {
        "2181/tcp": {},
        "2888/tcp": {},
        "3888/tcp": {},
        "8080/tcp": {}
      },
      "Tty": false,
      "OpenStdin": false,
      "StdinOnce": false,
      "Env": [
        "PATH=/usr/local/openjdk-8/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/apache-zookeeper-3.5.5-bin/bin",
        "LANG=C.UTF-8",
        "JAVA_HOME=/usr/local/openjdk-8",
        "JAVA_VERSION=8u222",
        "JAVA_BASE_URL=https://github.com/AdoptOpenJDK/openjdk8-upstream-binaries/releases/download/jdk8u222-b10/OpenJDK8U-jre_",
        "JAVA_URL_VERSION=8u222b10",
        "ZOO_CONF_DIR=/conf",
        "ZOO_DATA_DIR=/data",
        "ZOO_DATA_LOG_DIR=/datalog",
        "ZOO_LOG_DIR=/logs",
        "ZOO_TICK_TIME=2000",
        "ZOO_INIT_LIMIT=5",
        "ZOO_SYNC_LIMIT=2",
        "ZOO_AUTOPURGE_PURGEINTERVAL=0",
        "ZOO_AUTOPURGE_SNAPRETAINCOUNT=3",
        "ZOO_MAX_CLIENT_CNXNS=60",
        "ZOO_STANDALONE_ENABLED=true",
        "ZOO_ADMINSERVER_ENABLED=true",
        "ZOOCFGDIR=/conf"
      ],
      "Cmd": [
        "zkServer.sh",
        "start-foreground"
      ],
      "ArgsEscaped": true,
      "Image": "sha256:20bf3cc1bd5b5766b79da5265e94007d0802ce241df1636d0f63e211a79a0e3e",
      "Volumes": {
        "/data": {},
        "/datalog": {},
        "/logs": {}
      },
      "WorkingDir": "/apache-zookeeper-3.5.5-bin",
      "Entrypoint": [
        "/docker-entrypoint.sh"
      ],
      "OnBuild": null,
      "Labels": null
    },
    "Architecture": "amd64",
    "Os": "linux",
    "Size": 225126346,
    "VirtualSize": 225126346,
    "GraphDriver": {
      "Name": "overlay2",
      "Data": {
        "LowerDir": "/var/lib/docker/overlay2/92185ebf7638a7b34180cfb87795dd758405cbad4fd0139b92a227d1a4b61847/diff:/var/lib/docker/overlay2/8787e91f5c03a7c03cee072019eca49a0402a0a0902be39ed0b5d651a79cce35/diff:/var/lib/docker/overlay2/ce5864ddfa4d1478047aa9fcaa03744e8a4078ebe43b41e7836c96c54c724044/diff:/var/lib/docker/overlay2/fc99437bcfbabb9e8234c06c90d1c60e58c34ac053aff1adc368b7ad3a50c158/diff:/var/lib/docker/overlay2/1779297a8980830229bd4bf58bd741730956d6797332fd07b863a1b48dcb6fa2/diff:/var/lib/docker/overlay2/ee735aa3608d890ac4751dd93581a67cb54a5dd4714081e9d09d0ebd9dbc3501/diff:/var/lib/docker/overlay2/cf6b3cbc42f3c8d1fb09b29db0dafbb4dceb120925970ab8a3871eaa8562414c/diff",
        "MergedDir": "/var/lib/docker/overlay2/a7fcc1b78c472cde943f20d1d4495f145308507b5fe3da8800c33dc4ce426156/merged",
        "UpperDir": "/var/lib/docker/overlay2/a7fcc1b78c472cde943f20d1d4495f145308507b5fe3da8800c33dc4ce426156/diff",
        "WorkDir": "/var/lib/docker/overlay2/a7fcc1b78c472cde943f20d1d4495f145308507b5fe3da8800c33dc4ce426156/work"
      }
    },
    "RootFS": {
      "Type": "layers",
      "Layers": [
        "sha256:1c95c77433e8d7bf0f519c9d8c9ca967e2603f0defbf379130d9a841cca2e28e",
        "sha256:2bf534399acac9c6b09a0b1d931223808000b04400a749f08187ed9ee435738d",
        "sha256:eb25e0278d41b9ac637d8cb2e391457cf44ce8d2bfe0646d0c9faefc96413f91",
        "sha256:e54bd3566d9ef3e1309a5af6caf8682f32c6ac4d6adfcbd3e601cfee4e2e0e85",
        "sha256:c79435051d529a7b86f5f9fc32e7e2ec401929434e5596f02a2af731f55c9f28",
        "sha256:76e0d7b2d700e6d17924b985703c7b5b84fb39ddcc0a1181b41217c2a11dffc4",
        "sha256:eecdc37df6afd77091641588f9639f63b65e8eb141e56529e00da44419c5bd04",
        "sha256:36e788f2d91a89375df5901f31cca33776f887c00ddfd3cf9f2466fa4cb794d6"
      ]
    }
  }
]

默認拉取最新的是3.5.X版本,如果你需要3.4.X版本的,要指定標簽

單機

# 最后那個參數是鏡像的ID
[root@localhost admin]# docker run -d -p 2181:2181 --name some-zookeeper --restart always 3487af26dee9
d5c6f857cd88c342acf63dd58e838a4cdf912daa6c8c0115091147136e819307
[root@localhost admin]# docker ps
CONTAINER ID    IMAGE        COMMAND         CREATED       STATUS       PORTS                         NAMES
d5c6f857cd88    3487af26dee9    "/docker-entrypoin..."  4 seconds ago    Up 3 seconds    2888/tcp, 3888/tcp, 0.0.0.0:2181->2181/tcp, 8080/tcp  some-zookeeper
[root@localhost admin]# docker exec -it d5c6f857cd88 bash
root@d5c6f857cd88:/apache-zookeeper-3.5.5-bin# ./bin/zkCli.sh
Connecting to localhost:2181
2019-08-29 07:15:21,623 [myid:] - INFO [main:Environment@109] - Client environment:zookeeper.version=3.5.5-390fe37ea45dee01bf87dc1c042b5e3dcce88653, built on 05/03/2019 12:07 GMT
2019-08-29 07:15:21,679 [myid:] - INFO [main:Environment@109] - Client environment:host.name=d5c6f857cd88
2019-08-29 07:15:21,680 [myid:] - INFO [main:Environment@109] - Client environment:java.version=1.8.0_222
2019-08-29 07:15:21,717 [myid:] - INFO [main:Environment@109] - Client environment:java.vendor=Oracle Corporation
2019-08-29 07:15:21,718 [myid:] - INFO [main:Environment@109] - Client environment:java.home=/usr/local/openjdk-8
2019-08-29 07:15:21,725 [myid:] - INFO [main:Environment@109] - Client environment:java.class.path=/apache-zookeeper-3.5.5-bin/bin/../zookeeper-server/target/classes:/apache-zookeeper-3.5.5-bin/bin/../build/classes:/apache-zookeeper-3.5.5-bin/bin/../zookeeper-server/target/lib/*.jar:/apache-zookeeper-3.5.5-bin/bin/../build/lib/*.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/zookeeper-jute-3.5.5.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/zookeeper-3.5.5.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/slf4j-log4j12-1.7.25.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/slf4j-api-1.7.25.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/netty-all-4.1.29.Final.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/log4j-1.2.17.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/json-simple-1.1.1.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jline-2.11.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-util-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-servlet-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-server-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-security-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-io-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-http-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/javax.servlet-api-3.1.0.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jackson-databind-2.9.8.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jackson-core-2.9.8.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jackson-annotations-2.9.0.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/commons-cli-1.2.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/audience-annotations-0.5.0.jar:/apache-zookeeper-3.5.5-bin/bin/../zookeeper-*.jar:/apache-zookeeper-3.5.5-bin/bin/../zookeeper-server/src/main/resources/lib/*.jar:/conf:
2019-08-29 07:15:22,108 [myid:] - INFO [main:Environment@109] - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
2019-08-29 07:15:22,109 [myid:] - INFO [main:Environment@109] - Client environment:java.io.tmpdir=/tmp
2019-08-29 07:15:22,109 [myid:] - INFO [main:Environment@109] - Client environment:java.compiler=<NA>
2019-08-29 07:15:22,109 [myid:] - INFO [main:Environment@109] - Client environment:os.name=Linux
2019-08-29 07:15:22,109 [myid:] - INFO [main:Environment@109] - Client environment:os.arch=amd64
2019-08-29 07:15:22,110 [myid:] - INFO [main:Environment@109] - Client environment:os.version=3.10.0-862.9.1.el7.x86_64
2019-08-29 07:15:22,110 [myid:] - INFO [main:Environment@109] - Client environment:user.name=root
2019-08-29 07:15:22,110 [myid:] - INFO [main:Environment@109] - Client environment:user.home=/root
2019-08-29 07:15:22,110 [myid:] - INFO [main:Environment@109] - Client environment:user.dir=/apache-zookeeper-3.5.5-bin
2019-08-29 07:15:22,118 [myid:] - INFO [main:Environment@109] - Client environment:os.memory.free=11MB
2019-08-29 07:15:22,148 [myid:] - INFO [main:Environment@109] - Client environment:os.memory.max=247MB
2019-08-29 07:15:22,148 [myid:] - INFO [main:Environment@109] - Client environment:os.memory.total=15MB
2019-08-29 07:15:22,206 [myid:] - INFO [main:ZooKeeper@868] - Initiating client connection, connectString=localhost:2181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher@3b95a09c
2019-08-29 07:15:22,239 [myid:] - INFO [main:X509Util@79] - Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation
2019-08-29 07:15:22,285 [myid:] - INFO [main:ClientCnxnSocket@237] - jute.maxbuffer value is 4194304 Bytes
2019-08-29 07:15:22,366 [myid:] - INFO [main:ClientCnxn@1653] - zookeeper.request.timeout value is 0. feature enabled=
Welcome to ZooKeeper!
JLine support is enabled
2019-08-29 07:15:22,563 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1112] - Opening socket connection to server localhost/0:0:0:0:0:0:0:1:2181. Will not attempt to authenticate using SASL (unknown error)
2019-08-29 07:15:23,443 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@959] - Socket connection established, initiating session, client: /0:0:0:0:0:0:0:1:37198, server: localhost/0:0:0:0:0:0:0:1:2181
2019-08-29 07:15:23,520 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1394] - Session establishment complete on server localhost/0:0:0:0:0:0:0:1:2181, sessionid = 0x10001216d990000, negotiated timeout = 30000
 
WATCHER::
 
WatchedEvent state:SyncConnected type:None path:null
[zk: localhost:2181(CONNECTED) 0] ls /
[zookeeper]
[zk: localhost:2181(CONNECTED) 1] quit
 
WATCHER::
 
WatchedEvent state:Closed type:None path:null
2019-08-29 07:15:37,042 [myid:] - INFO [main:ZooKeeper@1422] - Session: 0x10001216d990000 closed
2019-08-29 07:15:37,043 [myid:] - INFO [main-EventThread:ClientCnxn$EventThread@524] - EventThread shut down for session: 0x10001216d990000
root@d5c6f857cd88:/apache-zookeeper-3.5.5-bin# exit
exit
[root@localhost admin]#

在外部訪問(192.168.192.128:2181)

集群

環境:單臺宿主機(192.168.192.128),啟動三個zookeeper容器。

這里涉及一個問題,就是Docker容器之間通信的問題,這個很重要!

Docker有三種網絡模式,bridge、host、none,在你創建容器的時候,不指定--network默認是bridge。

bridge:為每一個容器分配IP,并將容器連接到一個docker0虛擬網橋,通過docker0網橋與宿主機通信。也就是說,此模式下,你不能用宿主機的IP+容器映射端口來進行Docker容器之間的通信。

host:容器不會虛擬自己的網卡,配置自己的IP,而是使用宿主機的IP和端口。這樣一來,Docker容器之間的通信就可以用宿主機的IP+容器映射端口

none:無網絡。

=====================================================

先在本地創建目錄:

[root@localhost admin]# mkdir /usr/local/zookeeper-cluster
[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node1
[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node2
[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node3
[root@localhost admin]# ll /usr/local/zookeeper-cluster/
total 0
drwxr-xr-x. 2 root root 6 Aug 28 23:02 node1
drwxr-xr-x. 2 root root 6 Aug 28 23:02 node2
drwxr-xr-x. 2 root root 6 Aug 28 23:02 node3

然后執行命令啟動

docker run -d -p 2181:2181 -p 2888:2888 -p 3888:3888 --name zookeeper_node1 --privileged --restart always \

-v /usr/local/zookeeper-cluster/node1/volumes/data:/data \

-v /usr/local/zookeeper-cluster/node1/volumes/datalog:/datalog \

-v /usr/local/zookeeper-cluster/node1/volumes/logs:/logs \

-e ZOO_MY_ID=1 \

-e "ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183" 3487af26dee9

docker run -d -p 2182:2181 -p 2889:2888 -p 3889:3888 --name zookeeper_node2 --privileged --restart always \

-v /usr/local/zookeeper-cluster/node2/volumes/data:/data \

-v /usr/local/zookeeper-cluster/node2/volumes/datalog:/datalog \

-v /usr/local/zookeeper-cluster/node2/volumes/logs:/logs \

-e ZOO_MY_ID=2 \

-e "ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183" 3487af26dee9

docker run -d -p 2183:2181 -p 2890:2888 -p 3890:3888 --name zookeeper_node3 --privileged --restart always \

-v /usr/local/zookeeper-cluster/node3/volumes/data:/data \

-v /usr/local/zookeeper-cluster/node3/volumes/datalog:/datalog \

-v /usr/local/zookeeper-cluster/node3/volumes/logs:/logs \

-e ZOO_MY_ID=3 \

-e "ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183" 3487af26dee9

【坑】

乍一看,沒什么問題啊,首先映射端口到宿主機,然后三個zookeeper之間的訪問地址則是宿主機IP:映射端口,沒毛病啊;

看我前面講的網絡模式就能看出問題,ZOO_SERVERS里面的IP有問題,犯這個錯誤都是不了解Docker的網絡模式的。什么錯誤往下看。

關于ZOO_SERVERS

什么意思呢,3.5.0開始,不應該再使用clientPort和clientPortAddress配置參數。相反,這些信息現在是server關鍵字規范的一部分。

端口映射三個容器不一樣,比如2181/2182/2183,因為是一臺宿主機嘛,端口不能沖突,如果你不在同一臺機器,就不用修改端口。

最后的那個參數是鏡像ID,也可以是鏡像名稱:TAG。

--privileged=true參數是為了解決【chown: changing ownership of '/data': Permission denied】,也可以省略true

執行結果:

[root@localhost admin]# docker run -d -p 2181:2181 -p 2888:2888 -p 3888:3888 --name zookeeper_node1 --privileged --restart always \

> -v /usr/local/zookeeper-cluster/node1/volumes/data:/data \

> -v /usr/local/zookeeper-cluster/node1/volumes/datalog:/datalog \

> -v /usr/local/zookeeper-cluster/node1/volumes/logs:/logs \

> -e ZOO_MY_ID=1 \

> -e "ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183" 3487af26dee9
4bfa6bbeb936037e178a577e5efbd06d4a963e91d67274413b933fd189917776
[root@localhost admin]# docker run -d -p 2182:2181 -p 2889:2888 -p 3889:3888 --name zookeeper_node2 --privileged --restart always \

> -v /usr/local/zookeeper-cluster/node2/volumes/data:/data \

> -v /usr/local/zookeeper-cluster/node2/volumes/datalog:/datalog \

> -v /usr/local/zookeeper-cluster/node2/volumes/logs:/logs \

> -e ZOO_MY_ID=2 \

> -e "ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183" 3487af26dee9
dbb7f1f323a09869d043152a4995e73bad5f615fd81bf11143fd1c28180f9869
[root@localhost admin]# docker run -d -p 2183:2181 -p 2890:2888 -p 3890:3888 --name zookeeper_node3 --privileged --restart always \

> -v /usr/local/zookeeper-cluster/node3/volumes/data:/data \

> -v /usr/local/zookeeper-cluster/node3/volumes/datalog:/datalog \

> -v /usr/local/zookeeper-cluster/node3/volumes/logs:/logs \

> -e ZOO_MY_ID=3 \

> -e "ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183" 3487af26dee9
6dabae1d92f0e861cc7515c014c293f80075c2762b254fc56312a6d3b450a919
[root@localhost admin]#

查看啟動的容器

[root@localhost admin]# docker ps
CONTAINER ID    IMAGE        COMMAND         CREATED       STATUS       PORTS                                       NAMES
6dabae1d92f0    3487af26dee9    "/docker-entrypoin..."  31 seconds ago   Up 29 seconds    8080/tcp, 0.0.0.0:2183->2181/tcp, 0.0.0.0:2890->2888/tcp, 0.0.0.0:3890->3888/tcp  zookeeper_node3
dbb7f1f323a0    3487af26dee9    "/docker-entrypoin..."  36 seconds ago   Up 35 seconds    8080/tcp, 0.0.0.0:2182->2181/tcp, 0.0.0.0:2889->2888/tcp, 0.0.0.0:3889->3888/tcp  zookeeper_node2
4bfa6bbeb936    3487af26dee9    "/docker-entrypoin..."  46 seconds ago   Up 45 seconds    0.0.0.0:2181->2181/tcp, 0.0.0.0:2888->2888/tcp, 0.0.0.0:3888->3888/tcp, 8080/tcp  zookeeper_node1
[root@localhost admin]# 

不是說有錯誤嗎?怎么還啟動成功了??我們來看下節點1的啟動日志

[root@localhost admin]# docker logs -f 4bfa6bbeb936
ZooKeeper JMX enabled by default

...

2019-08-29 09:20:22,665 [myid:1] - WARN [WorkerSender[myid=1]:QuorumCnxManager@677] - Cannot open channel to 2 at election address /192.168.192.128:3889
java.net.ConnectException: Connection refused (Connection refused)
  at java.net.PlainSocketImpl.socketConnect(Native Method)
  at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350)
  at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206)
  at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188)
  at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
  at java.net.Socket.connect(Socket.java:589)
  at org.apache.zookeeper.server.quorum.QuorumCnxManager.connectOne(QuorumCnxManager.java:648)
  at org.apache.zookeeper.server.quorum.QuorumCnxManager.connectOne(QuorumCnxManager.java:705)
  at org.apache.zookeeper.server.quorum.QuorumCnxManager.toSend(QuorumCnxManager.java:618)
  at org.apache.zookeeper.server.quorum.FastLeaderElection$Messenger$WorkerSender.process(FastLeaderElection.java:477)
  at org.apache.zookeeper.server.quorum.FastLeaderElection$Messenger$WorkerSender.run(FastLeaderElection.java:456)
  at java.lang.Thread.run(Thread.java:748)
2019-08-29 09:20:22,666 [myid:1] - WARN [WorkerSender[myid=1]:QuorumCnxManager@677] - Cannot open channel to 3 at election address /192.168.192.128:3890
java.net.ConnectException: Connection refused (Connection refused)
  at java.net.PlainSocketImpl.socketConnect(Native Method)
  at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350)
  at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206)
  at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188)
  at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
  at java.net.Socket.connect(Socket.java:589)
  at org.apache.zookeeper.server.quorum.QuorumCnxManager.connectOne(QuorumCnxManager.java:648)
  at org.apache.zookeeper.server.quorum.QuorumCnxManager.connectOne(QuorumCnxManager.java:705)
  at org.apache.zookeeper.server.quorum.QuorumCnxManager.toSend(QuorumCnxManager.java:618)
  at org.apache.zookeeper.server.quorum.FastLeaderElection$Messenger$WorkerSender.process(FastLeaderElection.java:477)
  at org.apache.zookeeper.server.quorum.FastLeaderElection$Messenger$WorkerSender.run(FastLeaderElection.java:456)
  at java.lang.Thread.run(Thread.java:748)

連接不上2 和 3,為什么呢,因為在默認的Docker網絡模式下,通過宿主機的IP+映射端口,根本找不到啊!他們有自己的IP啊!如下:

[root@localhost admin]# docker ps
CONTAINER ID    IMAGE        COMMAND         CREATED       STATUS       PORTS                                       NAMES
6dabae1d92f0    3487af26dee9    "/docker-entrypoin..."  5 minutes ago    Up 5 minutes    8080/tcp, 0.0.0.0:2183->2181/tcp, 0.0.0.0:2890->2888/tcp, 0.0.0.0:3890->3888/tcp  zookeeper_node3
dbb7f1f323a0    3487af26dee9    "/docker-entrypoin..."  6 minutes ago    Up 6 minutes    8080/tcp, 0.0.0.0:2182->2181/tcp, 0.0.0.0:2889->2888/tcp, 0.0.0.0:3889->3888/tcp  zookeeper_node2
4bfa6bbeb936    3487af26dee9    "/docker-entrypoin..."  6 minutes ago    Up 6 minutes    0.0.0.0:2181->2181/tcp, 0.0.0.0:2888->2888/tcp, 0.0.0.0:3888->3888/tcp, 8080/tcp  zookeeper_node1


[root@localhost admin]# docker inspect 4bfa6bbeb936
      "Networks": {
        "bridge": {
          "IPAMConfig": null,
          "Links": null,
          "Aliases": null,
          "NetworkID": "5fc1ce4362afe3d34fdf260ab0174c36fe4b7daf2189702eae48101a755079f3",
          "EndpointID": "368237e4c903cc663111f1fe33ac4626a9100fb5a22aec85f5eccbc6968a1631",
          "Gateway": "172.17.0.1",
          "IPAddress": "172.17.0.2",
          "IPPrefixLen": 16,
          "IPv6Gateway": "",
          "GlobalIPv6Address": "",
          "GlobalIPv6PrefixLen": 0,
          "MacAddress": "02:42:ac:11:00:02"
        }
      }
    }
  }
]

[root@localhost admin]# docker inspect dbb7f1f323a0
      "Networks": {
        "bridge": {
          "IPAMConfig": null,
          "Links": null,
          "Aliases": null,
          "NetworkID": "5fc1ce4362afe3d34fdf260ab0174c36fe4b7daf2189702eae48101a755079f3",
          "EndpointID": "8a9734044a566d5ddcd7cbbf6661abb2730742f7c73bd8733ede9ed8ef106659",
          "Gateway": "172.17.0.1",
          "IPAddress": "172.17.0.3",
          "IPPrefixLen": 16,
          "IPv6Gateway": "",
          "GlobalIPv6Address": "",
          "GlobalIPv6PrefixLen": 0,
          "MacAddress": "02:42:ac:11:00:03"
        }
      }
    }
  }
]

[root@localhost admin]# docker inspect 6dabae1d92f0
      "Networks": {
        "bridge": {
          "IPAMConfig": null,
          "Links": null,
          "Aliases": null,
          "NetworkID": "5fc1ce4362afe3d34fdf260ab0174c36fe4b7daf2189702eae48101a755079f3",
          "EndpointID": "b10329b9940a07aacb016d8d136511ec388de02bf3bd0e0b50f7f4cbb7f138ec",
          "Gateway": "172.17.0.1",
          "IPAddress": "172.17.0.4",
          "IPPrefixLen": 16,
          "IPv6Gateway": "",
          "GlobalIPv6Address": "",
          "GlobalIPv6PrefixLen": 0,
          "MacAddress": "02:42:ac:11:00:04"
        }
      }
    }
  }
]

node1---172.17.0.2
node2---172.17.0.3
node3---172.17.0.4

既然我們知道了它有自己的IP,那又出現另一個問題了,就是它的ip是動態的,啟動之前我們無法得知。有個解決辦法就是創建自己的bridge網絡,然后創建容器的時候指定ip。

【正確方式開始】

[root@localhost admin]# docker network create --driver bridge --subnet=172.18.0.0/16 --gateway=172.18.0.1 zoonet
8257c501652a214d27efdf5ef71ff38bfe222c3a2a7898be24b8df9db1fb3b13
[root@localhost admin]# docker network ls
NETWORK ID     NAME        DRIVER       SCOPE
5fc1ce4362af    bridge       bridge       local
6aa33e21444e    host        host        local
20e563b93ce9    none        null        local
8257c501652a    zoonet       bridge       local
[root@localhost admin]# docker network inspect 8257c501652a
[
  {
    "Name": "zoonet",
    "Id": "8257c501652a214d27efdf5ef71ff38bfe222c3a2a7898be24b8df9db1fb3b13",
    "Created": "2019-08-29T06:08:01.442601483-04:00",
    "Scope": "local",
    "Driver": "bridge",
    "EnableIPv6": false,
    "IPAM": {
      "Driver": "default",
      "Options": {},
      "Config": [
        {
          "Subnet": "172.18.0.0/16",
          "Gateway": "172.18.0.1"
        }
      ]
    },
    "Internal": false,
    "Attachable": false,
    "Containers": {},
    "Options": {},
    "Labels": {}
  }
]

然后我們修改一下zookeeper容器的創建命令。

docker run -d -p 2181:2181 --name zookeeper_node1 --privileged --restart always --network zoonet --ip 172.18.0.2 \

-v /usr/local/zookeeper-cluster/node1/volumes/data:/data \

-v /usr/local/zookeeper-cluster/node1/volumes/datalog:/datalog \

-v /usr/local/zookeeper-cluster/node1/volumes/logs:/logs \

-e ZOO_MY_ID=1 \

-e "ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181" 3487af26dee9

docker run -d -p 2182:2181 --name zookeeper_node2 --privileged --restart always --network zoonet --ip 172.18.0.3 \

-v /usr/local/zookeeper-cluster/node2/volumes/data:/data \

-v /usr/local/zookeeper-cluster/node2/volumes/datalog:/datalog \

-v /usr/local/zookeeper-cluster/node2/volumes/logs:/logs \

-e ZOO_MY_ID=2 \

-e "ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181" 3487af26dee9

docker run -d -p 2183:2181 --name zookeeper_node3 --privileged --restart always --network zoonet --ip 172.18.0.4 \

-v /usr/local/zookeeper-cluster/node3/volumes/data:/data \

-v /usr/local/zookeeper-cluster/node3/volumes/datalog:/datalog \

-v /usr/local/zookeeper-cluster/node3/volumes/logs:/logs \

-e ZOO_MY_ID=3 \

-e "ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181" 3487af26dee9

1. 由于2888 、3888不需要暴露,就不映射了;

2. 指定自己的網絡,并指定IP;

3. 每個容器之間環境是隔離的,所以容器內所用的端口一樣:2181/2888/3888

運行結果:

[root@localhost admin]# docker run -d -p 2181:2181 --name zookeeper_node1 --privileged --restart always --network zoonet --ip 172.18.0.2 \

> -v /usr/local/zookeeper-cluster/node1/volumes/data:/data \

> -v /usr/local/zookeeper-cluster/node1/volumes/datalog:/datalog \

> -v /usr/local/zookeeper-cluster/node1/volumes/logs:/logs \

> -e ZOO_MY_ID=1 \

> -e "ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181" 3487af26dee9
50c07cf11fab2d3b4da6d8ce48d8ed4a7beaab7d51dd542b8309f781e9920c36
[root@localhost admin]# docker run -d -p 2182:2181 --name zookeeper_node2 --privileged --restart always --network zoonet --ip 172.18.0.3 \

> -v /usr/local/zookeeper-cluster/node2/volumes/data:/data \

> -v /usr/local/zookeeper-cluster/node2/volumes/datalog:/datalog \

> -v /usr/local/zookeeper-cluster/node2/volumes/logs:/logs \

> -e ZOO_MY_ID=2 \

> -e "ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181" 3487af26dee9
649a4dbfb694504acfe4b8e11b990877964477bb41f8a230bd191cba7d20996f
[root@localhost admin]# docker run -d -p 2183:2181 --name zookeeper_node3 --privileged --restart always --network zoonet --ip 172.18.0.4 \

> -v /usr/local/zookeeper-cluster/node3/volumes/data:/data \

> -v /usr/local/zookeeper-cluster/node3/volumes/datalog:/datalog \

> -v /usr/local/zookeeper-cluster/node3/volumes/logs:/logs \

> -e ZOO_MY_ID=3 \

> -e "ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181" 3487af26dee9
c8bc1b9ae9adf86e9c7f6a3264f883206c6d0e4f6093db3200de80ef39f57160
[root@localhost admin]# docker ps
CONTAINER ID    IMAGE        COMMAND         CREATED       STATUS       PORTS                         NAMES
c8bc1b9ae9ad    3487af26dee9    "/docker-entrypoin..."  17 seconds ago   Up 16 seconds    2888/tcp, 3888/tcp, 8080/tcp, 0.0.0.0:2183->2181/tcp  zookeeper_node3
649a4dbfb694    3487af26dee9    "/docker-entrypoin..."  22 seconds ago   Up 21 seconds    2888/tcp, 3888/tcp, 8080/tcp, 0.0.0.0:2182->2181/tcp  zookeeper_node2
50c07cf11fab    3487af26dee9    "/docker-entrypoin..."  33 seconds ago   Up 32 seconds    2888/tcp, 3888/tcp, 0.0.0.0:2181->2181/tcp, 8080/tcp  zookeeper_node1
[root@localhost admin]#

進入容器內部驗證一下:

[root@localhost admin]# docker exec -it 50c07cf11fab bash
root@50c07cf11fab:/apache-zookeeper-3.5.5-bin# ./bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: follower
root@50c07cf11fab:/apache-zookeeper-3.5.5-bin# exit
exit
[root@localhost admin]# docker exec -it 649a4dbfb694 bash
root@649a4dbfb694:/apache-zookeeper-3.5.5-bin# ./bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: leader
root@649a4dbfb694:/apache-zookeeper-3.5.5-bin# exit
exit
[root@localhost admin]# docker exec -it c8bc1b9ae9ad bash
root@c8bc1b9ae9ad:/apache-zookeeper-3.5.5-bin# ./bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost.
Mode: follower
root@c8bc1b9ae9ad:/apache-zookeeper-3.5.5-bin# exit
exit
[root@localhost admin]#

在驗證一下創建節點

開啟防火墻,以供外部訪問

# Permanently open the three mapped client ports, apply the change,
# then list the active rules to verify.
for port in 2181 2182 2183; do
  firewall-cmd --zone=public --add-port="${port}/tcp" --permanent
done
systemctl restart firewalld
firewall-cmd --list-all

在本地,我用zookeeper的客戶端連接虛擬機上的集群:

可以看到連接成功!

集群安裝方式二:通過docker stack deploy或docker-compose安裝

這里用docker-compose。先安裝docker-compose

[root@localhost admin]# curl -L "https://github.com/docker/compose/releases/download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
 % Total  % Received % Xferd Average Speed  Time  Time   Time Current
                 Dload Upload  Total  Spent  Left Speed
100  617  0  617  0   0  145   0 --:--:-- 0:00:04 --:--:--  145
100 15.4M 100 15.4M  0   0  131k   0 0:02:00 0:02:00 --:--:-- 136k
[root@localhost admin]# chmod +x /usr/local/bin/docker-compose

檢查版本(驗證是否安裝成功)

[root@localhost admin]# docker-compose --version
docker-compose version 1.24.1, build 4667896b

卸載的話

rm /usr/local/bin/docker-compose

開始配置,新建三個掛載目錄

[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node4
[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node5
[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node6

新建任意目錄,然后在里面新建一個文件

[root@localhost admin]# mkdir DockerComposeFolder
[root@localhost admin]# cd DockerComposeFolder/
[root@localhost DockerComposeFolder]# vim docker-compose.yml

文件內容如下:(自定義網絡見上面)

# docker-compose definition for a 3-node ZooKeeper ensemble (ids 4/5/6).
# Each service lists itself as 0.0.0.0 in ZOO_SERVERS and addresses its
# peers by service hostname; all three join the pre-created "zoonet"
# network with fixed IPs.
version: '3.1'

services:
 zoo1:
  image: zookeeper
  restart: always
  privileged: true
  hostname: zoo1
  ports:
   - 2181:2181
  volumes: # mount data directories on the host
   - /usr/local/zookeeper-cluster/node4/data:/data
   - /usr/local/zookeeper-cluster/node4/datalog:/datalog
  environment:
   ZOO_MY_ID: 4
   # own entry is 0.0.0.0; peers resolved via compose hostnames
   ZOO_SERVERS: server.4=0.0.0.0:2888:3888;2181 server.5=zoo2:2888:3888;2181 server.6=zoo3:2888:3888;2181
  networks:
   default:
    ipv4_address: 172.18.0.14

 zoo2:
  image: zookeeper
  restart: always
  privileged: true
  hostname: zoo2
  ports:
   - 2182:2181
  volumes: # mount data directories on the host
   - /usr/local/zookeeper-cluster/node5/data:/data
   - /usr/local/zookeeper-cluster/node5/datalog:/datalog
  environment:
   ZOO_MY_ID: 5
   ZOO_SERVERS: server.4=zoo1:2888:3888;2181 server.5=0.0.0.0:2888:3888;2181 server.6=zoo3:2888:3888;2181
  networks:
   default:
    ipv4_address: 172.18.0.15

 zoo3:
  image: zookeeper
  restart: always
  privileged: true
  hostname: zoo3
  ports:
   - 2183:2181
  volumes: # mount data directories on the host
   - /usr/local/zookeeper-cluster/node6/data:/data
   - /usr/local/zookeeper-cluster/node6/datalog:/datalog
  environment:
   ZOO_MY_ID: 6
   ZOO_SERVERS: server.4=zoo1:2888:3888;2181 server.5=zoo2:2888:3888;2181 server.6=0.0.0.0:2888:3888;2181
  networks:
   default:
    ipv4_address: 172.18.0.16

networks: # custom bridge network, created beforehand with "docker network create"
 default:
  external:
   name: zoonet

注意yaml文件里不能有tab,只能有空格。

關于version與Docker版本的關系如下:

然后執行(-d后臺啟動)

docker-compose -f docker-compose.yml up -d

查看已啟動的容器

[root@localhost DockerComposeFolder]# docker ps
CONTAINER ID    IMAGE        COMMAND         CREATED       STATUS       PORTS                         NAMES
a2c14814037d    zookeeper      "/docker-entrypoin..."  6 minutes ago    Up About a minute  2888/tcp, 3888/tcp, 8080/tcp, 0.0.0.0:2183->2181/tcp  dockercomposefolder_zoo3_1
50310229b216    zookeeper      "/docker-entrypoin..."  6 minutes ago    Up About a minute  2888/tcp, 3888/tcp, 0.0.0.0:2181->2181/tcp, 8080/tcp  dockercomposefolder_zoo1_1
475d8a9e2d08    zookeeper      "/docker-entrypoin..."  6 minutes ago    Up About a minute  2888/tcp, 3888/tcp, 8080/tcp, 0.0.0.0:2182->2181/tcp  dockercomposefolder_zoo2_1

進入一個容器

[root@localhost DockerComposeFolder]# docker exec -it a2c14814037d bash
root@zoo3:/apache-zookeeper-3.5.5-bin# ./bin/zkCli.sh
Connecting to localhost:2181

....

WatchedEvent state:SyncConnected type:None path:null
[zk: localhost:2181(CONNECTED) 0] 
[zk: localhost:2181(CONNECTED) 1] ls /
[zookeeper]
[zk: localhost:2181(CONNECTED) 2] create /hi
Created /hi
[zk: localhost:2181(CONNECTED) 3] ls /
[hi, zookeeper]

進入另一個容器

[root@localhost DockerComposeFolder]# docker exec -it 50310229b216 bash
root@zoo1:/apache-zookeeper-3.5.5-bin# ./bin/zkCli.sh
Connecting to localhost:2181

...

WatchedEvent state:SyncConnected type:None path:null

[zk: localhost:2181(CONNECTED) 0] ls /
[hi, zookeeper]

本地客戶端連接集群:

zkCli.cmd -server 192.168.192.128:2181,192.168.192.128:2182,192.168.192.128:2183

查看

停止所有活動容器:docker stop $(docker ps -q)(或在 compose 目錄下執行 docker-compose stop)

刪除所有已停止的容器:docker rm $(docker ps -aq)(或執行 docker-compose down 一并移除容器與網絡)

更多docker-compose的命令:

[root@localhost DockerComposeFolder]# docker-compose --help
Define and run multi-container applications with Docker.

Usage:
 docker-compose [-f <arg>...] [options] [COMMAND] [ARGS...]
 docker-compose -h|--help

Options:
 -f, --file FILE       Specify an alternate compose file
               (default: docker-compose.yml)
 -p, --project-name NAME   Specify an alternate project name
               (default: directory name)
 --verbose          Show more output
 --log-level LEVEL      Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
 --no-ansi          Do not print ANSI control characters
 -v, --version        Print version and exit
 -H, --host HOST       Daemon socket to connect to

 --tls            Use TLS; implied by --tlsverify
 --tlscacert CA_PATH     Trust certs signed only by this CA
 --tlscert CLIENT_CERT_PATH Path to TLS certificate file
 --tlskey TLS_KEY_PATH    Path to TLS key file
 --tlsverify         Use TLS and verify the remote
 --skip-hostname-check    Don't check the daemon's hostname against the
               name specified in the client certificate
 --project-directory PATH  Specify an alternate working directory
               (default: the path of the Compose file)
 --compatibility       If set, Compose will attempt to convert keys
               in v3 files to their non-Swarm equivalent

Commands:
 build       Build or rebuild services
 bundle       Generate a Docker bundle from the Compose file
 config       Validate and view the Compose file
 create       Create services
 down        Stop and remove containers, networks, images, and volumes
 events       Receive real time events from containers
 exec        Execute a command in a running container
 help        Get help on a command
 images       List images
 kill        Kill containers
 logs        View output from containers
 pause       Pause services
 port        Print the public port for a port binding
 ps         List containers
 pull        Pull service images
 push        Push service images
 restart      Restart services
 rm         Remove stopped containers
 run        Run a one-off command
 scale       Set number of containers for a service
 start       Start services
 stop        Stop services
 top        Display the running processes
 unpause      Unpause services
 up         Create and start containers
 version      Show the Docker-Compose version information

到此這篇關于Docker下安裝zookeeper(單機和集群)的文章就介紹到這了,更多相關Docker安裝zookeeper內容請搜索腳本之家以前的文章或繼續瀏覽下面的相關文章希望大家以后多多支持腳本之家!

標簽:克拉瑪依 棗莊 96 鹽城 日照 常州 東莞 渭南

巨人網絡通訊聲明:本文標題《Docker下安裝zookeeper(單機和集群)》,本文關鍵詞  Docker,下,安裝,zookeeper,單機,;如發現本文內容存在版權問題,煩請提供相關信息告之我們,我們將及時溝通與處理。本站內容系統采集于網絡,涉及言論、版權與本站無關。
  • 相關文章
  • 下面列出與本文章《Docker下安裝zookeeper(單機和集群)》相關的同類信息!
  • 本頁收集關于Docker下安裝zookeeper(單機和集群)的相關信息資訊供網民參考!
  • 推薦文章
    婷婷综合国产,91蜜桃婷婷狠狠久久综合9色 ,九九九九九精品,国产综合av
    717成人午夜免费福利电影| 国产91清纯白嫩初高中在线观看 | 精品福利av导航| 欧美日韩精品欧美日韩精品一综合| 成人黄色在线视频| 大白屁股一区二区视频| 成人晚上爱看视频| 成人一区二区三区视频| www.色精品| 91理论电影在线观看| 欧美性大战久久久久久久蜜臀 | 日韩av午夜在线观看| 日韩—二三区免费观看av| 日本大胆欧美人术艺术动态| 蜜桃一区二区三区在线| 国产麻豆成人传媒免费观看| 成人午夜电影小说| 欧美吻胸吃奶大尺度电影 | 欧美精品丝袜中出| 欧美精品v日韩精品v韩国精品v| 日韩午夜激情免费电影| 国产日韩精品一区| 亚洲男人都懂的| 日韩和欧美一区二区| 国内成人免费视频| 91免费视频观看| 6080国产精品一区二区| 国产三级欧美三级日产三级99| 中文乱码免费一区二区| 亚洲成人福利片| 国产一区二区在线看| 91高清在线观看| 精品国产乱码久久久久久久久| ...中文天堂在线一区| 免费欧美在线视频| 99久久综合99久久综合网站| 欧美一区二区三区人| 亚洲色图自拍偷拍美腿丝袜制服诱惑麻豆 | 亚洲国产精品激情在线观看| 一区二区在线观看免费视频播放| 日本色综合中文字幕| 99久久99久久免费精品蜜臀| 日韩一区二区视频在线观看| 亚洲视频免费在线观看| 国内精品久久久久影院色| 欧美三级韩国三级日本一级| 欧美国产日本视频| 免费av网站大全久久| 91精品91久久久中77777| 久久久久九九视频| 日本视频在线一区| 欧美日韩在线不卡| 亚洲黄色免费电影| 国产不卡一区视频| 欧美成人精品3d动漫h| 污片在线观看一区二区| 99re热这里只有精品视频| www激情久久| 日韩成人av影视| 91福利国产成人精品照片| |精品福利一区二区三区| 国产麻豆视频精品| 久久久久久久久久久黄色| 免费av成人在线| 欧美一区二区成人6969| 亚洲成av人片在线观看| 色婷婷综合久久久久中文一区二区| 国产日产欧美一区二区三区| 国产一区中文字幕| 欧美精品一区二区高清在线观看| 麻豆成人在线观看| 日韩亚洲欧美综合| 麻豆成人久久精品二区三区红| 欧美一级片在线| 欧美aaa在线| 欧美一区二区性放荡片| 美女高潮久久久| 91麻豆精品国产无毒不卡在线观看| 一区二区三区四区视频精品免费| k8久久久一区二区三区| 一区二区三区日本| 欧美老肥妇做.爰bbww视频| 婷婷国产在线综合| 欧美一区二区精品在线| 国产在线精品一区二区| 亚洲国产岛国毛片在线| 色激情天天射综合网| 亚洲国产一区在线观看| 欧美一区永久视频免费观看| 奇米777欧美一区二区| 精品少妇一区二区三区视频免付费| 国产综合久久久久久鬼色| 国产精品乱码人人做人人爱 | 欧美美女直播网站| 日本中文字幕一区| 国产视频视频一区| 欧美在线色视频| 精品一区二区三区的国产在线播放| 国产视频一区二区在线观看| 91九色最新地址| 美女爽到高潮91| 国产精品成人在线观看| 欧美人与禽zozo性伦| 国产一区二区三区香蕉| 一区二区三区四区在线播放| 日韩一级免费观看| 99久精品国产| 男人的j进女人的j一区| 国产精品沙发午睡系列990531| 91精品福利在线| 国产麻豆精品theporn| 亚洲综合久久av| 久久久久久一二三区| 欧洲精品视频在线观看| 狠狠色综合播放一区二区| 亚洲一区在线视频观看| 国产亚洲欧美日韩在线一区| 欧美亚男人的天堂| 国产黄色精品视频| 日韩极品在线观看| 亚洲综合男人的天堂| 国产精品人妖ts系列视频| 91精品啪在线观看国产60岁| 91免费观看视频| 精品伊人久久久久7777人| 亚洲综合激情小说| 中文字幕乱码亚洲精品一区| 日韩一级片网址| 欧美日韩另类一区| 91福利社在线观看| a4yy欧美一区二区三区| 高清国产一区二区| 精品中文字幕一区二区小辣椒| 亚洲成人福利片| 一区二区三区四区不卡视频| 国产精品久久毛片| 国产校园另类小说区| 精品日韩99亚洲| 精品嫩草影院久久| 日韩一区二区精品| 欧美精品久久99久久在免费线| 99国产精品久久久久| 成人永久免费视频| gogo大胆日本视频一区| 高清av一区二区| 国产福利一区二区| 国产成人超碰人人澡人人澡| 国产真实乱对白精彩久久| 狠狠色丁香婷综合久久| 久久99国产精品久久99| 奇米色一区二区三区四区| 久久精品国产99| 国产中文字幕精品| 国产裸体歌舞团一区二区| 岛国精品在线播放| av不卡在线观看| 色哟哟国产精品免费观看| 91久久免费观看| 欧美性猛片aaaaaaa做受| 精品视频在线免费观看| 精品视频在线免费| 欧美喷水一区二区| 5858s免费视频成人| 
日韩美一区二区三区| 久久久久久久综合日本| 中文字幕一区二区三区蜜月 | 日韩一区二区在线播放| 日韩欧美一级在线播放| 久久久久久久精| 中文字幕一区视频| 天天综合色天天综合色h| 九九视频精品免费| 99久久免费视频.com| 欧美性生活大片视频| 欧美xfplay| 亚洲日本在线看| 蜜臀av在线播放一区二区三区| 国产精品自产自拍| 色婷婷狠狠综合| 欧美大片免费久久精品三p| 日本一区二区久久| 亚洲成人av电影在线| 激情深爱一区二区| 欧美综合亚洲图片综合区| 欧美va在线播放| 一区二区三区日韩精品| 国产一区二区免费看| 欧美性猛片aaaaaaa做受| 久久精品亚洲一区二区三区浴池| 亚洲一级二级三级| 国产黑丝在线一区二区三区| 欧美精品第1页| 亚洲欧美一区二区在线观看| 蜜桃在线一区二区三区| 日本伦理一区二区| 国产丝袜美腿一区二区三区| 日本不卡一二三区黄网| 日本电影亚洲天堂一区| 久久久精品国产免大香伊| 亚洲va中文字幕|