[root@dockertest ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
7846aed7560e redis:alpine "docker-entrypoint.s…" 3 minutes ago Up 2 minutes 6379/tcp test_redis_1
f7c8c051d057 test_web "python app.py" 3 minutes ago Up 2 minutes 0.0.0.0:5000->5000/tcp test_web_1
[root@dockertest ~]# docker-machine create -d generic \
> --generic-ip-address=192.168.10.128 \
> --generic-ssh-user=root \
> --generic-ssh-key ~/.ssh/id_rsa \
> machine1
Running pre-create checks...
Creating machine...
(machine1) Importing SSH key...
Waiting for machine to be running, this may take a few minutes...
Detecting operating system of created instance...
Waiting for SSH to be available...
Detecting the provisioner...
Provisioning with centos...
Copying certs to the local machine directory...
Copying certs to the remote machine...
Setting Docker configuration on the remote daemon...
Checking connection to Docker...
Docker is up and running!
To see how to connect your Docker Client to the Docker Engine running on this virtual machine, run: docker-machine env machine1
[root@dockertest ~]# docker-machine ls
NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS
machine1 - generic Running tcp://192.168.10.128:2376 v18.05.0-ce
③查看machine1的信息
[root@dockertest ~]# docker-machine env machine1
export DOCKER_TLS_VERIFY="1"
export DOCKER_HOST="tcp://192.168.10.128:2376"
export DOCKER_CERT_PATH="/root/.docker/machine/machines/machine1"
export DOCKER_MACHINE_NAME="machine1"
# Run this command to configure your shell:
# eval $(docker-machine env machine1)
[root@dockertest ~]# docker swarm join-token worker
To add a worker to this swarm, run the following command:
docker swarm join --token SWMTKN-1-67pzwc4zd5j7ld6aj998g6wm4r7rti8yrgbdbkminni6db40m5-9dzc38iwznkoun7j9pffosl5r 192.168.10.131:2377
[root@dockertest ~]# docker-machine ssh machine1
Last login: Thu Jun 28 03:17:25 2018 from 192.168.10.131
[root@machine1 ~]# docker swarm join --token SWMTKN-1-67pzwc4zd5j7ld6aj998g6wm4r7rti8yrgbdbkminni6db40m5-9dzc38iwznkoun7j9pffosl5r 192.168.10.131:2377
This node joined a swarm as a worker.
[root@machine1 ~]# exit
logout
exit status 1
[root@dockertest ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
uapifqcukdehpmxxxamoq0i4a * dockertest Ready Active Leader
8q9knaeijcb4k4ouznkksnh17 machine1 Ready Active
③获取其他manager要加入swarm集群时的token,并添加manager节点
[root@dockertest ~]# docker swarm join-token manager
To add a manager to this swarm, run the following command:
docker swarm join --token SWMTKN-1-67pzwc4zd5j7ld6aj998g6wm4r7rti8yrgbdbkminni6db40m5-a1aecwco4cqt48naq68d9e9yp 192.168.10.131:2377
[root@dockertest ~]# docker-machine ssh machine2
Last login: Thu Jun 28 03:18:14 2018 from 192.168.10.131
[root@machine2 ~]# docker swarm join --token SWMTKN-1-67pzwc4zd5j7ld6aj998g6wm4r7rti8yrgbdbkminni6db40m5-a1aecwco4cqt48naq68d9e9yp 192.168.10.131:2377
This node joined a swarm as a manager.
[root@machine2 ~]# exit
logout
[root@dockertest ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
uapifqcukdehpmxxxamoq0i4a * dockertest Ready Active Leader
8q9knaeijcb4k4ouznkksnh17 machine1 Ready Active
uequcd364emrwk26dchkhuj5y machine2 Ready Active Reachable
④worker节点移除
[root@dockertest ~]# docker node rm machine1
Error response from daemon: rpc error: code = FailedPrecondition desc = node 8q9knaeijcb4k4ouznkksnh17 is not down and can't be removed
[root@dockertest ~]# docker node rm --force machine1
machine1
[root@dockertest ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
uapifqcukdehpmxxxamoq0i4a * dockertest Ready Active Leader
uequcd364emrwk26dchkhuj5y machine2 Ready Active Reachable
⑤manager节点移除
[root@dockertest ~]# docker node demote machine2
Manager machine2 demoted in the swarm.
[root@dockertest ~]# docker node rm --force machine2
machine2
[root@dockertest ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
uapifqcukdehpmxxxamoq0i4a * dockertest Ready Active Leader
3.部署服务
①新建服务
[root@dockertest ~]# docker service create --replicas 3 -p 80:80 --name nginx nginx
image nginx:latest could not be accessed on a registry to record
its digest. Each node will access nginx:latest independently,
possibly leading to different nodes running different
versions of the image.
592966n2mvgnzr1ti0v5p915h
overall progress: 3 out of 3 tasks
1/3: running [==================================================>]
2/3: running [==================================================>]
3/3: running [==================================================>]
verify: Service converged
②查看服务
[root@dockertest ~]# docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
592966n2mvgn nginx replicated 3/3 nginx:latest *:80->80/tcp
[root@dockertest ~]# docker service ps nginx
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
20s9dpyppxjc nginx.1 nginx:latest dockertest Running Running 3 minutes ago
fhnpdagl7eat nginx.2 nginx:latest machine1 Running Running 7 minutes ago
v6tl3x9p6rt2 nginx.3 nginx:latest machine2 Running Running 8 minutes ago
③移除服务
[root@dockertest ~]# docker service rm nginx
④修改实例数
[root@dockertest ~]# docker service scale nginx=2
nginx scaled to 2
overall progress: 2 out of 2 tasks
1/2: running [==================================================>]
2/2: running [==================================================>]
verify: Service converged
[root@dockertest ~]# docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
p3mnb7lzvz0r nginx replicated 2/2 nginx:latest *:80->80/tcp