Set the system hostname and add mutual resolution to the hosts file
hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-master02
hostnamectl set-hostname k8s-master03
vi /etc/hosts
10.10.21.8 k8s-master01
10.10.21.28 k8s-master02
10.10.21.38 k8s-master03
10.10.21.100 k8s-vip
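If the nodes can already reach each other over SSH as root, a small loop keeps /etc/hosts identical everywhere; this is just a sketch, and the IP list is this cluster's, so adjust it to your environment:
# push the same hosts file to every node (assumes root SSH access between nodes)
for ip in 10.10.21.8 10.10.21.28 10.10.21.38; do
  scp /etc/hosts root@${ip}:/etc/hosts
done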
Install dependency packages
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget net-tools git
Switch the firewall to iptables and flush the rules
systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
Disable swap and SELinux
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
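A quick check that both changes took effect (SELinux shows Permissive until the next reboot, Disabled afterwards):
free -m | grep -i swap     # expect 0 total / 0 used
getenforce                 # Permissive now, Disabled after reboot
grep swap /etc/fstab       # the swap line should be commented out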
Tune kernel parameters for Kubernetes
[root@k8s-master ~]# pwd
/root
[root@k8s-master ~]# vi kubernetes.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1 # the two lines above make bridged traffic pass through iptables; both are required
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0 # avoid using swap; allow it only when the system would otherwise OOM
vm.overcommit_memory=1 # do not check whether physical memory is sufficient
vm.panic_on_oom=0 # do not panic on OOM; let the OOM killer handle it
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1 # disable IPv6; this is also required
net.netfilter.nf_conntrack_max=2310720
Make the settings load at boot
[root@k8s-master ~]# cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
[root@k8s-master ~]# sysctl -p /etc/sysctl.d/kubernetes.conf # apply immediately
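If sysctl -p complains that the net.bridge.* keys are unknown, the br_netfilter module is not loaded yet; loading it and re-running is usually enough:
modprobe br_netfilter
sysctl -p /etc/sysctl.d/kubernetes.conf
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward   # both should print 1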
Adjust the system time zone
Run on every node and adjust to your environment; if the time zone is already CST, skip this step.
# set the system time zone to Asia/Shanghai
[root@k8s-master ~]# timedatectl set-timezone Asia/Shanghai
# write the current UTC time to the hardware clock
[root@k8s-master ~]# timedatectl set-local-rtc 0
# restart services that depend on the system time
[root@k8s-master ~]# systemctl restart rsyslog
[root@k8s-master ~]# systemctl restart crond
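Running timedatectl with no arguments confirms the result:
timedatectl   # Time zone should read Asia/Shanghai, "RTC in local TZ: no"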
Disable services the cluster does not need
Run on every node; this stops the mail service.
[root@k8s-master ~]# systemctl stop postfix && systemctl disable postfix
Configure rsyslogd and systemd journald
Run on every node. CentOS 7 boots with systemd, so the system carries two logging daemons; here we make systemd journald the one that persists logs.
[root@k8s-master ~]# mkdir /var/log/journal # directory for persistent logs
[root@k8s-master ~]# mkdir /etc/systemd/journald.conf.d
[root@k8s-master ~]# vi /etc/systemd/journald.conf.d/99-prophet.conf
[Journal]
# persist logs to disk
Storage=persistent
# compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# cap total disk usage at 10G
SystemMaxUse=10G
# cap a single log file at 200M
SystemMaxFileSize=200M
# keep logs for 2 weeks
MaxRetentionSec=2week
# do not forward logs to syslog
ForwardToSyslog=no
[root@k8s-master ~]# systemctl restart systemd-journald
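To confirm journald picked up the new limits, check that it is running and how much disk it uses (it should stay under the 10G cap):
systemctl status systemd-journald   # should be active (running)
journalctl --disk-usage             # current size of persisted journals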
Upgrade the kernel to 4.4
The 3.10.x kernel that ships with CentOS 7.x has bugs that make Docker and Kubernetes unstable, so install a newer long-term kernel from the ELRepo repository:
[root@k8s-master ~]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# after installation, check that the matching kernel menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if it does not, install again!
[root@k8s-master ~]# yum --enablerepo=elrepo-kernel install -y kernel-lt
# make the new kernel the default boot entry
[root@k8s-master ~]# grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)'
# after the reboot, install the kernel source files
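The version string passed to grub2-set-default must match a menuentry in grub.cfg exactly; before rebooting, it can help to list the entries and the saved default (the 4.4.x version on your system may differ):
grub2-editenv list                                        # shows saved_entry
awk -F\' '/^menuentry / {print $2}' /boot/grub2/grub.cfg  # available entries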
Disable NUMA
[root@k8s-master ~]# cp /etc/default/grub{,.bak}
[root@k8s-master ~]# vi /etc/default/grub
# add the numa=off parameter to the GRUB_CMDLINE_LINUX line, as shown below:
GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rhgb quiet numa=off"
[root@k8s-master ~]# cp /boot/grub2/grub.cfg{,.bak}
[root@k8s-master ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
[root@k8s-master ~]# reboot
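After the reboot, verify the new kernel is running and that numa=off made it onto the kernel command line:
uname -r                      # expect a 4.4.x elrepo kernel
grep numa=off /proc/cmdline   # confirm the parameter is present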
Prerequisites for enabling IPVS in kube-proxy (required on all nodes)
[root@k8s-master ~]# modprobe br_netfilter
[root@k8s-master ~]# vi /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
[root@k8s-master ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4 20480 0
nf_defrag_ipv4 16384 1 nf_conntrack_ipv4
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 147456 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 114688 2 ip_vs,nf_conntrack_ipv4
libcrc32c 16384 2 xfs,ip_vs
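Note: on kernels 4.19 and later, nf_conntrack_ipv4 was merged into nf_conntrack, so modprobe nf_conntrack_ipv4 fails there. A version-tolerant variant of the module script might look like this:
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
# nf_conntrack_ipv4 was merged into nf_conntrack in kernel 4.19
modprobe -- nf_conntrack_ipv4 2>/dev/null || modprobe -- nf_conntrack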
Install Docker (required on all nodes)
[root@k8s-master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s-master ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
docker-ce installs fine here without a prior yum update (running yum update -y on several nodes at the same time may be flagged as malicious activity by the mirror; run the update at another time if needed)
[root@k8s-master ~]# yum update -y && yum install -y docker-ce
[root@k8s-master ~]# reboot
## create the /etc/docker directory
[root@k8s-master ~]# mkdir /etc/docker
Configure daemon.json:
vi /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
}
}
[root@k8s-master ~]# mkdir -p /etc/systemd/system/docker.service.d
# restart the Docker service
[root@k8s-master ~]# systemctl daemon-reload && systemctl restart docker && systemctl enable docker
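Verify Docker came back with the systemd cgroup driver, since the kubelet expects the two to match:
docker info 2>/dev/null | grep -i 'cgroup driver'   # expect: Cgroup Driver: systemd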
Start the HAProxy and Keepalived containers on the first master node
Import the scripts > run them > check the available nodes
[root@k8s-master ~]# mkdir -p /usr/local/kubernetes/install
Copy the required files into /usr/local/kubernetes/install
[root@k8s-master ~]# cd /usr/local/kubernetes/install
[root@k8s-master install]# ls
haproxy.tar keepalived.tar kubeadm-basic.images.tar.gz load-images.sh start.keep.tar.gz
[root@k8s-master install]# docker load -i haproxy.tar
[root@k8s-master install]# docker load -i keepalived.tar
[root@k8s-master install]# tar -zxvf kubeadm-basic.images.tar.gz
[root@k8s-master install]# vi load-images.sh
#!/bin/bash
# load every kubeadm image tarball in this directory into Docker
cd /usr/local/kubernetes/install/kubeadm-basic.images
ls /usr/local/kubernetes/install/kubeadm-basic.images | grep -v load-images.sh > /tmp/k8s-images.txt
for i in $( cat /tmp/k8s-images.txt )
do
docker load -i $i
done
rm -f /tmp/k8s-images.txt
[root@k8s-master install]# chmod +x load-images.sh
[root@k8s-master install]# ./load-images.sh
[root@k8s-master install]# tar -zxvf start.keep.tar.gz
[root@k8s-master install]# mv data/ /
[root@k8s-master install]# cd /data
[root@k8s-master data]# cd lb
[root@k8s-master lb]# ls
etc kubeadm-config.yaml start-haproxy.sh start-keepalived.sh
[root@k8s-master lb]# vi etc/haproxy.cfg
server rancher01 10.10.21.8:6443
(add the backend nodes here one at a time; otherwise HAProxy may fail because a node is not up yet)
[root@k8s-master lb]# vi start-haproxy.sh
MasterIP1=10.10.21.8
MasterIP2=10.10.21.28
MasterIP3=10.10.21.38
[root@k8s-master lb]# ./start-haproxy.sh
aee0cf634eadad7b73c58f7c56e2bf6bc62d4cb489f3c156bb8c0650910d58f6
[root@k8s-master lb]# netstat -naltp | grep 6444
tcp6 0 0 :::6444 :::* LISTEN 2340/docker-proxy
[root@k8s-master lb]# vi start-keepalived.sh
#!/bin/bash
VIRTUAL_IP=10.10.21.100
INTERFACE=eth0
[root@k8s-master lb]# ./start-keepalived.sh
a280dbbb7bd9d4e0b724e111d1dff308880e6931f84a28eaf3015ff1b42fc25d
[root@k8s-master lb]# ip add
eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:50:56:92:70:dd brd ff:ff:ff:ff:ff:ff
inet 10.10.21.28/24 brd 10.10.21.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 10.10.21.100/24 scope global secondary eth0
valid_lft forever preferred_lft forever
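With the VIP bound, a rough reachability check from any node (only the node currently holding the VIP answers; the port probe will not return anything useful until the apiservers are up):
ping -c 2 10.10.21.100                  # VIP should respond
netstat -naltp | grep 6444              # HAProxy listening on the local node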
Install kubeadm (master and worker setup) (required on all nodes)
[root@k8s-master lb]# vi /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
[root@k8s-master lb]# yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
[root@k8s-master lb]# systemctl enable kubelet.service
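Before initializing, confirm the three components report matching versions (all should be v1.15.1):
kubeadm version -o short            # v1.15.1
kubelet --version                   # Kubernetes v1.15.1
kubectl version --client --short    # Client Version: v1.15.1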
Initialize the first master node (everything above runs on all nodes; the steps below target the first master only)
[root@k8s-master lb]# cd /usr/local/kubernetes/install/
[root@k8s-master install]# kubeadm config print init-defaults > kubeadm-config.yaml
[root@k8s-master install]# vi kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.10.21.8
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "10.10.21.100:6444"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
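Before running init it can help to diff the edited file against a fresh dump of the defaults, to confirm only the intended fields changed:
kubeadm config print init-defaults > /tmp/defaults.yaml
diff /tmp/defaults.yaml kubeadm-config.yaml   # review advertiseAddress, controlPlaneEndpoint, podSubnet, etc.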
[root@k8s-master install]# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 10.10.21.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:5e75bb6da2837ca318cc79fdb74e149a5ac185005f89cca31deba5e5fb962df6 \
--control-plane --certificate-key f9a01415dfb5909b920f2a853b1161e3f05cc9a992c922a94a21398bf22c60d3
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.10.21.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:5e75bb6da2837ca318cc79fdb74e149a5ac185005f89cca31deba5e5fb962df6
[root@k8s-master install]# mkdir -p $HOME/.kube
[root@k8s-master install]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master install]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master01 install]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady master 2m44s v1.15.1
Next, start the HAProxy and Keepalived containers on the other master nodes
[root@k8s-master lb]# vi /data/lb/etc/haproxy.cfg (configure a single backend node first)
server rancher01 10.10.21.8:6443
[root@k8s-master lb]# vi start-haproxy.sh
MasterIP1=10.10.21.8
MasterIP2=10.10.21.28
MasterIP3=10.10.21.38
[root@k8s-master lb]# cd /data/lb/
[root@k8s-master lb]# ./start-haproxy.sh
aee0cf634eadad7b73c58f7c56e2bf6bc62d4cb489f3c156bb8c0650910d58f6
[root@k8s-master lb]# netstat -naltp | grep 6444
tcp6 0 0 :::6444 :::* LISTEN 2340/docker-proxy
[root@k8s-master lb]# vi start-keepalived.sh
#!/bin/bash
VIRTUAL_IP=10.10.21.100
INTERFACE=eth0
[root@k8s-master lb]# ./start-keepalived.sh
a280dbbb7bd9d4e0b724e111d1dff308880e6931f84a28eaf3015ff1b42fc25d
[root@k8s-master lb]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
4c4551e52852 wise2c/keepalived-k8s "/usr/bin/keepalived…" 35 seconds ago Up 33 seconds Keepalived-K8S
910875d10340 wise2c/haproxy-k8s "/docker-entrypoint.…" 28 minutes ago Up 28 minutes 0.0.0.0:6444->6444/tcp HAProxy-K8S
Use the output from initializing the first master to join the other master nodes to the cluster
[root@k8s-master lb]# kubeadm join 10.10.21.100:6444 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:5e75bb6da2837ca318cc79fdb74e149a5ac185005f89cca31deba5e5fb962df6 \
--control-plane --certificate-key f9a01415dfb5909b920f2a853b1161e3f05cc9a992c922a94a21398bf22c60d3
An error is returned:
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
error execution phase preflight: unable to fetch the kubeadm-config ConfigMap: failed to get config map: Unauthorized
Note: the token generated by kubeadm init is only valid for 24 hours. If a node hits the error above during kubeadm join,
check on the master whether the token you are using is still valid: kubeadm token list
[root@k8s-master01 install]# kubeadm token list
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
abcdef.0123456789abcdef <invalid> 2020-12-19T18:46:54+08:00 authentication,signing <none> system:bootstrappers:kubeadm:default-node-token
rf5y2f.bdtdm9ojmr86lyhp <invalid> 2020-12-18T20:46:54+08:00 <none> Proxy for managing TTL for the kubeadm-certs secret <none>
Generate a token that does not expire
[root@k8s-master01 install]# kubeadm token create --ttl 0 --print-join-command
kubeadm join 10.10.21.100:6444 --token 86lgxf.0xifzlgrxxj7ta6d --discovery-token-ca-cert-hash sha256:5e75bb6da2837ca318cc79fdb74e149a5ac185005f89cca31deba5e5fb962df6
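If the --discovery-token-ca-cert-hash value from the log is ever lost, it can be recomputed on a master from the cluster CA with the standard openssl pipeline from the Kubernetes docs:
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'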
Join the master node to the cluster again
[root@k8s-master lb]# kubeadm join 10.10.21.100:6444 --token 86lgxf.0xifzlgrxxj7ta6d \
--discovery-token-ca-cert-hash sha256:5e75bb6da2837ca318cc79fdb74e149a5ac185005f89cca31deba5e5fb962df6 \
--control-plane --certificate-key f9a01415dfb5909b920f2a853b1161e3f05cc9a992c922a94a21398bf22c60d3
Success:
This node has joined the cluster and a new control plane instance was created:
- Certificate signing request was sent to apiserver and approval was received.
- The Kubelet was informed of the new secure connection details.
- Control plane (master) label and taint were applied to the new node.
- The Kubernetes control plane instances scaled up.
- A new etcd member was added to the local/stacked etcd cluster.
To start administering your cluster from this node, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run 'kubectl get nodes' to see this node join the cluster.
[root@k8s-master02 lb]# mkdir -p $HOME/.kube
[root@k8s-master02 lb]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master02 lb]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master02 ~]# cat .kube/config
Check that it shows server: https://10.10.21.100:6444
On every node, add all three backends to haproxy.cfg and restart the container
[root@k8s-master lb]# vi /data/lb/etc/haproxy.cfg
server rancher01 10.10.21.8:6443
server rancher02 10.10.21.28:6443
server rancher03 10.10.21.38:6443
[root@k8s-master01 install]# docker ps -a | grep haproxy
84c2c97f1100 wise2c/haproxy-k8s "/docker-entrypoint.…" 2 days ago Up 2 days 0.0.0.0:6444->6444/tcp HAProxy-K8S
[root@k8s-master01 install]# docker rm -f HAProxy-K8S && /data/lb/start-haproxy.sh
HAProxy-K8S
411fb977585726ff43f14af910f91db128e5a466eb32082636dbe3a64060864f
[root@k8s-master01 install]# docker ps -a | grep haproxy
411fb9775857 wise2c/haproxy-k8s "/docker-entrypoint.…" 41 seconds ago Up 39 seconds 0.0.0.0:6444->6444/tcp HAProxy-K8S
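A quick loop confirms HAProxy can now reach every backend apiserver; this is just a TCP port probe using bash's /dev/tcp, so any port-checking tool works equally well:
for ip in 10.10.21.8 10.10.21.28 10.10.21.38; do
  timeout 2 bash -c "echo > /dev/tcp/${ip}/6443" && echo "${ip}:6443 open" || echo "${ip}:6443 closed"
done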
Check from the first master:
[root@k8s-master01 install]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady master 2d18h v1.15.1
k8s-master02 NotReady master 36m v1.15.1
k8s-master03 NotReady master 29m v1.15.1
[root@k8s-master01 install]# pwd
/usr/local/kubernetes/install
Deploy the pod network from the first master
[root@k8s-master01 install]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[root@k8s-master01 install]# kubectl apply -f kube-flannel.yml
[root@k8s-master01 install]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5c98db65d4-8zmw7 1/1 Running 0 2d18h
coredns-5c98db65d4-gwjrx 1/1 Running 0 2d18h
etcd-k8s-master01 1/1 Running 0 2d18h
etcd-k8s-master02 1/1 Running 0 61m
etcd-k8s-master03 1/1 Running 0 25m
kube-apiserver-k8s-master01 1/1 Running 0 2d18h
kube-apiserver-k8s-master02 1/1 Running 0 61m
kube-apiserver-k8s-master03 1/1 Running 0 25m
kube-controller-manager-k8s-master01 1/1 Running 1 2d18h
kube-controller-manager-k8s-master02 1/1 Running 0 61m
kube-controller-manager-k8s-master03 1/1 Running 0 25m
kube-flannel-ds-6fbjn 1/1 Running 0 72s
kube-flannel-ds-tdv9w 1/1 Running 0 72s
kube-flannel-ds-zqj7x 1/1 Running 0 72s
kube-proxy-hj8qm 1/1 Running 0 2d18h
kube-proxy-k2p4m 1/1 Running 0 54m
kube-proxy-txqkl 1/1 Running 0 61m
kube-scheduler-k8s-master01 1/1 Running 1 2d18h
kube-scheduler-k8s-master02 1/1 Running 0 61m
kube-scheduler-k8s-master03 1/1 Running 0 25m
[root@k8s-master01 install]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 2d18h v1.15.1
k8s-master02 Ready master 61m v1.15.1
k8s-master03 Ready master 54m v1.15.1
Shut down the first master to test failover
[root@k8s-master01 install]# shutdown -h now
Operate on the other nodes
[root@k8s-master02 ~]# kubectl get node
Unable to connect to the server: net/http: TLS handshake timeout
[root@k8s-master02 ~]# pwd
/root
[root@k8s-master02 ~]# vi .kube/config (point the server at the local node's IP, as below)
server: https://10.10.21.28:6444
[root@k8s-master02 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady master 2d19h v1.15.1
k8s-master02 Ready master 84m v1.15.1
k8s-master03 Ready master 77m v1.15.1
[root@k8s-master03 ~]# vi .kube/config (point the server at the local node's IP, as below)
server: https://10.10.21.38:6444
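The same edit can be scripted; a sed one-liner per node does the identical change (the IPs are this cluster's, adjust to yours):
# on k8s-master02, for example:
sed -i 's#server: https://10.10.21.100:6444#server: https://10.10.21.28:6444#' ~/.kube/config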
Start the first master back up
Check the etcd cluster status
[root@k8s-master01 ~]# kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
annotations:
control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master03_55d9202f-0962-4efb-a512-f21959001268","leaseDurationSeconds":15,"acquireTime":"2020-12-21T05:27:47Z","renewTime":"2020-12-21T05:55:47Z","leaderTransitions":2}'
creationTimestamp: "2020-12-18T10:46:52Z"
name: kube-controller-manager
namespace: kube-system
resourceVersion: "298045"
selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
uid: 0dbbd8ad-8fda-4d24-8e16-19e772030559
[root@k8s-master01 ~]# kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
annotations:
control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master02_4567a03f-a1a3-4c16-aa47-e7efe6553d26","leaseDurationSeconds":15,"acquireTime":"2020-12-21T05:27:46Z","renewTime":"2020-12-21T05:56:05Z","leaderTransitions":2}'
creationTimestamp: "2020-12-18T10:46:53Z"
name: kube-scheduler
namespace: kube-system
resourceVersion: "298073"
selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
uid: 2bcc5145-baea-40bf-b3e9-0e7f057c7fd8
[root@k8s-master01 ~]# kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl --endpoints=https://10.10.21.8:2379 --ca-file=/etc/kubernetes/pki/etcd/ca.crt --cert-file=/etc/kubernetes/pki/etcd/server.crt --key-file=/etc/kubernetes/pki/etcd/server.key cluster-health
member 1f716c5cc789f0ad is healthy: got healthy result from https://10.10.21.28:2379
member 40869e813511be0d is healthy: got healthy result from https://10.10.21.38:2379
member 7557ff4ca558021b is healthy: got healthy result from https://10.10.21.8:2379
cluster is healthy
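The member list can be printed the same way, using the same etcdctl v2 flags as above, if you want to see member IDs and peer URLs:
kubectl -n kube-system exec etcd-k8s-master01 -- etcdctl --endpoints=https://10.10.21.8:2379 --ca-file=/etc/kubernetes/pki/etcd/ca.crt --cert-file=/etc/kubernetes/pki/etcd/server.crt --key-file=/etc/kubernetes/pki/etcd/server.key member list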
Join additional masters and the remaining worker nodes (run the join commands recorded in the install log)
[root@k8s-master01 ~]# cat /usr/local/kubernetes/install/kubeadm-init.log