Creating a K8S Cluster with Ansible (kubeasz)

*爱你&永不变心* submitted on 2020-10-04 03:18:40

Server information

Type                 Server IP            Notes
Ansible (2)          172.24.78.21/22      K8S cluster deployment servers; can be co-located with the masters
K8S Master (2)       172.24.78.21/22      K8s control nodes; active/standby HA through a single VIP
Harbor (2)           172.24.78.23/24      Highly available image registry servers
Etcd (at least 3)    172.24.78.25/26/27   Servers holding the k8s cluster data
Haproxy (2)          172.24.78.28/29      Highly available etcd proxy servers
Node (2-N)           172.24.78.31/32/xxx  Servers that actually run the containers; at least two for high availability

Host information

No.  Type          Server IP      Hostname          VIP
1    K8S Master1   172.24.78.21   master1.his.net   172.24.78.18
2    K8S Master2   172.24.78.22   master2.his.net   172.24.78.18
3    Harbor1       172.24.78.23   harbor1.his.net
4    Harbor2       172.24.78.24   harbor2.his.net
5    etcd node 1   172.24.78.25   etcd1.his.net
6    etcd node 2   172.24.78.26   etcd2.his.net
7    etcd node 3   172.24.78.27   etcd3.his.net
8    Haproxy1      172.24.78.28   ha1.his.net
9    Haproxy2      172.24.78.29   ha2.his.net
10   Node 1        172.24.78.30   node1.his.net
11   Node 2        172.24.78.31   node2.his.net

Software information

Port: 172.24.78.18:6443  # must be reverse-proxied on the load balancer; the dashboard port is 8443
OS: Ubuntu Server 18.04
k8s version: 1.13.5
calico: 3.4.4


Set the hostname and IP address

vim /etc/netplan/50-cloud-init.yaml
network:
    version: 2
    ethernets:
        eth0:    # the interface name is an example; use the actual NIC name on the host
            dhcp4: no
            dhcp6: no
            addresses: [172.24.78.25/25]
            gateway4: 172.24.78.1
            nameservers:
                addresses: [34.34.34.34,202.96.134.133]
hostnamectl set-hostname master1.his.net...
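
Apply the configuration and verify the address:

netplan apply
ip addr show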

Add hosts mappings

172.24.78.21  master1.his.net
172.24.78.22  master2.his.net
172.24.78.23  harbor1.his.net
172.24.78.24  harbor2.his.net
172.24.78.25  etcd1.his.net
172.24.78.26  etcd2.his.net
172.24.78.27  etcd3.his.net
172.24.78.28  ha1.his.net
172.24.78.29  ha2.his.net
172.24.78.30  node1.his.net
172.24.78.31  node2.his.net

  • Manual binary deployment of K8S

  • Deployment with ansible

apt-get update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
apt-get upgrade
sudo add-apt-repository main
sudo add-apt-repository universe
sudo add-apt-repository restricted
sudo add-apt-repository multiverse
sudo apt-get update
apt-get install -y git python2.7
ln -s /usr/bin/python2.7 /usr/bin/python

Install dependencies

sudo apt-get install -y git
sudo apt-get install -y python2.7
ln -s /usr/bin/python2.7 /usr/bin/python
apt-get install git python-pip ansible -y
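
Confirm that ansible is available:

ansible --version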
  • Distribute the SSH public key

apt-get install sshpass
ssh-keygen
Press Enter three times

Disable auto-indent when pasting

vim ~/.vimrc
set paste

Add hosts mappings (or configure them in DNS)

vim /etc/hosts
172.24.78.23 harbor.his.net
172.24.78.24 harbor.his.net
172.24.78.21  master1.his.net
172.24.78.22  master2.his.net
172.24.78.23  harbor1.his.net
172.24.78.24  harbor2.his.net
172.24.78.25  etcd1.his.net
172.24.78.26  etcd2.his.net
172.24.78.27  etcd3.his.net
172.24.78.28  ha1.his.net
172.24.78.29  ha2.his.net
172.24.78.30  node1.his.net
172.24.78.31  node2.his.net

Public key distribution script (master1-2)

vim scp.sh
:set paste
#!/bin/bash
# target host list
IP="
172.24.78.21
172.24.78.22
172.24.78.23
172.24.78.24
172.24.78.25
172.24.78.26
172.24.78.27
172.24.78.28
172.24.78.29
172.24.78.30
172.24.78.31
" 
for node in ${IP};do
    sshpass -p silence ssh-copy-id -o StrictHostKeyChecking=no ${node}
    if [ $? -eq 0 ];then
        echo "${node} 秘钥copy完成"
    else
        echo "${node} 秘钥copy失败"
    fi
done

Docker certificate sync script

vim cer.sh
#!/bin/bash
# target host list
IP="
172.24.78.21
172.24.78.22
172.24.78.23
172.24.78.24
172.24.78.25
172.24.78.26
172.24.78.27
172.24.78.28
172.24.78.29
172.24.78.30
172.24.78.31
" 
for node in ${IP};do
    sshpass -p silence ssh-copy-id -o StrictHostKeyChecking=no ${node}
    if [ $? -eq 0 ];then
        echo "${node} key copy completed"
        echo "${node} key copy completed, starting environment initialization....."
        ssh ${node} "mkdir -p /etc/docker/certs.d/harbor.his.net"
        echo "Harbor certificate directory created!"
        scp /etc/docker/certs.d/harbor.his.net/harborca.crt ${node}:/etc/docker/certs.d/harbor.his.net/harborca.crt
        echo "Harbor certificate copied!"
        scp /etc/hosts ${node}:/etc/hosts
        echo "hosts file copied"
        scp -r /root/.docker ${node}:/root/
        echo "Harbor auth files copied!"
        scp -r /etc/resolv.conf ${node}:/etc/
    else
        echo "${node} key copy failed"
    fi
done

Run the scripts to sync

bash scp.sh
bash cer.sh
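
A quick check that the keys were distributed (a sketch using one host from the list above):

ssh -o BatchMode=yes 172.24.78.25 hostname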
  • Clone the project (master1-2)

git clone -b 0.6.1 https://github.com/easzlab/kubeasz.git
  • Back up the existing files
mv /etc/ansible/* /opt/
mv kubeasz/* /etc/ansible/
cd /etc/ansible/
cp example/hosts.m-masters.example ./hosts

Prepare the hosts file

cd /etc/ansible
cp example/hosts.m-masters.example ./hosts
vim hosts
# cluster deploy node: usually the node that runs the ansible playbooks
# variable NTP_ENABLED (=yes/no): whether to install chrony time sync in the cluster
[deploy]
172.24.78.21 NTP_ENABLED=no
# etcd cluster: provide NODE_NAME as below; the etcd cluster must have an odd number of nodes (1,3,5,7...)
[etcd]
172.24.78.25 NODE_NAME=etcd1
172.24.78.26 NODE_NAME=etcd2
172.24.78.27 NODE_NAME=etcd3
[new-etcd] # reserved group, used later when adding etcd nodes
#172.24.78.x NODE_NAME=etcdx
[kube-master]
172.24.78.21
[new-master] # reserved group, used later when adding master nodes
#172.24.78.22
[kube-node]
172.24.78.30
[new-node] # reserved group, used later when adding node nodes
#172.24.78.31
# parameter NEW_INSTALL: yes means install a new harbor, no means use an existing harbor server
# if not using a domain name, set HARBOR_DOMAIN=""
[harbor]
#172.24.78.23 HARBOR_DOMAIN="harbor.his.net" NEW_INSTALL=no
# load balancers (more than 2 nodes are supported now, but 2 are usually enough); installs haproxy+keepalived
[lb]
172.24.78.29 LB_ROLE=backup
172.24.78.28 LB_ROLE=master
# [optional] external load balancer, used in self-hosted environments to forward services exposed via NodePort, etc.
[ex-lb]
#172.24.78.6 LB_ROLE=backup EX_VIP=172.24.78.250
#172.24.78.7 LB_ROLE=master EX_VIP=172.24.78.250
[all:vars]
# --------- main cluster parameters ---------------
# cluster deployment mode: allinone, single-master, multi-master
DEPLOY_MODE=multi-master
# cluster major version, currently supported: v1.8, v1.9, v1.10, v1.11, v1.12, v1.13
K8S_VER="v1.13"
# cluster MASTER IP, i.e. the VIP of the LB nodes; to distinguish it from the default apiserver port, the VIP listens on service port 8443
# on public clouds, use the internal address and listening port of the cloud load balancer
MASTER_IP="172.24.78.18"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"
# cluster network plugin, currently supported: calico, flannel, kube-router, cilium
CLUSTER_NETWORK="calico"
# Service CIDR; must not overlap with existing internal network ranges
SERVICE_CIDR="10.20.0.0/16"
# Pod network range (Cluster CIDR); must not overlap with existing internal network ranges
CLUSTER_CIDR="172.31.0.0/16"
# service port range (NodePort range)
NODE_PORT_RANGE="20000-60000"
# kubernetes service IP (pre-allocated, usually the first IP in SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"
# cluster DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP="10.20.254.254"
# cluster DNS domain
CLUSTER_DNS_DOMAIN="his20.local."
# username and password for cluster basic auth
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="silence"
# --------- additional parameters --------------------
# default binary directory
bin_dir="/usr/bin"
# certificate directory
ca_dir="/etc/kubernetes/ssl"
# deployment directory, i.e. the ansible working directory; changing it is not recommended
base_dir="/etc/ansible"

Prepare the binary files

cd /etc/ansible/bin
tar xvf k8s.1-13-5.tar.gz
mv bin/* .
  • Start the step-by-step deployment

  • Environment initialization

cd /etc/ansible
ansible-playbook 01.prepare.yml

Output

PLAY RECAP ************************************************************************************************************************************************************
172.24.78.21               : ok=33   changed=13   unreachable=0    failed=0   
172.24.78.25               : ok=15   changed=13   unreachable=0    failed=0   
172.24.78.26               : ok=15   changed=13   unreachable=0    failed=0   
172.24.78.27               : ok=15   changed=13   unreachable=0    failed=0   
172.24.78.28               : ok=29   changed=21   unreachable=0    failed=0   
172.24.78.29               : ok=29   changed=21   unreachable=0    failed=0   
172.24.78.30               : ok=15   changed=13   unreachable=0    failed=0  
  • Deploy the etcd cluster

ansible-playbook 02.etcd.yml

Output

PLAY RECAP ************************************************************************************************************************************************************
172.24.78.25               : ok=11   changed=9    unreachable=0    failed=0   
172.24.78.26               : ok=11   changed=9    unreachable=0    failed=0   
172.24.78.27               : ok=11   changed=9    unreachable=0    failed=0 

Verify the etcd service on each etcd server (log in to each etcd server)

Define the etcd IP variable

export NODE_IPS="172.24.78.25 172.24.78.26 172.24.78.27"

Test the etcd service

for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health; done

Output

https://172.24.78.25:2379 is healthy: successfully committed proposal: took = 2.397926ms
https://172.24.78.26:2379 is healthy: successfully committed proposal: took = 2.771018ms
https://172.24.78.27:2379 is healthy: successfully committed proposal: took = 2.593279ms
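
Optionally, check the cluster membership as well (a sketch reusing the same certificate paths as the health check above):

ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://172.24.78.25:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem member list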
  • Deploy docker

Edit the hosts file to enable the relevant hosts

vim hosts
[new-master] # reserved group, used later when adding master nodes
172.24.78.22
[kube-node]
172.24.78.30
[new-node] # reserved group, used later when adding node nodes
172.24.78.31
ansible-playbook 03.docker.yml

Output

PLAY RECAP ************************************************************************************************************************************************************
172.24.78.22               : ok=13   changed=10   unreachable=0    failed=0   
172.24.78.30               : ok=12   changed=4    unreachable=0    failed=0   
172.24.78.31               : ok=12   changed=9    unreachable=0    failed=0  
  • Deploy the master

ansible-playbook 04.kube-master.yml

Output

PLAY RECAP ************************************************************************************************************************************************************
172.24.78.21               : ok=39   changed=34   unreachable=0    failed=0 
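
The control plane health can be checked from the deploy node before moving on (assuming kubectl was configured there by the prepare step):

kubectl get cs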
  • Deploy the nodes

ansible-playbook 05.kube-node.yml

Note: docker must be installed on the node hosts

PLAY RECAP ************************************************************************************************************************************************************
172.24.78.30               : ok=24   changed=23   unreachable=0    failed=0 
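
Verify on the deploy node that the new node registered:

kubectl get nodes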
  • Deploy the network service (calico)

ansible-playbook 06.network.yml

Error output


fatal: [172.24.78.30 -> 172.24.78.21]: FAILED! => {"attempts": 15, "changed": true, "cmd": "/usr/bin/kubectl get pod -n kube-system -o wide|grep 'calico-node'|grep ' 172.24.78.30 '|awk '{print $3}'", "delta": "0:00:00.098717", "end": "2020-08-15 06:46:14.391950", "rc": 0, "start": "2020-08-15 06:46:14.293233", "stderr": "", "stderr_lines": [], "stdout": "Init:0/1", "stdout_lines": ["Init:0/1"]}

PLAY RECAP ************************************************************************************************************************************************************
172.24.78.21               : ok=16   changed=14   unreachable=0    failed=0   
172.24.78.30               : ok=7    changed=6    unreachable=0    failed=1 

Solution: download the images and push them to the local image registry

vim roles/calico/defaults/main.yml
# change to the supported calico version
calico_ver: "3.3.2"

Import the 3.3.2 image bundle

cd /opt
tar xvf calico-release-v3.3.2.tgz
cd release-v3.3.2/images
docker load -i calico-node.tar
docker tag calico/node:v3.3.2 harbor.his.net/baseimages/calico-node:v3.3.2
docker push harbor.his.net/baseimages/calico-node:v3.3.2
# import the image
docker load -i calico-cni.tar
docker tag calico/cni:v3.3.2 harbor.his.net/baseimages/calico-cni:v3.3.2
docker push harbor.his.net/baseimages/calico-cni:v3.3.2
# import the image
docker load -i calico-kube-controllers.tar
docker tag calico/kube-controllers:v3.3.2 harbor.his.net/baseimages/calico-kube-controllers:v3.3.2
docker push harbor.his.net/baseimages/calico-kube-controllers:v3.3.2

Check whether the images were pushed successfully
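
To confirm the push, pull one of the images back from harbor on any docker host that trusts the harbor certificate (or browse the baseimages project in the Harbor web UI):

docker pull harbor.his.net/baseimages/calico-node:v3.3.2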

Change the yaml file to use the local registry addresses

vim /etc/ansible/roles/calico/templates/calico-v3.3.yaml.j2
containers:
- name: calico-node
  image: harbor.his.net/baseimages/calico-node:v3.3.2
- name: calico-cni
  image: harbor.his.net/baseimages/calico-cni:v3.3.2
- name: calico-kube-controllers
  image: harbor.his.net/baseimages/calico-kube-controllers:v3.3.2

Replace the calicoctl binary with the matching version

mv /etc/ansible/bin/calicoctl /tmp

Upload the local calicoctl to /etc/ansible/bin/calicoctl

chmod a+x /etc/ansible/bin/calicoctl
root@master1:/etc/ansible/bin# ./calicoctl version
Client Version:    v3.3.2
Build date:        2018-12-03T15:10:51+0000
Git commit:        594fd84e
Cluster Version:   v3.4.1
Cluster Type:      k8s,bgp

Run ansible

ansible-playbook 06.network.yml 

Output

# abnormal result
PLAY RECAP *********************************************************************************************************************************************
172.24.78.21               : ok=15   changed=8    unreachable=0    failed=0   
172.24.78.30               : ok=8    changed=4    unreachable=0    failed=0 
# normal result
PLAY RECAP **********************************************************************************************************************************************
172.24.78.21               : ok=8    changed=2    unreachable=0    failed=0   
172.24.78.22               : ok=15   changed=7    unreachable=0    failed=0   
172.24.78.30               : ok=8    changed=3    unreachable=0    failed=0   
172.24.78.31               : ok=8    changed=3    unreachable=0    failed=0  

On the nodes, check whether the calico images can be pulled

calicoctl node status
root@master1:/etc/ansible/bin# calicoctl node status
Calico process is running.

IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+--------------+-------------------+-------+----------+-------------+
| 172.24.78.30 | node-to-node mesh | up    | 01:17:32 | Established |
+--------------+-------------------+-------+----------+-------------+

IPv6 BGP status
No IPv6 peers found.

root@master1:/opt/release-v3.3.2/images# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-5445d7cb7b-62smh   1/1     Running   0          2m56s
calico-node-n75j8                          2/2     Running   0          94m
calico-node-rcmtc                          2/2     Running   2          95m
calico-node-vkcjr                          2/2     Running   0          66m
calico-node-vm7w5                          2/2     Running   0          71m
kubernetes-dashboard-74f5cf8499-wxgt7      1/1     Running   0          2m56s

Add a node

# uncomment the newly added node
vim /etc/ansible/hosts
[new-node] # reserved group, used later when adding node nodes
172.24.78.31
ansible-playbook 20.addnode.yml

Output

PLAY RECAP *********************************************************************************************************************************************
172.24.78.21               : ok=2    changed=1    unreachable=0    failed=0   
172.24.78.31               : ok=67   changed=54   unreachable=0    failed=0 
  • Add a master node

     # comment out the lb group
     vim /etc/ansible/hosts
     [lb]
     #172.24.78.29 LB_ROLE=backup
     #172.24.78.28 LB_ROLE=master
     ansible-playbook 21.addmaster.yml

    Output

     PLAY RECAP *********************************************************************************************************************************************
     172.24.78.21               : ok=2    changed=1    unreachable=0    failed=0   
     172.24.78.22               : ok=82   changed=68   unreachable=0    failed=0   

    Check the result

     calicoctl node status

    Output

     Calico process is running.
    
     IPv4 BGP status
     +--------------+-------------------+-------+----------+-------------+
     | PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
     +--------------+-------------------+-------+----------+-------------+
     | 172.24.78.30 | node-to-node mesh | up    | 01:17:32 | Established |
     | 172.24.78.31 | node-to-node mesh | up    | 01:41:33 | Established |
     | 172.24.78.22 | node-to-node mesh | up    | 01:46:20 | Established |
     +--------------+-------------------+-------+----------+-------------+
    
     IPv6 BGP status
     No IPv6 peers found.
     root@master1:/etc/ansible/bin# kubectl get nodes

    Output

     NAME           STATUS                          ROLES    AGE     VERSION
     172.24.78.21   Ready,SchedulingDisabled        master   43h     v1.13.5
     172.24.78.22   Ready,SchedulingDisabled        master   65s     v1.13.5
     172.24.78.30   Ready                           node     43h     v1.13.5
     172.24.78.31   Ready                           node     5m52s   v1.13.5

K8s application environment

  • dashboard(1.10.1)

    Import the dashboard image and push it to the local harbor server

     cd /opt/
     tar xvf dashboard-yaml_image-1.10.1.tar.gz
     docker load -i kubernetes-dashboard-amd64-v1.10.1.tar.gz
     docker tag gcr.io/google-containers/kubernetes-dashboard-amd64:v1.10.1 harbor.his.net/baseimages/kubernetes-dashboard-amd64:v1.10.1
     docker push harbor.his.net/baseimages/kubernetes-dashboard-amd64:v1.10.1

    Change the dashboard image address in the yaml file to the local harbor address

     mv /etc/ansible/manifests/dashboard/* /tmp/
     tar xvf dashboard-yaml_image-1.10.1.tar.gz -C /etc/ansible/manifests/dashboard/
     cd /etc/ansible/manifests/dashboard/
     vim kubernetes-dashboard.yaml
     image: harbor.his.net/baseimages/kubernetes-dashboard-amd64:v1.10.1

    Create the service

     kubectl apply -f .

Output

serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created
serviceaccount/dashboard-read-user created
clusterrolebinding.rbac.authorization.k8s.io/dashboard-read-binding created
clusterrole.rbac.authorization.k8s.io/dashboard-read-clusterrole created
clusterrole.rbac.authorization.k8s.io/ui-admin created
rolebinding.rbac.authorization.k8s.io/ui-admin-binding created
clusterrole.rbac.authorization.k8s.io/ui-read created
rolebinding.rbac.authorization.k8s.io/ui-read-binding created

Verify that the dashboard has started

kubectl get pods -n kube-system

Output

root@master1:/opt/release-v3.3.2/images# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-5445d7cb7b-62smh   1/1     Running   0          2m56s
calico-node-n75j8                          2/2     Running   0          94m
calico-node-rcmtc                          2/2     Running   2          95m
calico-node-vkcjr                          2/2     Running   0          66m
calico-node-vm7w5                          2/2     Running   0          71m
kubernetes-dashboard-74f5cf8499-wxgt7      1/1     Running   0          2m56s
kubectl get svc -n kube-system

Output

NAME                   TYPE       CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.20.234.4   <none>        443:57125/TCP   43m
kubectl cluster-info

Output

Kubernetes master is running at https://172.24.78.18:6443
kubernetes-dashboard is running at https://172.24.78.18:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

Check the dashboard login username and password

vim /etc/ansible/hosts
# username and password for cluster basic auth
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="silence"
# enter them three times when prompted

Test access

https://172.24.78.18:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy

Token

Log in to the dashboard with a token

kubectl -n kube-system get secret | grep admin-user
#admin-user-token-vlt4j                kubernetes.io/service-account-token   3      59m
kubectl -n kube-system describe secret admin-user-token-vlt4j

token:

eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXZsdDRqIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjNTcyYTMwOC1lMDJlLTExZWEtOWJlNi1mZWZjZmUyNjMzYjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.kJhzTn58yTYu6BGvT55hH8C_W92xK85mRjD0vigqRUSKfUwxPbe0bnt47wJ0j6qvDRO-siofrwnQm9wvX0ge9yKBrXmcsNmiuocmMqK9jUjWUnXNEVIge3IVtgdYxEoETpiM_MSEE1sEHUh1sXv9V51--ObO43V7AQnyOXEQKEX4Tf6qv66q7YYuYHBWdBODyEWgBfRITq2tBA6lGefaQOR3x69WoQdx40yzejUXwuqzGc0bEy87w79372jDxSfWDdlAKha5FiXaSFd9IVI7YIjnbEt2VuvNJ_zc0PbZ2CbavDRbMyY3HHEw8s0jLOyeFzZsi-fVKWyRmQrEpFdinQ
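
The two lookups above can be combined into a single command (a sketch; it assumes only the admin-user secret matches):

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | awk '/admin-user/{print $1}')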

Create a kubeconfig file
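
A minimal sketch is to copy the existing admin kubeconfig and append the dashboard token to it (the target path is an example):

cp /root/.kube/config /opt/dashboard.kubeconfig
vim /opt/dashboard.kubeconfig
# add the token obtained above under the user entry:
#     token: <token from the admin-user secret>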

Set the token login session timeout

vim /etc/ansible/manifests/dashboard/kubernetes-dashboard.yaml
      containers:
      - name: kubernetes-dashboard
        #image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        image: harbor.his.net/baseimages/kubernetes-dashboard-amd64:v1.10.1
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          - --token-ttl=43200
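
After changing the TTL, re-apply the manifest for it to take effect:

kubectl apply -f /etc/ansible/manifests/dashboard/kubernetes-dashboard.yaml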
  • Deploy kube-dns

    Import the images and push them to the local harbor

     mkdir -p /etc/ansible/manifests/dns
     docker load -i k8s-dns-kube-dns-amd64_1.14.13.tar.gz
     docker tag gcr.io/google-containers/k8s-dns-kube-dns-amd64:1.14.13 harbor.his.net/baseimages/k8s-dns-kube-dns-amd64:1.14.13
     docker push harbor.his.net/baseimages/k8s-dns-kube-dns-amd64:1.14.13
    
     docker load -i k8s-dns-sidecar-amd64_1.14.13.tar.gz
     docker tag gcr.io/google-containers/k8s-dns-sidecar-amd64:1.14.13 harbor.his.net/baseimages/k8s-dns-sidecar-amd64:1.14.13
     docker push harbor.his.net/baseimages/k8s-dns-sidecar-amd64:1.14.13
    
     docker load -i k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz
     docker tag gcr.io/google-containers/k8s-dns-dnsmasq-nanny-amd64:1.14.13 harbor.his.net/baseimages/k8s-dns-dnsmasq-nanny-amd64:1.14.13
     docker push harbor.his.net/baseimages/k8s-dns-dnsmasq-nanny-amd64:1.14.13

Change the image addresses in the yaml file to the local harbor addresses

vim kube-dns.yaml
      - name: kubedns
        image: harbor.his.net/baseimages/k8s-dns-kube-dns-amd64:1.14.13
        args:
        - --domain=his.local.

      - name: dnsmasq
        image: harbor.his.net/baseimages/k8s-dns-dnsmasq-nanny-amd64:1.14.13

      - name: sidecar
        image: harbor.his.net/baseimages/k8s-dns-sidecar-amd64:1.14.13
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.his.local,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.his.local,5,SRV

Create the service

kubectl apply -f kube-dns.yaml

Output

service/kube-dns created
serviceaccount/kube-dns created
configmap/kube-dns created
deployment.extensions/kube-dns created
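
DNS resolution can be verified with a throwaway test pod (a sketch; busybox:1.28 is an example image and may need to be pushed to the local harbor first):

kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default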
  • Monitoring component heapster

    heapster: data collection
    influxdb: data storage
    grafana: web visualization

    Import the corresponding images

     cp -r /etc/ansible/manifests/heapster /opt/
     cd /etc/ansible/manifests/heapster/
     rm -rf ./*
     docker load -i heapster-amd64_v1.5.1.tar
     docker tag gcr.io/google-containers/heapster-amd64:v1.5.1 harbor.his.net/baseimages/heapster-amd64:v1.5.1
     docker push harbor.his.net/baseimages/heapster-amd64:v1.5.1
    
     docker load -i eapster-grafana-amd64-v4.4.3.tar
     docker tag 8cb3de219af7 harbor.his.net/baseimages/eapster-grafana-amd64:v4.4.3
     docker push harbor.his.net/baseimages/eapster-grafana-amd64:v4.4.3
    
     docker load -i heapster-influxdb-amd64_v1.3.3.tar 
     docker tag gcr.io/google-containers/heapster-influxdb-amd64:v1.3.3 harbor.his.net/baseimages/heapster-influxdb-amd64:v1.3.3
     docker push harbor.his.net/baseimages/heapster-influxdb-amd64:v1.3.3

    Change the image addresses in the yaml files

     vim heapster.yaml 
           containers:
           - name: heapster
             image: harbor.his.net/baseimages/heapster-amd64:v1.5.1
     vim grafana.yaml 
           containers:
           - name: grafana
             image: harbor.his.net/baseimages/eapster-grafana-amd64:v4.4.3
     vim influxdb.yaml 
           containers:
           - name: influxdb
             image: harbor.his.net/baseimages/heapster-influxdb-amd64:v1.3.3

    Create the services

     kubectl apply -f .

    Output

     service/monitoring-grafana created
     serviceaccount/heapster created
     clusterrolebinding.rbac.authorization.k8s.io/heapster created
     deployment.extensions/heapster created
     service/heapster created
     deployment.extensions/monitoring-influxdb created
     service/monitoring-influxdb created
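
     Confirm the monitoring pods are running; after a few minutes, CPU/memory graphs should appear in the dashboard:

     kubectl get pods -n kube-system | grep -E 'heapster|influxdb|grafana'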