Deployment environment
# OS: CentOS Linux release 8.0.1905 (Core)
# kubelet version: v1.14.0
# runc version: v1.0.0-rc9
# crictl version: v1.16.1
# cri-o version: v1.15.2
# conmon version: v2.0.1
# CNI plugins version: v0.8.2
# Network plugin: kube-router
# Working directory: /apps
# Binary directory: /usr/bin
# CNI directory: /apps/cni
# Build workstation: Windows 10 running Ubuntu 19.10 (WSL)
# Build working directory: /mnt/g/work/crio
# Node IP: 192.168.2.196
# Node name: node05
Prepare the required binaries
mkdir -p /mnt/g/work/crio
cd /mnt/g/work/crio
wget https://github.com/opencontainers/runc/releases/download/v1.0.0-rc9/runc.amd64
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.16.1/crictl-v1.16.1-linux-amd64.tar.gz
wget https://github.com/cri-o/cri-o/releases/download/v1.15.2/crio-v1.15.2.tar.gz
wget https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz
wget https://github.com/containers/conmon/archive/v2.0.1.zip
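Optionally, the downloads can be checked against the sha256 sums published on each project's release page before anything is built:
sha256sum runc.amd64 crictl-v1.16.1-linux-amd64.tar.gz crio-v1.15.2.tar.gz cni-plugins-linux-amd64-v0.8.2.tgz v2.0.1.zip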
Extract the downloads and build from source
# conmon has no official binary release, so it must be built from source
# Build it on the workstation, then distribute it to the nodes
sudo apt-get install \
gcc \
git \
libc6-dev \
libglib2.0-dev \
pkg-config \
make \
unzip \
runc
# Build conmon
# Extract the conmon source
unzip v2.0.1.zip
cd conmon-2.0.1
make
# The built binary is at ./bin/conmon
cp -pdr ./bin/conmon ../
cd ..
rm -rf conmon-2.0.1 v2.0.1.zip
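A quick sanity check that the freshly built binary runs (it simply prints its version string):
./conmon --version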
# Extract the CNI plugins
mkdir -p ./cni/bin
cd ./cni/bin
mv ../../cni-plugins-linux-amd64-v0.8.2.tgz ./
tar -xvf cni-plugins-linux-amd64-v0.8.2.tgz
rm -rf cni-plugins-linux-amd64-v0.8.2.tgz
# Return to the top-level directory
cd ../../
# Extract crictl
tar -xvf crictl-v1.16.1-linux-amd64.tar.gz
rm -f crictl-v1.16.1-linux-amd64.tar.gz
# Extract cri-o
tar -xvf crio-v1.15.2.tar.gz
cd crio-v1.15.2/bin/
mv crio-x86_64-static-glibc ../../crio
cd ../../
rm -rf crio-v1.15.2 crio-v1.15.2.tar.gz
# Rename runc.amd64
mv runc.amd64 runc
chmod +x runc
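Before distributing anything, it can help to confirm that every binary is a 64-bit Linux executable and reports the expected version (assuming the file utility is installed on the build host):
file ./runc ./crio ./crictl ./conmon
./runc --version
./crictl --version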
Create configuration files
cd /mnt/g/work/crio
# CNI test configuration; delete this CNI config file before joining the node to the Kubernetes cluster
mkdir -p ./etc/cni/net.d
cat > ./etc/cni/net.d/87-bridge.conflist <<EOF
{
  "cniVersion": "0.4.0",
  "name": "mynet",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "cni0",
      "isGateway": true,
      "ipMasq": true,
      "ipam": {
        "type": "host-local",
        "routes": [
          {
            "dst": "0.0.0.0/0"
          }
        ],
        "ranges": [
          [
            {
              "subnet": "10.88.0.0/16",
              "gateway": "10.88.0.1"
            }
          ]
        ]
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    },
    {
      "type": "firewall",
      "backend": "iptables"
    }
  ]
}
EOF
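A malformed conflist silently prevents CNI from setting up pod networking, so it is worth validating the JSON syntax; a minimal check, assuming python3 is available on the build host:
python3 -m json.tool ./etc/cni/net.d/87-bridge.conflist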
# Create the policy configuration file
mkdir ./etc/containers/
cat > ./etc/containers/policy.json <<EOF
{
  "default": [
    {
      "type": "insecureAcceptAnything"
    }
  ],
  "transports": {
    "docker-daemon": {
      "": [
        {
          "type": "insecureAcceptAnything"
        }
      ]
    }
  }
}
EOF
# Create the registries configuration file used for image pulls (also used by the 1.17 release)
cat > ./etc/containers/registries.conf <<EOF
# This is a system-wide configuration file used to
# keep track of registries for various container backends.
# It adheres to TOML format and does not support recursive
# lists of registries.
# The default location for this configuration file is /etc/containers/registries.conf.
# The only valid categories are: 'registries.search', 'registries.insecure',
# and 'registries.block'.
[registries.search]
registries = ['registry.access.redhat.com', 'docker.io', 'registry.fedoraproject.org', 'quay.io', 'registry.centos.org']
# If you need to access insecure registries, add the registry's fully-qualified name.
# An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
[registries.insecure]
registries = []
# If you need to block pull access from a registry, uncomment the section below
# and add the registries fully-qualified name.
#
# Docker only
[registries.block]
registries = []
EOF
# Create the cri-o configuration file
mkdir ./etc/crio/
# Generate the default configuration
./crio --config="" config > ./etc/crio/crio.conf
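To locate the keys that are changed below inside the freshly generated file, a grep such as the following can be used (key names as emitted by the CRI-O 1.15 template):
grep -nE '(root|runroot|default_ulimits|conmon|pids_limit|pause_image|plugin_dirs) =' ./etc/crio/crio.conf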
# Edit the generated configuration
vim ./etc/crio/crio.conf
# Uncomment and change the storage directory
root = "/apps/lib/containers/storage"
# Uncomment and change the container runtime state directory
runroot = "/apps/run/containers/storage"
# Uncomment and change the default ulimits for containers
default_ulimits = [
"nofile=1024000:1024000",
"nproc=1024000:1024000",
"core=-1:-1",
]
# Change the conmon path (default: conmon = "/usr/local/libexec/crio/conmon")
conmon = "/usr/bin/conmon"
# Change pids_limit (default: 1024)
pids_limit = 102400
# pause_image defaults to "k8s.gcr.io/pause:3.1"; that registry is unreachable from here, so change it
pause_image = "docker.io/juestnow/pause-amd64:3.1"
# Change the CNI plugin binary path
plugin_dirs = [
"/apps/cni/bin/",
]
# Create the crictl configuration
cat >./etc/crictl.yaml <<EOF
runtime-endpoint: unix:///var/run/crio/crio.sock
EOF
# Create the cri-o systemd unit file
cat >./crio.service <<EOF
[Unit]
Description=OCI-based implementation of Kubernetes Container Runtime Interface
Documentation=https://github.com/cri-o/cri-o
[Service]
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/crio --log-level info
Restart=on-failure
RestartSec=5
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
# Directory layout to be distributed
root@Qist:/mnt/g/work/crio# tree
.
├── cni
│   └── bin
│       ├── bandwidth
│       ├── bridge
│       ├── dhcp
│       ├── firewall
│       ├── flannel
│       ├── host-device
│       ├── host-local
│       ├── ipvlan
│       ├── loopback
│       ├── macvlan
│       ├── portmap
│       ├── ptp
│       ├── sbr
│       ├── static
│       ├── tuning
│       └── vlan
├── conmon
├── crictl
├── crio
├── crio.service
├── etc
│   ├── cni
│   │   └── net.d
│   │       └── 87-bridge.conflist
│   ├── containers
│   │   ├── policy.json
│   │   └── registries.conf
│   ├── crictl.yaml
│   └── crio
│       └── crio.conf
└── runc
Distribute the files
scp -r crio crictl 192.168.2.196:/usr/local/bin/
scp conmon runc 192.168.2.196:/usr/bin/
# Distribute the CNI plugins
scp -r cni 192.168.2.196:/apps/
# Distribute the configuration files
scp -r etc 192.168.2.196:/
# Distribute the systemd unit file
scp crio.service 192.168.2.196:/usr/lib/systemd/system/crio.service
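The scp commands above target the single node 192.168.2.196; with more worker nodes the same files can be pushed in a loop (add further node IPs to the list as needed):
for node in 192.168.2.196; do
  scp -r crio crictl ${node}:/usr/local/bin/
  scp conmon runc ${node}:/usr/bin/
  scp -r cni ${node}:/apps/
  scp -r etc ${node}:/
  scp crio.service ${node}:/usr/lib/systemd/system/crio.service
done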
Install the required dependencies and start cri-o
dnf -y install epel-release
dnf install dnf-utils ipvsadm telnet wget net-tools conntrack ipset jq iptables curl sysstat libseccomp socat nfs-utils fuse fuse-devel device-mapper
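The crio.service unit above loads br_netfilter and overlay at every start; to make the modules and the usual bridge netfilter sysctls survive reboots (a common Kubernetes node prerequisite that is not part of the original steps; the file names below are my own choice), something like the following can be added:
cat > /etc/modules-load.d/crio.conf <<EOF
# Kernel modules cri-o and the CNI bridge depend on, loaded at boot
overlay
br_netfilter
EOF
cat > /etc/sysctl.d/99-kubernetes-cri.conf <<EOF
# Let iptables see bridged traffic and allow forwarding between pods
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system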
# Start crio
systemctl daemon-reload
systemctl enable crio
systemctl start crio
[root@node05 ~]# systemctl status crio
● crio.service - OCI-based implementation of Kubernetes Container Runtime Interface
Loaded: loaded (/usr/lib/systemd/system/crio.service; enabled; vendor preset: disabled)
Active: active (running) since Mon 2019-10-28 10:22:14 CST; 10h ago
Docs: https://github.com/github.com/cri-o/cri-o
Process: 714 ExecStartPre=/sbin/modprobe overlay (code=exited, status=0/SUCCESS)
Process: 659 ExecStartPre=/sbin/modprobe br_netfilter (code=exited, status=0/SUCCESS)
Main PID: 726 (crio)
Tasks: 17 (limit: 49836)
Memory: 89.3M
CGroup: /system.slice/crio.service
└─726 /usr/local/bin/crio --log-level info
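Before creating any pods, confirm that crio answers on its CRI socket; crictl reads the /etc/crictl.yaml distributed earlier, so no extra flags are needed:
crictl version
crictl info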
# Create a test application
cat > sandbox_config.json <<EOF
{
  "metadata": {
    "name": "podsandbox1",
    "uid": "redhat-test-crio",
    "namespace": "redhat.test.crio",
    "attempt": 1
  },
  "hostname": "crictl_host",
  "log_directory": "",
  "dns_config": {
    "searches": [
      "8.8.8.8"
    ]
  },
  "port_mappings": [],
  "resources": {
    "cpu": {
      "limits": 3,
      "requests": 2
    },
    "memory": {
      "limits": 50000000,
      "requests": 2000000
    }
  },
  "labels": {
    "group": "test"
  },
  "annotations": {
    "owner": "hmeng",
    "security.alpha.kubernetes.io/seccomp/pod": "unconfined"
  },
  "linux": {
    "cgroup_parent": "/Burstable/pod_123-456",
    "security_context": {
      "namespace_options": {
        "network": 0,
        "pid": 1,
        "ipc": 0
      },
      "selinux_options": {
        "user": "system_u",
        "role": "system_r",
        "type": "svirt_lxc_net_t",
        "level": "s0:c4,c5"
      }
    }
  }
}
EOF
# Create the pod sandbox
POD_ID=$(crictl runp ./sandbox_config.json)
# Show the pod IP
crictl inspectp --output table $POD_ID
crictl pull quay.io/crio/redis:alpine
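The container spec ./container_redis.json used in the next step is not created anywhere above. A minimal sketch (field names follow the CRI ContainerConfig; the upstream cri-o crictl tutorial ships a more complete example) could be:
cat > container_redis.json <<EOF
{
  "metadata": {
    "name": "podsandbox1-redis"
  },
  "image": {
    "image": "quay.io/crio/redis:alpine"
  }
}
EOF
The image's default entrypoint starts redis-server on port 6379, which is what the telnet test below expects.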
# Create the container
CONTAINER_ID=$(crictl create $POD_ID ./container_redis.json ./sandbox_config.json)
# Start the container
crictl start $CONTAINER_ID
# Inspect the container
crictl inspect $CONTAINER_ID
# Test that redis is reachable
telnet 10.88.0.2 6379
# Check the cri-o logs
journalctl -u crio --no-pager
# Stop the container
crictl stop $CONTAINER_ID
# Delete the container
crictl rm $CONTAINER_ID
# Stop the pod
crictl stopp $POD_ID
# Delete the pod
crictl rmp $POD_ID
# List pods
crictl pods
# List containers
crictl ps
# Stop cri-o
systemctl stop crio
# Delete the cni0 bridge created during the test
ip link del dev cni0
# Remove the test CNI configuration
rm -f /etc/cni/net.d/87-bridge.conflist
Configure kubelet to use cri-o
vim /apps/kubernetes/conf/kubelet
----------------------------------------------------------------------------------------------------------------------
KUBELET_OPTS="--bootstrap-kubeconfig=/apps/kubernetes/conf/bootstrap.kubeconfig \
--fail-swap-on=false \
--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/apps/cni/bin \
--kubeconfig=/apps/kubernetes/conf/kubelet.kubeconfig \
--address=192.168.2.196 \
--node-ip=192.168.2.196 \
--hostname-override=node05 \
--cluster-dns=10.64.0.2 \
--cluster-domain=cluster.local \
--authorization-mode=Webhook \
--authentication-token-webhook=true \
--client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
--rotate-certificates=true \
--cgroup-driver=cgroupfs \
--allow-privileged=true \
--healthz-port=10248 \
--healthz-bind-address=192.168.2.196 \
--cert-dir=/apps/kubernetes/ssl \
--feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
--node-labels=node-role.kubernetes.io/k8s-node=true \
--serialize-image-pulls=false \
--enforce-node-allocatable=pods,kube-reserved,system-reserved \
--pod-manifest-path=/apps/work/kubernetes/manifests \
--runtime-cgroups=/systemd/system.slice/kubelet.service \
--kube-reserved-cgroup=/systemd/system.slice/kubelet.service \
--system-reserved-cgroup=/systemd/system.slice \
--root-dir=/apps/work/kubernetes/kubelet \
--log-dir=/apps/kubernetes/log \
--alsologtostderr=true \
--logtostderr=false \
--anonymous-auth=true \
--container-log-max-files=10 \
--container-log-max-size=100Mi \
--container-runtime=remote \
--container-runtime-endpoint=unix:///var/run/crio/crio.sock \
--containerd=unix:///var/run/crio/crio.sock \
--runtime-request-timeout=15m \
--image-gc-high-threshold=70 \
--image-gc-low-threshold=50 \
--kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \
--system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \
--eviction-hard=memory.available<500Mi,nodefs.available<10% \
--sync-frequency=30s \
--resolv-conf=/etc/resolv.conf \
--pod-infra-container-image=docker.io/juestnow/pause-amd64:3.1 \
--image-pull-progress-deadline=30s \
--v=2 \
--event-burst=30 \
--event-qps=15 \
--kube-api-burst=30 \
--kube-api-qps=15 \
--max-pods=100 \
--pods-per-core=10 \
--read-only-port=0 \
--volume-plugin-dir=/apps/kubernetes/kubelet-plugins/volume"
---------------------------------------------------------------------------------------------------------------------------------------------
# Edit the kubelet.service unit file
vim /usr/lib/systemd/system/kubelet.service
--------------------------------------------------------------------------------------------------------------------------------------------
[Unit]
Description=Kubernetes Kubelet
After=crio.service
Requires=crio.service
[Service]
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/hugetlb/systemd/system.slice/kubelet.service
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/blkio/systemd/system.slice/kubelet.service
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/cpuset/systemd/system.slice/kubelet.service
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/devices/systemd/system.slice/kubelet.service
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/net_cls,net_prio/systemd/system.slice/kubelet.service
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/perf_event/systemd/system.slice/kubelet.service
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/cpu,cpuacct/systemd/system.slice/kubelet.service
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/freezer/systemd/system.slice/kubelet.service
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/memory/systemd/system.slice/kubelet.service
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/pids/systemd/system.slice/kubelet.service
ExecStartPre=-/bin/mkdir -p /sys/fs/cgroup/systemd/systemd/system.slice/kubelet.service
EnvironmentFile=-/apps/kubernetes/conf/kubelet
ExecStart=/apps/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
[Install]
WantedBy=multi-user.target
# Note: when running on docker these kubelet.service cgroup directories do not need to be created
# With cri-o they must be created manually (handled here by the ExecStartPre lines above)
# Apply the configuration
systemctl daemon-reload
# Start crio.service
systemctl start crio.service
# Restart kubelet
systemctl restart kubelet.service
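If kubelet fails to start, one of the first things to check is whether the cgroup directories from the ExecStartPre lines were actually created, and what the kubelet journal says:
ls -d /sys/fs/cgroup/*/systemd/system.slice/kubelet.service
journalctl -u kubelet --no-pager | tail -n 50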
# Create the CNI configuration file 10-kuberouter.conflist
cat > /etc/cni/net.d/10-kuberouter.conflist <<EOF
{
  "cniVersion": "0.3.0",
  "name": "mynet",
  "plugins": [
    {
      "name": "kubernetes",
      "type": "bridge",
      "bridge": "kube-bridge",
      "isDefaultGateway": true,
      "hairpinMode": true,
      "ipam": {
        "type": "host-local"
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "snat": true,
        "portMappings": true
      }
    }
  ]
}
EOF
# Edit the kube-router configuration
vim /apps/kube-router/conf/kube-router
KUBE_ROUTER_OPTS="--run-router=true \
--run-firewall=true \
--run-service-proxy=true \
--advertise-cluster-ip=true \
--advertise-loadbalancer-ip=true \
--advertise-pod-cidr=true \
--advertise-external-ip=true \
--cluster-asn=64512 \
--peer-router-ips=192.168.3.12 \
--peer-router-asns=64513 \
--metrics-path=/metrics \
--metrics-port=8080 \
--enable-cni=true \
--enable-ibgp=true \
--enable-overlay=true \
--nodeport-bindon-all-ip=true \
--nodes-full-mesh=true \
--enable-pod-egress=true \
--cluster-cidr=10.65.0.0/16 \
--hostname-override=node05 \
--kubeconfig=/apps/kube-router/conf/kube-router.kubeconfig \
--v=2"
KUBE_ROUTER_CNI_CONF_FILE="/etc/cni/net.d/10-kuberouter.conflist"
# Edit the kube-router systemd unit file
vim /usr/lib/systemd/system/kube-router.service
[Unit]
Description=Kubernetes kube-router
[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kube-router/conf/kube-router
ExecStart=/apps/kube-router/bin/kube-router $KUBE_ROUTER_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
# Reload the systemd unit files
systemctl daemon-reload
# Restart kube-router
systemctl restart kube-router
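After the restart it is worth confirming that kube-router is healthy; with --enable-cni=true it takes ownership of /etc/cni/net.d/10-kuberouter.conflist and is expected to fill in the pod CIDR allocated to this node (a behaviour of kube-router itself, not something configured above):
systemctl status kube-router --no-pager
cat /etc/cni/net.d/10-kuberouter.conflist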
# Check the node status
kubectl get node -o wide
[root@jenkins ~]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
nginx-1 Ready k8s-master 193d v1.14.0 192.168.2.186 <none> CentOS Linux 7 (Core) 3.10.0-1062.1.2.el7.x86_64 docker://19.3.3
nginx-2 Ready k8s-node 47d v1.14.0 192.168.2.189 <none> CentOS Linux 7 (Core) 5.1.0-1.el7.elrepo.x86_64 docker://18.9.6
node01 Ready k8s-master 194d v1.14.0 192.168.2.253 <none> CentOS Linux 7 (Core) 5.0.7-1.el7.elrepo.x86_64 docker://18.9.4
node02 Ready k8s-master 194d v1.14.0 192.168.3.4 <none> CentOS Linux 7 (Core) 5.0.7-1.el7.elrepo.x86_64 docker://18.9.4
node03 Ready k8s-node 47d v1.14.0 192.168.2.165 <none> CentOS Linux 7 (Core) 5.1.0-1.el7.elrepo.x86_64 docker://18.9.6
node04 Ready k8s-node 47d v1.14.0 192.168.2.167 <none> CentOS Linux 7 (Core) 5.1.14-1.el7.elrepo.x86_64 docker://18.9.6
node05 Ready k8s-node 5h24m v1.14.0 192.168.2.196 <none> CentOS Linux 8 (Core) 4.18.0-80.11.2.el8_0.x86_64 cri-o://1.15.2
[root@node05 conf]# crictl ps
CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID
5942e8c161029 docker.io/library/traefik@sha256:9a77d02ad23622cd85e38eec127a85110ad73ba8258d8b457cf17bd3ad1eeef0 3 hours ago Running traefik 0 b03a2afb9dd91
9b23f9bc1d51a docker.io/juestnow/process-exporter@sha256:8ef99f0488d8ea30770e048b4e4255a0ace7732422f243ab57fede0a003945bf 3 hours ago Running process-exporter 1 7d133f4fba6c2
1e5a5a07676f1 docker.io/prom/node-exporter@sha256:b630fb29d99b3483c73a2a7db5fc01a967392a3d7ad754c8eccf9f4a67e7ee31 3 hours ago Running node-exporter 1 cfa2a3d2b851c
[root@node05 conf]# crictl pods
POD ID CREATED STATE NAME NAMESPACE ATTEMPT
b03a2afb9dd91 3 hours ago Ready traefik-f94cf4494-2n4x5 kube-system 0
cfa2a3d2b851c 3 hours ago Ready node-exporter-rmv24 monitoring 1
7d133f4fba6c2 3 hours ago Ready process-exporter-hrbcv monitoring 1
# Containers are running normally; replacing docker with cri-o succeeded
# kube-router download URL
https://github.com/cloudnativelabs/kube-router/releases/download/v0.3.2/kube-router_0.3.2_linux_amd64.tar.gz
Source: 51CTO
Author: juestnow
Link: https://blog.51cto.com/juestnow/2446095