rke2 部署 k8s集群
环境准备,所有主机都做
# 主机名设置(注意:主机名需与后面 config.yaml 中的 node-name 及 kubectl 输出一致,统一为 k8s-node01/k8s-node02)
tee -a /etc/hosts > /dev/null << EOF
192.168.238.125 k8s-master
192.168.238.126 k8s-node01
192.168.238.127 k8s-node02
EOF

# 关闭swap(-a 关闭所有 swap;原 -all 为无效参数。同时注释 /etc/fstab 中的 swap 行,防止重启后恢复)
swapoff -a
sed -ri 's/^([^#].*\sswap\s.*)$/# \1/' /etc/fstab

# 开启ipvs(modprobe 一次加载多个模块需要 -a 参数,否则后面的名字会被当作模块参数)
modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack

# 写入文件确保配置永久生效(modules-load.d 的格式是每行一个模块名)
root@k8s-master:~# vim /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack

# 开启内核参数
modprobe br_netfilter
modprobe overlay
echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables=1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables=1" >> /etc/sysctl.conf
sysctl --system

# 确认生效情况
root@k8s-master:~# lsmod | grep -E "ip_vs|br_netfilter|overlay"
overlay               212992  0
br_netfilter           32768  0
bridge                425984  1 br_netfilter
ip_vs_sh               12288  0
ip_vs_wrr              12288  0
ip_vs_rr               12288  0
ip_vs                 221184  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          196608  1 ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
libcrc32c              12288  4 nf_conntrack,btrfs,raid456,ip_vs

# 在master上安装rke2
mkdir -p /etc/rancher/rke2/
curl -sfL https://rancher-mirror.rancher.cn/rke2/install.sh | INSTALL_RKE2_MIRROR=cn sh -

# 换镜像源
vim /etc/rancher/rke2/registries.yaml
mirrors:
  "docker.io":
    endpoint:
      - "https://docker.1ms.run"

# 第一次启动 RKE2 时读取,定义集群基础参数,集群初始化用
vim /etc/rancher/rke2/config.yaml
token: rke2-secret-token
tls-san:
  - "192.168.238.125"
write-kubeconfig-mode: "0644"
node-name: k8s-master
node-label:
  - "role=master"
debug: true
kube-proxy-arg:
  - proxy-mode=ipvs

# 如果多master节点,其余master config参考
vim /etc/rancher/rke2/config.yaml
server: https://192.168.238.125:9345
token: rke2-secret-token
tls-san:
  - "192.168.238.125"
write-kubeconfig-mode: "0644"
node-name: k8s-master02
node-label:
  - "role=master"
debug: true
kube-proxy-arg:
  - proxy-mode=ipvs

# 启动服务
systemctl enable rke2-server.service
systemctl start rke2-server.service

所有从节点所作操作
mkdir -p /etc/rancher/rke2/
curl -sfL https://rancher-mirror.rancher.cn/rke2/install.sh | INSTALL_RKE2_MIRROR=cn INSTALL_RKE2_TYPE="agent" sh -

# 换镜像源
vim /etc/rancher/rke2/registries.yaml
mirrors:
  "docker.io":
    endpoint:
      - "https://docker.1ms.run"

# 同样写config,注意 node-name 更改(需与 /etc/hosts 中的主机名保持一致)
vim /etc/rancher/rke2/config.yaml
server: https://192.168.238.125:9345
token: rke2-secret-token
node-name: k8s-node01
node-label:
  - "role=worker"
debug: true
kube-proxy-arg:
  - proxy-mode=ipvs

# 启动服务
systemctl enable rke2-agent.service
systemctl start rke2-agent.service

部署如果没问题,生产环境可以把 debug 改回 false,修改之后重启 rke2 服务生效
kubectl 等命令路径
root@k8s-master:~# ls /var/lib/rancher/rke2/bin/kubectl
/var/lib/rancher/rke2/bin/kubectl

# 可做软链接,或者把该目录加入 PATH
echo 'export PATH=$PATH:/var/lib/rancher/rke2/bin' >> ~/.bashrc
# 让配置立即生效
source ~/.bashrc

集群默认配置文件
root@k8s-master:~# ls /etc/rancher/rke2/rke2.yaml
/etc/rancher/rke2/rke2.yaml
root@k8s-master:~# kubectl get node --kubeconfig /etc/rancher/rke2/rke2.yaml
NAME         STATUS   ROLES                AGE    VERSION
k8s-master   Ready    control-plane,etcd   133m   v1.35.4+rke2r1
k8s-node01   Ready    <none>               114m   v1.35.4+rke2r1
k8s-node02   Ready    <none>               114m   v1.35.4+rke2r1

# node节点没有标签可以手动打
kubectl label node k8s-node01 node-role.kubernetes.io/worker=true
kubectl label node k8s-node02 node-role.kubernetes.io/worker=true

# 如果想让这个集群成为默认集群
# 需要将 kubeconfig 文件放到默认的 config 路径
# 由于 rke2 默认不在主目录创建 .kube 目录,需要的话可以手动建
mkdir -p ~/.kube && cp /etc/rancher/rke2/rke2.yaml ~/.kube/config

crictl 需要处理一下套接字文件
vim /etc/crictl.yaml
runtime-endpoint: unix:///run/k3s/containerd/containerd.sock
image-endpoint: unix:///run/k3s/containerd/containerd.sock