
Installing a Docker-Based Kubernetes 1.35 High-Availability Cluster on Rocky Linux 9.7

 

 

192.168.3.60 master1.org K8s control-plane node 1, Master and etcd
192.168.3.61 master2.org K8s control-plane node 2, Master and etcd
192.168.3.62 master3.org K8s control-plane node 3, Master and etcd
192.168.3.63 node1.org K8s worker node 1
192.168.3.64 node2.org K8s worker node 2
192.168.3.65 node3.org K8s worker node 3
192.168.3.66 ha1.org API access entry point 1, providing high availability and load balancing
192.168.3.67 ha2.org API access entry point 2, providing high availability and load balancing
192.168.3.68 kubeapi.org VIP, implemented on ha1 and ha2

 

Note: each Master node needs at least 2 GB of memory, otherwise kubeadm init will fail.

 

Preparing the initial environment
1.2.1 Operating system and Kubernetes component versions
The OS, container engine, and Kubernetes versions used for the cluster in this walkthrough:
OS: Rocky Linux 9.7
CRI: Docker 29, cgroup driver: systemd
Kubernetes: 1.35

 

dnf install -y lrzsz wget vim    #basic tools, on all hosts

 

 

Set a unique hostname on each host, and configure IP and hostname resolution:
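For the hostname part, a minimal sketch (run the matching command on each machine; only master1 is shown, the other hosts follow the table above):

hostnamectl set-hostname master1.org    #on 192.168.3.60; repeat with the appropriate name on every other host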

cat >> /etc/hosts <<EOF

192.168.3.68 kubeapi.org kubeapi
192.168.3.60 master1.org master1
192.168.3.61 master2.org master2
192.168.3.62 master3.org master3
192.168.3.63 node1.org node1
192.168.3.64 node2.org node2
192.168.3.65 node3.org node3
192.168.3.66 ha1.org ha1
192.168.3.67 ha2.org ha2

EOF

 

Synchronize time across the cluster Masters and all nodes

dnf install -y chrony

 

sudo vi /etc/chrony.conf
#Add or adjust the NTP server line:
server ntp1.aliyun.com iburst

 

grep -q "makestep" /etc/chrony.conf || echo "makestep 1.0 3" >> /etc/chrony.conf

sudo systemctl enable --now chronyd

chronyc -a makestep
timedatectl set-ntp true
timedatectl status

 

(crontab -l 2>/dev/null; echo '*/2 * * * * /usr/bin/chronyc -a makestep >/dev/null 2>&1') | crontab -
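Optionally, confirm that chrony is actually synchronizing against the configured server:

chronyc sources -v
chronyc tracking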

 

 

Disable SELinux

setenforce 0
sed -i 's#^\(SELINUX=\).*#\1disabled#' /etc/sysconfig/selinux

 

 

Disable the firewall

Run on all Master and node hosts

1. Stop firewalld:

systemctl stop firewalld.service       #stop firewalld

systemctl disable firewalld.service    #keep firewalld from starting at boot

2. Optionally install the iptables service; for this deployment it stays stopped and disabled so it does not interfere with the rules kube-proxy manages:

dnf install -y iptables-services       #install

systemctl stop iptables.service        #stop iptables

systemctl disable iptables.service     #keep iptables from starting at boot

 

 

Disable swap devices

Run on all Master and node hosts

swapoff -a

sed -i '/swap/s/^/#/' /etc/fstab
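A quick optional check that swap is really off:

swapon --show    #should print nothing
free -h          #the Swap line should show 0B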

  

Setting up keepalived
Deploy and configure keepalived on the two hosts ha1 and ha2 as follows

dnf install keepalived

#keepalived configuration

cp /usr/share/doc/keepalived/keepalived.conf.vrrp /etc/keepalived/keepalived.conf

#vim /etc/keepalived/keepalived.conf

#Configuration on the first node (ha1)

cat /etc/keepalived/keepalived.conf
global_defs {
    router_id ha1.org
}

vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 1
    weight -30
    fall 3
    rise 2
    timeout 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 66
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.3.68/24 dev ens33 label ens33:1
    }
    track_script {
        check_haproxy
    }
}

 

#Configuration on the second node (ha2)

global_defs {
    router_id ha2.org
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 66
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.3.68/24 dev ens33 label ens33:1
    }
}

 

systemctl start keepalived.service

systemctl enable keepalived.service

systemctl status keepalived.service

 

#Verify that the keepalived service is working

 hostname -I
192.168.3.66 192.168.3.68 

 

ip a 
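An optional failover check (a sketch): stop keepalived on ha1 and confirm the VIP moves to ha2, then bring it back.

systemctl stop keepalived     #on ha1
ip addr show ens33            #on ha2: 192.168.3.68 should now be present
systemctl start keepalived    #back on ha1: with priority 100 over 80, the VIP fails back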

 

Setting up HAProxy
Use HAProxy to provide Layer 4 reverse proxying and load balancing for the Kubernetes API server.

#Run the following on both ha1 and ha2

#Allow binding the VIP even when it is not currently assigned to this host
cat >> /etc/sysctl.conf <<EOF
net.ipv4.ip_nonlocal_bind = 1
EOF

 

sysctl -p 

 

#Install and configure haproxy

dnf -y install haproxy

vim /etc/haproxy/haproxy.cfg 

 

cat /etc/haproxy/haproxy.cfg

#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   https://www.haproxy.org/download/1.8/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

    # utilize system-wide crypto-policies
    ssl-default-bind-ciphers PROFILE=SYSTEM
    ssl-default-server-ciphers PROFILE=SYSTEM

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend main
    bind *:5000
    acl url_static       path_beg       -i /static /images /javascript /stylesheets
    acl url_static       path_end       -i .jpg .gif .png .css .js

    use_backend static          if url_static
    default_backend             app

#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static
    balance     roundrobin
    server      static 127.0.0.1:4331 check

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
    balance     roundrobin
    server  app1 127.0.0.1:5001 check
    server  app2 127.0.0.1:5002 check
    server  app3 127.0.0.1:5003 check
    server  app4 127.0.0.1:5004 check

########## added content ##########
listen stats
    mode http
    bind 0.0.0.0:8888
    stats enable
    log global
    stats uri /status
    stats auth admin:123456

listen kubernetes-api-6443
    bind 192.168.3.68:6443
    mode tcp
    server master1 192.168.3.60:6443 check inter 3s fall 3 rise 3
    # server master2 192.168.3.61:6443 check inter 3s fall 3 rise 3
    # server master3 192.168.3.62:6443 check inter 3s fall 3 rise 3

 

 

haproxy -c -f /etc/haproxy/haproxy.cfg    #check the configuration syntax first

systemctl restart haproxy

systemctl enable haproxy

systemctl status haproxy

 

Open http://ha1.org:8888/status in a browser; the HAProxy stats page should appear.

Open http://ha2.org:8888/status; the same stats page should appear.

Open http://kubeapi.org:8888/status (the VIP); the stats page should be reachable there as well.

 

 

 

 

 

1.2.8 Kernel tuning
Run on all Master and node hosts

 

#Load the required kernel modules at boot
cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF


#Load the modules immediately
modprobe overlay
modprobe br_netfilter


#Verify the modules are loaded

lsmod |grep -E 'overlay|br_netfilter'
br_netfilter 36864 0
bridge 425984 1 br_netfilter
overlay 241664 0


#Set the required sysctl parameters; they persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF
#Apply the sysctl parameters without rebooting
sysctl --system
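Optionally double-check the three values Kubernetes cares about:

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward    #all three should report 1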

 

 

 

Install and configure Docker on all master and node hosts
1.4.1 Installing Docker
Example: install Docker CE (via the Aliyun mirror of the Docker repository) on all masters and workers

# 1. Remove old versions (if any)
sudo dnf remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine

# 2. Install the required dependencies
sudo dnf install -y dnf-plugins-core

# 3. Add the Docker repository (Aliyun mirror)
sudo dnf config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# 4. Install Docker Engine
sudo dnf install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

 

 

#Docker Hub is difficult to reach from mainland China, so configure registry mirrors for the Docker daemon

cat > /etc/docker/daemon.json <<EOF
{
   "registry-mirrors": [
       "https://docker.m.daocloud.io",
       "https://docker.1panel.live",
       "https://docker.1ms.run",
       "https://docker.xuanyuan.me"
  ],
   "insecure-registries": ["harbor.wang.org"]
}
EOF

 

systemctl restart docker

systemctl enable docker

 docker version
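Optionally confirm the daemon picked up the expected settings (on Rocky 9 with cgroup v2 the driver should already be systemd, matching the versions listed earlier):

docker info | grep -i 'cgroup driver'
docker info | grep -iA5 'registry mirrors'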

 

Install cri-dockerd on all hosts (required since Kubernetes v1.24)
Download the generic Linux binary on every host and create the service and socket unit files

#Download the generic Linux release

VERSION=0.4.2

wget https://github.com/Mirantis/cri-dockerd/releases/download/v${VERSION}/cri-dockerd-${VERSION}.amd64.tgz

tar xf cri-dockerd-${VERSION}.amd64.tgz

mv cri-dockerd/cri-dockerd /usr/bin/

scp /usr/bin/cri-dockerd 192.168.3.61:/usr/bin/    #copy the binary to the remaining hosts as well

 

#Prepare the service and socket unit files
#https://github.com/Mirantis/cri-dockerd/tree/master/packaging/systemd

 

wget -O /lib/systemd/system/cri-docker.service https://raw.githubusercontent.com/Mirantis/cri-dockerd/refs/heads/master/packaging/systemd/cri-docker.service

wget -O /lib/systemd/system/cri-docker.socket https://raw.githubusercontent.com/Mirantis/cri-dockerd/refs/heads/master/packaging/systemd/cri-docker.socket

 

#(if the unit files were downloaded into the current directory instead, copy them into place)
cp -a cri-docker.service /lib/systemd/system/
cp -a cri-docker.socket /lib/systemd/system/

#sync the unit files to the other hosts as well
scp /lib/systemd/system/cri-docker.service 192.168.3.61:/lib/systemd/system/cri-docker.service

scp /lib/systemd/system/cri-docker.socket 192.168.3.61:/lib/systemd/system/cri-docker.socket

 

systemctl daemon-reload && systemctl enable --now cri-docker.service && systemctl enable --now cri-docker.socket
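A quick sanity check that cri-dockerd is up and its socket exists:

systemctl is-active cri-docker.service cri-docker.socket
cri-dockerd --version
ls -l /run/cri-dockerd.sock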

 

Configure cri-dockerd on all hosts (Kubernetes v1.24 and later)
For well-known reasons, cri-dockerd cannot pull the pause image from k8s.gcr.io/registry.k8s.io inside mainland China, which keeps Pods from starting, so point it at a domestic mirror instead.

vim /lib/systemd/system/cri-docker.service
#Modify the ExecStart line as follows
#For the latest Kubernetes releases (v1.35.0 and v1.34.x)
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image registry.aliyuncs.com/google_containers/pause:3.10.1

 

cat /lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify
#ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd://
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image registry.aliyuncs.com/google_containers/pause:3.10.1
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target

 

 

#Sync the modified unit file to all other nodes

scp /lib/systemd/system/cri-docker.service 192.168.3.61:/lib/systemd/system/cri-docker.service

systemctl daemon-reload && systemctl enable cri-docker.service cri-docker.socket && systemctl restart cri-docker.service

 

Install kubeadm and related packages on all master and node hosts
Install the kubeadm, kubelet, and kubectl packages on every master and node.
Note: worker nodes do not strictly need the kubectl management tool, but it is pulled in automatically as a dependency.
References for installing from domestic mirrors (Aliyun, Tsinghua):
https://kubernetes.io/zh-cn/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-kubeadm-kubelet-and-kubectl
https://developer.aliyun.com/mirror/kubernetes
https://mirrors.tuna.tsinghua.edu.cn/help/kubernetes/

Example: run the following on all master and node hosts to install the Kubernetes packages

cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.35/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.35/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF

 


 

dnf install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

systemctl enable kubelet && systemctl start kubelet    #kubelet will crash-loop until kubeadm init/join runs; this is expected

systemctl status kubelet
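Optionally confirm the installed versions before initializing the cluster:

kubeadm version
kubelet --version
kubectl version --client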

 

Enable kubeadm command completion

# 1. Install bash-completion
dnf install -y bash-completion

# 2. Reload completion into the current shell
source /usr/share/bash-completion/bash_completion

 

echo 'source <(kubeadm completion bash)' >> ~/.bashrc
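kubectl supports the same completion mechanism; assuming you also want it for kubectl:

echo 'source <(kubectl completion bash)' >> ~/.bashrc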

 

Run kubeadm init on the first master node
Run the kubeadm init command on any one of the three master hosts to initialize the cluster; the initialization only needs to be done once.

 

 

 

#Initialize with the default network configuration

K8S_RELEASE_VERSION=1.35.0

kubeadm init --kubernetes-version=v${K8S_RELEASE_VERSION} --control-plane-endpoint kubeapi.org  --pod-network-cidr 10.244.0.0/16 --service-cidr 10.96.0.0/12  --token-ttl=0 --image-repository registry.aliyuncs.com/google_containers  --upload-certs --cri-socket=unix:///run/cri-dockerd.sock
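Optionally, the control-plane images can be pre-pulled with the same parameters before running the init command above (a sketch):

kubeadm config images pull --kubernetes-version=v${K8S_RELEASE_VERSION} --image-repository registry.aliyuncs.com/google_containers --cri-socket=unix:///run/cri-dockerd.sock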

 

Full reset procedure (if initialization fails and needs to be redone)

# 1. Reset, specifying the cri-dockerd socket
kubeadm reset -f --cri-socket=unix:///run/cri-dockerd.sock

# 2. Clean up leftovers
rm -rf /etc/cni/net.d /var/lib/etcd /var/lib/kubelet /etc/kubernetes
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm --clear 2>/dev/null || true

# 3. Restart the related services
systemctl restart docker
systemctl restart cri-docker.socket
systemctl restart cri-docker.service

# 4. Confirm cri-docker is healthy
systemctl status cri-docker.service

# 5. Re-initialize (again specifying the cri-socket)
kubeadm init \
  --kubernetes-version=v1.35.0 \
  --control-plane-endpoint=kubeapi.org:6443 \
  --pod-network-cidr=10.244.0.0/16 \
  --service-cidr=10.96.0.0/12 \
  --token-ttl=0 \
  --image-repository=registry.aliyuncs.com/google_containers \
  --upload-certs \
  --cri-socket=unix:///run/cri-dockerd.sock \
  --ignore-preflight-errors=Mem,SystemVerification \
  --v=5

 

Authorize the kubectl command on the first master node

Follow the instructions printed after the first master initialized successfully.

#Copy the Kubernetes administrator credentials file into the target user's home directory (here the current user, root) to create the kubeconfig file

mkdir -p $HOME/.kube

cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
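A quick check that kubectl can now reach the API server through the VIP:

kubectl cluster-info
kubectl get nodes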

 

Add the remaining master nodes to the cluster

Using the information printed at the end of the cluster initialization, add master2 and master3.

Run the required commands below on each of the other master nodes.

#Based on the kubeadm init output, run the join command with the --certificate-key parameter:

 

kubeadm join kubeapi.org:6443 --token 74cvl7.k68dh5v03zkjqp3h \
--discovery-token-ca-cert-hash sha256:af0f112b576f5e994eef2af13ad63b66bc74f079011a8843a67b826d282cfd05 \
--control-plane --certificate-key 478e3940e005f11e818a4d02cb8a06b26080da8cb80cd936621cab1bbc5f9320 \
--cri-socket=unix:///run/cri-dockerd.sock
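If the original join command or certificate key has been lost, both can be regenerated on master1 (a sketch using standard kubeadm subcommands):

kubeadm token create --print-join-command
kubeadm init phase upload-certs --upload-certs    #prints a fresh certificate key for --certificate-key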

 

#As prompted above, create the kubectl configuration for this node

mkdir -p $HOME/.kube

cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

kubectl get nodes

 

Update the HAProxy configuration (on ha1 and ha2)

vi /etc/haproxy/haproxy.cfg

 

cat /etc/haproxy/haproxy.cfg
#(the stock sections of the file are unchanged; only the appended part is shown, with the two additional master servers now enabled)

########## added content ##########
listen stats
    mode http
    bind 0.0.0.0:8888
    stats enable
    log global
    stats uri /status
    stats auth admin:123456

listen kubernetes-api-6443
    bind 192.168.3.68:6443
    mode tcp
    server master1 192.168.3.60:6443 check inter 3s fall 3 rise 3
    server master2 192.168.3.61:6443 check inter 3s fall 3 rise 3
    server master3 192.168.3.62:6443 check inter 3s fall 3 rise 3

 

cat /etc/haproxy/haproxy.cfg
#(a simplified, TCP-only variant of the same configuration)
global
    log /dev/log local0
    maxconn 4000
    user haproxy
    group haproxy
    daemon

defaults
    mode tcp
    log global
    option tcplog
    option dontlognull
    timeout connect 5s
    timeout client 50s
    timeout server 50s

listen stats
    mode http
    bind 0.0.0.0:8888
    stats enable
    log global
    stats uri /status
    stats auth admin:123456

listen kubernetes-api-6443
    bind 192.168.3.68:6443
    mode tcp
    server master1 192.168.3.60:6443 check inter 3s fall 3 rise 3
    server master2 192.168.3.61:6443 check inter 3s fall 3 rise 3
    server master3 192.168.3.62:6443 check inter 3s fall 3 rise 3

 

 

#After enabling the two extra master server lines, reload haproxy

systemctl reload haproxy

 

systemctl enable kubelet && systemctl start kubelet

systemctl restart kubelet

systemctl status kubelet

Join all worker hosts to the cluster

Run the command below on every worker node to join it to the cluster.

 

kubeadm join kubeapi.org:6443 --token 74cvl7.k68dh5v03zkjqp3h \
--discovery-token-ca-cert-hash sha256:af0f112b576f5e994eef2af13ad63b66bc74f079011a8843a67b826d282cfd05 \
--cri-socket=unix:///run/cri-dockerd.sock

 

#On a master, verify that the worker nodes have joined the cluster
#After a few minutes the nodes should show the Ready status below

kubectl get nodes

 

kubectl  get pod -A

 

Deploy the network add-on on the first master node
Pod networking in Kubernetes is implemented by third-party plugins; there are dozens of them, the best known being flannel, calico, canal, and kube-router. flannel, originally from CoreOS, is one of the simplest to use. The commands below deploy flannel onto the cluster.

#The nodes show NotReady at this point because no network plugin is installed yet

wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

kubectl apply -f kube-flannel.yml

#After a short wait, check the node status again:

kubectl get nodes

 

[root@master1 ~]# kubectl  get pod -A
NAMESPACE      NAME                                  READY   STATUS    RESTARTS   AGE
kube-flannel   kube-flannel-ds-6nwnh                 1/1     Running   0          3m17s
kube-flannel   kube-flannel-ds-98k5b                 1/1     Running   0          3m17s
kube-flannel   kube-flannel-ds-hw4pn                 1/1     Running   0          3m17s
kube-flannel   kube-flannel-ds-m2z8n                 1/1     Running   0          3m17s
kube-flannel   kube-flannel-ds-rbh9m                 1/1     Running   0          3m17s
kube-flannel   kube-flannel-ds-szf7s                 1/1     Running   0          3m17s
kube-system    coredns-bbdc5fdf6-p27d4               1/1     Running   0          103m
kube-system    coredns-bbdc5fdf6-p9fs8               1/1     Running   0          103m
kube-system    etcd-master1.org                      1/1     Running   0          103m
kube-system    etcd-master2.org                      1/1     Running   0          88m
kube-system    etcd-master3.org                      1/1     Running   0          85m
kube-system    kube-apiserver-master1.org            1/1     Running   0          103m
kube-system    kube-apiserver-master2.org            1/1     Running   0          88m
kube-system    kube-apiserver-master3.org            1/1     Running   0          85m
kube-system    kube-controller-manager-master1.org   1/1     Running   0          103m
kube-system    kube-controller-manager-master2.org   1/1     Running   0          88m
kube-system    kube-controller-manager-master3.org   1/1     Running   0          85m
kube-system    kube-proxy-2mql8                      1/1     Running   0          88m
kube-system    kube-proxy-4x4hj                      1/1     Running   0          14m
kube-system    kube-proxy-8lgcf                      1/1     Running   0          103m
kube-system    kube-proxy-92mw4                      1/1     Running   0          85m
kube-system    kube-proxy-wbkq8                      1/1     Running   0          73m
kube-system    kube-proxy-x87lr                      1/1     Running   0          14m
kube-system    kube-scheduler-master1.org            1/1     Running   0          103m
kube-system    kube-scheduler-master2.org            1/1     Running   0          88m
kube-system    kube-scheduler-master3.org            1/1     Running   0          85m
[root@master1 ~]# 
[root@master1 ~]# 
[root@master1 ~]# kubectl get nodes  
NAME          STATUS     ROLES           AGE    VERSION
master1.org   NotReady   control-plane   103m   v1.35.4
master2.org   Ready      control-plane   88m    v1.35.4
master3.org   Ready      control-plane   85m    v1.35.4
node1.org     Ready      <none>          73m    v1.35.4
node2.org     Ready      <none>          14m    v1.35.4
node3.org     Ready      <none>          14m    v1.35.4

 

#If a node stays NotReady, deleting the flannel Pod on that node forces it to be recreated, for example:

kubectl delete pod -n kube-flannel kube-flannel-ds-bbdg4
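As a final smoke test, a minimal sketch using a hypothetical demo deployment (demo-nginx is just an example name):

kubectl create deployment demo-nginx --image=nginx --replicas=2
kubectl get pods -o wide                 #the Pods should be scheduled onto the worker nodes and reach Running
kubectl delete deployment demo-nginx     #clean up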

 
 
 
 
 
 

 

