Quickly Deploying Kubernetes v1.13.1 with kubeadm

Server information

  • 192.168.1.200 Master, Node
  • 192.168.1.201 Node

Server initialization

# Set the hostnames
# Run on 192.168.1.200
hostname kubeadm-mater01
echo "kubeadm-mater01" > /etc/hostname
# Run on 192.168.1.201
hostname kubeadm-node01
echo "kubeadm-node01" > /etc/hostname

# Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0

# Raise system limits; takes effect after logging out and back in
cat >> /etc/security/limits.conf << EOF
* soft nproc 65536
* hard nproc 65536
* soft nofile 65536
* hard nofile 65536
EOF

# Switch yum to the 163 mirror
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget http://mirrors.163.com/.help/CentOS7-Base-163.repo
mv CentOS7-Base-163.repo  /etc/yum.repos.d/CentOS-Base.repo

# Disable unneeded services and the firewall
systemctl disable postfix.service
systemctl stop postfix.service
systemctl stop firewalld
systemctl mask firewalld

# Add host entries
echo "192.168.1.200 kubeadm-mater01" >> /etc/hosts
echo "192.168.1.201 kubeadm-node01" >> /etc/hosts

# Enable IP forwarding and bridge netfilter
cat >> /etc/sysctl.conf << EOF
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF

modprobe br_netfilter
sysctl -p
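
To confirm the settings took effect, the keys can be queried directly (each should report 1):

sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables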

Configure the Aliyun Kubernetes repository

cat  > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install kubeadm

yum makecache fast
yum install -y kubelet kubeadm kubectl

systemctl enable kubelet.service
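
The repository installs the latest packages, which is why the node added later in this post reports v1.13.2. To reproduce the exact versions described here, the install can be pinned (a sketch, assuming the 1.13.1 RPMs are still present in the mirror):

yum install -y kubelet-1.13.1 kubeadm-1.13.1 kubectl-1.13.1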

Disable the swap partition

# Turn off the swap partition currently in use:
swapoff /dev/mapper/centos-swap

# Remove the swap device:
rm -rf /dev/mapper/centos-swap

# Remove or comment out the following auto-mount entry in /etc/fstab:
/dev/mapper/centos-swap swap swap defaults 0 0

# Alternatively, skip the swap preflight check by adding
# --ignore-preflight-errors=swap to the kubeadm command line
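
A more generic sketch that turns off all active swap and comments out any swap line in /etc/fstab in one step (it assumes swap entries contain the word "swap"; a .bak backup of fstab is kept):

swapoff -a
sed -i.bak '/\sswap\s/ s/^/#/' /etc/fstab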

Install Docker

yum -y install  yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast
yum -y install docker-ce-18.06.1.ce-3.el7

cat > /etc/docker/daemon.json <<-'EOF'
{
  "data-root": "/data/docker",
  "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn/", "https://registry.docker-cn.com"]
}
EOF

systemctl start docker
systemctl enable docker
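
To verify that Docker is running with the daemon.json settings (the root dir should now be /data/docker):

docker info | grep -E 'Server Version|Docker Root Dir'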


Initialize the cluster with the Aliyun image repository
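
Optionally, the required images can be pre-pulled before running init. A sketch that pulls the exact images shown in the docker images listing later in this post:

for img in kube-apiserver:v1.13.1 kube-controller-manager:v1.13.1 kube-scheduler:v1.13.1 \
           kube-proxy:v1.13.1 etcd:3.2.24 coredns:1.2.6 pause:3.1; do
    docker pull registry.aliyuncs.com/google_containers/$img
done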

kubeadm init \
--kubernetes-version=v1.13.1 \
--pod-network-cidr=10.244.0.0/16 \
--apiserver-advertise-address=192.168.1.200 \
--image-repository registry.aliyuncs.com/google_containers

[init] Using Kubernetes version: v1.13.1
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubeadm-mater01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.200]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [kubeadm-mater01 localhost] and IPs [192.168.1.200 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [kubeadm-mater01 localhost] and IPs [192.168.1.200 127.0.0.1 ::1]
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 20.010280 seconds
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kubeadm-mater01" as an annotation
[mark-control-plane] Marking the node kubeadm-mater01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node kubeadm-mater01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: yof5pj.1j961t39wuxuahvs
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join 192.168.1.200:6443 --token peonuo.e4r06pm721rqjt6j --discovery-token-ca-cert-hash sha256:22800e2c3aaf2596b4544afd0ad4013048ddf6623af2537c4215fc31ff7f4c0d
  • Key output above:
    • [kubelet-start] writes the kubelet configuration to "/var/lib/kubelet/config.yaml"
    • [certs] generates all of the required certificates
    • [kubeconfig] generates the kubeconfig files
    • [bootstrap-token] generates the token; record it, since it is needed later when adding nodes with kubeadm join

Configure kubectl access to the cluster

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
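
When working as root, exporting KUBECONFIG is an equivalent alternative (as in the standard kubeadm instructions):

export KUBECONFIG=/etc/kubernetes/admin.conf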

View cluster information

# Images pulled for the cluster
[root@kubeadm-mater01 ~]# docker images                    
REPOSITORY                                                        TAG                 IMAGE ID            CREATED             SIZE
registry.aliyuncs.com/google_containers/kube-proxy                v1.13.1             fdb321fd30a0        2 weeks ago         80.2MB
registry.aliyuncs.com/google_containers/kube-apiserver            v1.13.1             40a63db91ef8        2 weeks ago         181MB
registry.aliyuncs.com/google_containers/kube-controller-manager   v1.13.1             26e6f1db2a52        2 weeks ago         146MB
registry.aliyuncs.com/google_containers/kube-scheduler            v1.13.1             ab81d7360408        2 weeks ago         79.6MB
registry.aliyuncs.com/google_containers/coredns                   1.2.6               f59dcacceff4        7 weeks ago         40MB
registry.aliyuncs.com/google_containers/etcd                      3.2.24              3cab8e1b9802        3 months ago        220MB
registry.aliyuncs.com/google_containers/pause                     3.1                 da86e6ba6ca1        12 months ago       742kB

[root@kubeadm-mater01 ~]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                   
controller-manager   Healthy   ok                   
etcd-0               Healthy   {"health": "true"}  

[root@kubeadm-mater01 ~]# kubectl get namespace
NAME          STATUS   AGE
default       Active   2m33s
kube-public   Active   2m33s
kube-system   Active   2m33s

[root@kubeadm-mater01 k8s]# kubectl get services
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   24m

View the current Pods

# The two coredns pods are Pending; a pod network still needs to be installed.
[root@kubeadm-mater01 ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE
kube-system   coredns-78d4cf999f-jf2zb                  0/1     Pending   0          2m38s
kube-system   coredns-78d4cf999f-pm4nz                  0/1     Pending   0          2m38s
kube-system   etcd-kubeadm-mater01                      1/1     Running   0          94s
kube-system   kube-apiserver-kubeadm-mater01            1/1     Running   0          105s
kube-system   kube-controller-manager-kubeadm-mater01   1/1     Running   0          104s
kube-system   kube-proxy-cvwtn                          1/1     Running   0          2m38s
kube-system   kube-scheduler-kubeadm-mater01            1/1     Running   0          117s

Install flannel

mkdir -p /data/k8s/
cd /data/k8s
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

[root@kubeadm-mater01 k8s]# kubectl apply -f  kube-flannel.yml
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.extensions/kube-flannel-ds-amd64 created
daemonset.extensions/kube-flannel-ds-arm64 created
daemonset.extensions/kube-flannel-ds-arm created
daemonset.extensions/kube-flannel-ds-ppc64le created
daemonset.extensions/kube-flannel-ds-s390x created
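
The DaemonSet names appear in the apply output above, so the rollout can be checked directly:

kubectl -n kube-system get ds kube-flannel-ds-amd64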


[root@kubeadm-mater01 k8s]# ifconfig flannel.1
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 10.244.0.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::c4a6:87ff:fe29:d69f  prefixlen 64  scopeid 0x20<link>
        ether c6:a6:87:29:d6:9f  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 8 overruns 0  carrier 0  collisions 0

If a node has multiple network interfaces, the name of the cluster's internal NIC must be passed to flannel with the --iface argument in kube-flannel.yml; otherwise DNS resolution may fail. Download kube-flannel.yml locally and add --iface=<iface-name> to the flanneld startup arguments:

......
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.10.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=eth1
......
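
To identify the internal interface name on each node before editing the manifest (the interface holding the 192.168.1.x address is the one to pass to --iface):

ip -o -4 addr show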

Check that the Pods are Running

# Images need to be pulled and containers started; wait a moment before checking again.
[root@kubeadm-mater01 k8s]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE
kube-system   coredns-78d4cf999f-jf2zb                  1/1     Running   0          9m44s
kube-system   coredns-78d4cf999f-pm4nz                  1/1     Running   0          9m44s
kube-system   etcd-kubeadm-mater01                      1/1     Running   0          8m40s
kube-system   kube-apiserver-kubeadm-mater01            1/1     Running   0          8m51s
kube-system   kube-controller-manager-kubeadm-mater01   1/1     Running   0          8m50s
kube-system   kube-flannel-ds-amd64-wtd6s               1/1     Running   0          3m29s
kube-system   kube-proxy-cvwtn                          1/1     Running   0          9m44s
kube-system   kube-scheduler-kubeadm-mater01            1/1     Running   0          9m3s

Let the master node run workloads

For security reasons, kubeadm-built clusters do not schedule Pods onto the master node; that is, the master does not carry workloads. This is because the master node kubeadm-mater01 carries the node-role.kubernetes.io/master:NoSchedule taint:

[root@kubeadm-mater01 k8s]# kubectl describe node kubeadm-mater01 | grep Taints
Taints:             node-role.kubernetes.io/master:NoSchedule

Since this is a test environment, remove the taint so that kubeadm-mater01 can run workloads:

[root@kubeadm-mater01 k8s]# kubectl taint nodes kubeadm-mater01 node-role.kubernetes.io/master-
node/kubeadm-mater01 untainted

[root@kubeadm-mater01 k8s]# kubectl describe node kubeadm-mater01 | grep Taint
Taints:             <none>
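
Should the taint need to be restored later (for example, before putting real workloads on the cluster), it can be re-applied:

kubectl taint nodes kubeadm-mater01 node-role.kubernetes.io/master=:NoSchedule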

kubectl command completion

yum install -y bash-completion

source /usr/share/bash-completion/bash_completion
echo "source <(kubectl completion bash)"  >> /etc/profile

kubectl get pod -n kube-<tab>
kube-public  kube-system  
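
Alternatively, install the completion script system-wide so every new shell picks it up:

kubectl completion bash > /etc/bash_completion.d/kubectl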

Test DNS

[root@kubeadm-mater01 k8s]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   25m


# Note: kube-scheduler runs with hostNetwork, so this lookup goes through the node's
# resolver rather than CoreDNS; the public IP below shows it did not hit cluster DNS.
# Use a regular pod (next step) to test cluster DNS.
[root@kubeadm-mater01 k8s]# kubectl exec -it -n kube-system kube-scheduler-kubeadm-mater01 ping kubernetes
PING kubernetes (211.98.71.195) 56(84) bytes of data.

# Run a test pod
[root@kubeadm-mater01 tmp]# kubectl run curl --image=radial/busyboxplus:curl -it

kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
If you don't see a command prompt, try pressing enter.

[ root@curl-66959f6557-fvrxv:/ ]$ nslookup kubernetes.default
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes.default
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
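
When finished, remove the test resources (as the deprecation warning above indicates, kubectl run created a Deployment here):

kubectl delete deployment curl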

Join kubeadm-node01 to the cluster

# Install kubeadm
cat  > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum -y install kubeadm
systemctl enable kubelet    # kubeadm join will configure and start the kubelet

# Install Docker
yum -y install  yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast
yum -y install docker-ce-18.06.1.ce-3.el7
systemctl start docker.service
systemctl enable docker.service
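
# The node also needs the prerequisites from the server initialization
# section above; at minimum, disable swap and enable bridge netfilter:
swapoff -a
modprobe br_netfilter
sysctl -p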


# Then join the cluster with kubeadm join
[root@kubeadm-node01 ~]# kubeadm join 192.168.1.200:6443 --token peonuo.e4r06pm721rqjt6j --discovery-token-ca-cert-hash sha256:22800e2c3aaf2596b4544afd0ad4013048ddf6623af2537c4215fc31ff7f4c0d
[preflight] Running pre-flight checks
[discovery] Trying to connect to API Server "192.168.1.200:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://192.168.1.200:6443"
[discovery] Requesting info from "https://192.168.1.200:6443" again to validate TLS against the pinned public key
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.1.200:6443"
[discovery] Successfully established connection with API Server "192.168.1.200:6443"
[join] Reading configuration from the cluster...
[join] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.13" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kubeadm-node01" as an annotation

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the master to see this node join the cluster.


[root@kubeadm-mater01 ~]# kubectl get nodes
NAME              STATUS   ROLES    AGE     VERSION
kubeadm-mater01   Ready    master   55m     v1.13.1
kubeadm-node01    Ready    <none>   3m22s   v1.13.1

Handling an expired token

[root@kubeadm-node01 ~]# kubeadm join 192.168.1.200:6443 --token c39ox1.v2c9gxumnw6eelv1 --discovery-token-ca-cert-hash sha256:066e3219e9286bc233fcc78dcf46152ee0c4c6faac4da6d74d4f6877be0c7773
[preflight] Running pre-flight checks
[discovery] Trying to connect to API Server "192.168.1.200:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://192.168.1.200:6443"
[discovery] Requesting info from "https://192.168.1.200:6443" again to validate TLS against the pinned public key
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.1.200:6443"
[discovery] Successfully established connection with API Server "192.168.1.200:6443"
[join] Reading configuration from the cluster...
[join] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
unable to fetch the kubeadm-config ConfigMap: failed to get config map: Unauthorized

# The output above shows the join failed with "Unauthorized":
# a new token must be generated on the master before joining again.

# List tokens on the master. This token has expired; tokens are valid for 24 hours by default.
[root@kubeadm-mater01 k8s]# kubeadm token list
TOKEN                     TTL         EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
c39ox1.v2c9gxumnw6eelv1   <invalid>   2019-01-23T05:07:30-05:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token

# Generate a new token
[root@kubeadm-mater01 k8s]# kubeadm token create
oefzfi.gpvudofdekjkqh5z

[root@kubeadm-mater01 k8s]# kubeadm token list  
TOKEN                     TTL         EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
c39ox1.v2c9gxumnw6eelv1   <invalid>   2019-01-23T05:07:30-05:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token
oefzfi.gpvudofdekjkqh5z   23h         2019-01-25T09:29:45+08:00   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
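
# Instead of assembling the join command by hand, kubeadm can print it in full,
# and the CA cert hash can be recomputed on the master (both per the kubeadm docs):
kubeadm token create --print-join-command
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
    openssl dgst -sha256 -hex | sed 's/^.* //'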

# Join the cluster with the newly generated token
[root@kubeadm-node01 ~]# kubeadm join 192.168.1.200:6443 --token oefzfi.gpvudofdekjkqh5z  --discovery-token-ca-cert-hash sha256:066e3219e9286bc233fcc78dcf46152ee0c4c6faac4da6d74d4f6877be0c7773
[preflight] Running pre-flight checks
[discovery] Trying to connect to API Server "192.168.1.200:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://192.168.1.200:6443"
[discovery] Requesting info from "https://192.168.1.200:6443" again to validate TLS against the pinned public key
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.1.200:6443"
[discovery] Successfully established connection with API Server "192.168.1.200:6443"
[join] Reading configuration from the cluster...
[join] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.13" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kubeadm-node01" as an annotation

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the master to see this node join the cluster.

# Check the nodes
[root@kubeadm-mater01 k8s]# kubectl get nodes
NAME              STATUS   ROLES    AGE     VERSION
kubeadm-mater01   Ready    master   39h     v1.13.2
kubeadm-node01    Ready    <none>   4m56s   v1.13.2

Remove a node from the cluster

[root@kubeadm-mater01 ~]# kubectl drain kubeadm-node01 --delete-local-data --force --ignore-daemonsets     
node/kubeadm-node01 cordoned
WARNING: Ignoring DaemonSet-managed pods: kube-flannel-ds-amd64-nj5p8, kube-proxy-xjzpb
node/kubeadm-node01 drained

[root@kubeadm-mater01 ~]# kubectl delete node kubeadm-node01 
node "kubeadm-node01" deleted

# Run on the node being removed
kubeadm reset
ifconfig cni0 down
ip link delete cni0
ifconfig flannel.1 down
ip link delete flannel.1
rm -rf /var/lib/cni/
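
# kubeadm reset does not flush iptables or IPVS rules (its own output says so);
# if needed, clear them manually on the node (ipvsadm only if IPVS mode was enabled):
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm -C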

[root@kubeadm-mater01 ~]# kubectl get nodes 
NAME              STATUS   ROLES    AGE   VERSION
kubeadm-mater01   Ready    master   57m   v1.13.1

Switch kube-proxy to IPVS forwarding

# Ensure the required kernel parameters (already set during server initialization)
cat >> /etc/sysctl.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

sysctl -p

# Enable IPVS support
yum -y install ipvsadm  ipset

# Load the modules now (lost on reboot)
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4

# Load the modules on every boot
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
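
# Note: many CentOS 7 guides make this script executable so it runs at boot
# (/etc/modules-load.d/ is the systemd-native alternative); make it executable
# and load the modules now:
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules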

# Verify the modules are loaded
[root@kubeadm-mater01 ~]# lsmod | grep -e ip_vs
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs                 141092  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          133387  9 ip_vs,nf_nat,nf_nat_ipv4,nf_nat_ipv6,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4,nf_conntrack_ipv6
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

# Edit config.conf in the kube-system/kube-proxy ConfigMap and set mode: "ipvs":
[root@kubeadm-mater01 ~]# kubectl edit cm kube-proxy -n kube-system
    mode: "ipvs"
    
# Restart the kube-proxy pods on all nodes
[root@kubeadm-mater01 ~]# kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
pod "kube-proxy-7ks2d" deleted
pod "kube-proxy-cvwtn" deleted


[root@kubeadm-mater01 ~]# kubectl get pod -n kube-system | grep kube-proxy
kube-proxy-5sptd                          1/1     Running   0          7s
kube-proxy-lxth6                          1/1     Running   0          4s

# Check the logs to confirm the IPVS proxier is in use
[root@kubeadm-mater01 ~]# kubectl logs -n kube-system kube-proxy-5sptd 
I1227 17:39:29.762030       1 server_others.go:189] Using ipvs Proxier.
W1227 17:39:29.762266       1 proxier.go:365] IPVS scheduler not specified, use rr by default
I1227 17:39:29.762374       1 server_others.go:216] Tearing down inactive rules.
I1227 17:39:29.818052       1 server.go:464] Version: v1.13.1
I1227 17:39:29.823742       1 conntrack.go:52] Setting nf_conntrack_max to 131072
I1227 17:39:29.824181       1 config.go:202] Starting service config controller
I1227 17:39:29.824194       1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I1227 17:39:29.824251       1 config.go:102] Starting endpoints config controller
I1227 17:39:29.824255       1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I1227 17:39:29.924888       1 controller_utils.go:1034] Caches are synced for endpoints config controller
I1227 17:39:29.924973       1 controller_utils.go:1034] Caches are synced for service config controller

# View the IPVS rules
[root@kubeadm-mater01 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 192.168.1.200:6443           Masq    1      0          0         
TCP  10.96.0.10:53 rr
  -> 10.244.0.9:53                Masq    1      0          0         
  -> 10.244.0.10:53               Masq    1      0          0         
UDP  10.96.0.10:53 rr
  -> 10.244.0.9:53                Masq    1      0          2         
  -> 10.244.0.10:53               Masq    1      0          2  
