Installing a Multi-Master, Highly Available Kubernetes 1.11.x Cluster on CentOS 7.5 with kubeadm

Lab architecture
lab1: etcd master haproxy keepalived 192.168.63.131
lab2: etcd master haproxy keepalived 192.168.63.148
lab3: etcd master haproxy keepalived 192.168.63.149
lab4: node 192.168.63.151
-----------------------------------------------------------------------
VIP (load balancer IP): 192.168.63.101

1. Install Docker (all nodes)

Kubernetes v1.11.0 recommends Docker v17.03,
but I used Docker version 18.06.0-ce, build 0ffa825 for this install.
Both installation methods are given below; pick one.

  • Install Docker v17.03 (you can skip this step and use the latest version instead)
# Remove any existing docker-ce, then install the pinned version
$ yum remove -y docker-ce docker-ce-selinux container-selinux
$ yum install -y --setopt=obsoletes=0 \
docker-ce-17.03.1.ce-1.el7.centos \
docker-ce-selinux-17.03.1.ce-1.el7.centos
  • Install the latest Docker
# Install the required tools
$ sudo yum install -y yum-utils
# Add the official Docker repo
$ sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
# Refresh the yum cache
$ sudo yum makecache fast
# Install Docker
$ sudo yum -y install docker-ce
# Start Docker
$ sudo systemctl start docker
# Enable Docker at boot
$ sudo systemctl enable docker.service
# Quick test
$ docker info

Enable Docker at boot and restart it:

$ systemctl enable docker && systemctl restart docker

2. Install kubeadm, kubelet and kubectl (all nodes)

Install from the Aliyun mirror.

# Configure the repo
$ cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install, pinning the packages to the release this guide deploys
# (an unpinned install pulls the latest version, which may not match)
$ yum install -y kubelet-1.11.2 kubeadm-1.11.2 kubectl-1.11.2 ipvsadm
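
As a quick optional sanity check (standard kubeadm/kubelet flags), confirm the installed versions:

$ kubeadm version -o short
$ kubelet --version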

3. Configure system parameters (all nodes)

# Disable SELinux: setenforce turns it off for the running system;
# edit /etc/sysconfig/selinux so it stays disabled after reboot
$ sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux
$ setenforce 0

# Turn off swap now;
# comment out the swap line(s) in /etc/fstab so it stays off after reboot
$ swapoff -a
$ sed -i 's/.*swap.*/#&/' /etc/fstab

# Allow forwarding
# Docker changed the default firewall rules in 1.13:
# the FORWARD chain in the iptables filter table now defaults to DROP,
# which breaks cross-node pod communication in a Kubernetes cluster
$ iptables -P FORWARD ACCEPT

# Set the forwarding-related sysctls; skipping this can cause errors later
$ cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF
# Apply the settings
$ sysctl --system

# If setting net.bridge.bridge-nf-call-iptables fails, load the br_netfilter module first
$ modprobe br_netfilter
$ sysctl -p /etc/sysctl.d/k8s.conf

# Load the IPVS kernel modules
# They must be reloaded after every reboot; persist the list, e.g. as sketched below
$ modprobe ip_vs
$ modprobe ip_vs_rr
$ modprobe ip_vs_wrr
$ modprobe ip_vs_sh
$ modprobe nf_conntrack_ipv4
# Verify they loaded
$ lsmod | grep ip_vs
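
To load the modules automatically at boot, one option besides /etc/rc.local is systemd's modules-load.d mechanism; a minimal sketch:

# systemd-modules-load reads this file at every boot
$ cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF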

4. Configure hosts resolution (all nodes)

$ cat >>/etc/hosts<<EOF
192.168.63.131 lab1
192.168.63.148 lab2
192.168.63.149 lab3
192.168.63.151 lab4
EOF
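
An optional quick check that every name resolves and responds:

$ for h in lab1 lab2 lab3 lab4; do ping -c1 -W1 $h >/dev/null && echo "$h ok" || echo "$h unreachable"; done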

5. Configure haproxy and keepalived (run the following on lab1, lab2 and lab3)

$ docker pull haproxy:1.8.13-alpine
$ mkdir /etc/haproxy
# ⚠️ Note the IP addresses below
$ cat >/etc/haproxy/haproxy.cfg<<EOF
global
  log 127.0.0.1 local0 err
  maxconn 50000
  uid 99
  gid 99
  #daemon
  nbproc 1
  pidfile haproxy.pid

defaults
  mode http
  log 127.0.0.1 local0 err
  maxconn 50000
  retries 3
  timeout connect 5s
  timeout client 30s
  timeout server 30s
  timeout check 2s

listen admin_stats
  mode http
  bind 0.0.0.0:1080
  log 127.0.0.1 local0 err
  stats refresh 30s
  stats uri     /haproxy-status
  stats realm   Haproxy\ Statistics
  stats auth    will:will
  stats hide-version
  stats admin if TRUE

frontend k8s-https
  bind 0.0.0.0:8443
  mode tcp
  #maxconn 50000
  default_backend k8s-https

backend k8s-https
  mode tcp
  balance roundrobin
  server lab1 192.168.63.131:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
  server lab2 192.168.63.148:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
  server lab3 192.168.63.149:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
EOF
# Start haproxy
$ docker run -d --name my-haproxy \
-v /etc/haproxy:/usr/local/etc/haproxy:ro \
-p 8443:8443 \
-p 1080:1080 \
--restart always \
haproxy:1.8.13-alpine

# Check the logs
$ docker logs my-haproxy

# View the status page in a browser (username/password: will/will)
http://192.168.63.131:1080/haproxy-status
http://192.168.63.148:1080/haproxy-status
http://192.168.63.149:1080/haproxy-status
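
The same status page can also be checked from the shell, a quick sketch using the will/will credentials configured above:

$ curl -su will:will http://127.0.0.1:1080/haproxy-status | head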
# Pull the keepalived image
$ docker pull osixia/keepalived:1.4.5

# Start keepalived
# First make sure the IPVS kernel modules are loaded
$ lsmod | grep ip_vs
$ modprobe ip_vs

# ⚠️ Note the IPs and the NIC name; mine is ens33
# Start keepalived
# ens33 is the NIC on the 192.168.63.0/24 network used in this lab
$ docker run --net=host --cap-add=NET_ADMIN \
-e KEEPALIVED_INTERFACE=ens33 \
-e KEEPALIVED_VIRTUAL_IPS="#PYTHON2BASH:['192.168.63.101']" \
-e KEEPALIVED_UNICAST_PEERS="#PYTHON2BASH:['192.168.63.131','192.168.63.148','192.168.63.149']" \
-e KEEPALIVED_PASSWORD=hello \
--name k8s-keepalived \
--restart always \
-d osixia/keepalived:1.4.5

# Check the logs
# One instance becomes MASTER, the other two become BACKUP
$ docker logs k8s-keepalived

# 192.168.63.101 is now assigned to one of the machines
# ping test
$ ping -c4 192.168.63.101
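
# Optional: see which machine currently holds the VIP (assumes the ens33 NIC used above)
$ ip addr show ens33 | grep 192.168.63.101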

# If something fails, clean up and retry
# Known issue: after an unclean server shutdown, the keepalived container fails to start and
# restarts endlessly; remove it and run a new container
$ docker rm -f k8s-keepalived
#ip a del 192.168.63.101/32 dev ens33

6. Configure and start kubelet (all nodes)

# Point kubelet at a domestic mirror of the pause image
# and match kubelet's cgroup driver to Docker's
# Read Docker's cgroup driver
$ DOCKER_CGROUPS=$(docker info | grep 'Cgroup' | cut -d' ' -f3)
$ echo $DOCKER_CGROUPS

$ cat >/etc/sysconfig/kubelet<<EOF
KUBELET_CGROUP_ARGS="--cgroup-driver=$DOCKER_CGROUPS"
KUBELET_EXTRA_ARGS="--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
EOF

# Start kubelet (it keeps restarting until kubeadm init/join writes its config; that is expected)
$ systemctl daemon-reload
$ systemctl enable kubelet && systemctl restart kubelet

7. Configure the first master node (run the following on lab1)

# In 1.11, ipvs mode had problems on CentOS (my testing suggests this has since been fixed)
# See https://github.com/kubernetes/kubernetes/issues/65461

# Generate the config file
$ CP0_IP="192.168.63.131"
$ CP0_HOSTNAME="lab1"
# ⚠️ kubernetesVersion: if you are not deploying 1.11.2, set your version here
# ⚠️ imageRepository: change this if you have your own registry mirror
$ cat >kubeadm-master.config<<EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.2
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers

apiServerCertSANs:
- "lab1"
- "lab2"
- "lab3"
- "192.168.63.131"
- "192.168.63.148"
- "192.168.63.149"
- "192.168.63.101"
- "127.0.0.1"

api:
  advertiseAddress: $CP0_IP
  controlPlaneEndpoint: 192.168.63.101:8443

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://$CP0_IP:2379"
      advertise-client-urls: "https://$CP0_IP:2379"
      listen-peer-urls: "https://$CP0_IP:2380"
      initial-advertise-peer-urls: "https://$CP0_IP:2380"
      initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380"
    serverCertSANs:
      - $CP0_HOSTNAME
      - $CP0_IP
    peerCertSANs:
      - $CP0_HOSTNAME
      - $CP0_IP

controllerManagerExtraArgs:
  node-monitor-grace-period: 10s
  pod-eviction-timeout: 10s

networking:
  podSubnet: 10.244.0.0/16
  
kubeProxy:
  config:
    # Choose ipvs or iptables here
    mode: ipvs
    # mode: iptables
EOF
# Pre-pull the images
# If this fails, it can be run again
$ kubeadm config images pull --config kubeadm-master.config

# Initialize
# ⚠️ Save the join command it prints
# If no join command is returned, recheck the steps above carefully
$ kubeadm init --config kubeadm-master.config

If initialization goes wrong, reset with:
kubeadm reset
rm -rf /var/lib/cni/ $HOME/.kube/config

# Bundle the CA-related files and copy them to the other master nodes
$ cd /etc/kubernetes && tar cvzf k8s-key.tgz admin.conf pki/ca.* pki/sa.* pki/front-proxy-ca.* pki/etcd/ca.*
$ scp k8s-key.tgz lab2:~/
$ scp k8s-key.tgz lab3:~/
$ ssh lab2 'tar xf k8s-key.tgz -C /etc/kubernetes/'
$ ssh lab3 'tar xf k8s-key.tgz -C /etc/kubernetes/'
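
An optional check (paths as packed above) that the bundle landed where kubeadm expects:

$ ssh lab2 'ls /etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/etcd/ca.crt'
$ ssh lab3 'ls /etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/etcd/ca.crt'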

8. Configure the second master node (run the following on lab2)

# In 1.11, ipvs mode had problems on CentOS (my testing suggests this has since been fixed)
# See https://github.com/kubernetes/kubernetes/issues/65461

# Generate the config file
$ CP0_IP="192.168.63.131"
$ CP0_HOSTNAME="lab1"
$ CP1_IP="192.168.63.148"
$ CP1_HOSTNAME="lab2"
# ⚠️ kubernetesVersion: if you are not deploying 1.11.2, set your version here
# ⚠️ imageRepository: change this if you have your own registry mirror
$ cat >kubeadm-master.config<<EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.2
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers

apiServerCertSANs:
- "lab1"
- "lab2"
- "lab3"
- "192.168.63.131"
- "192.168.63.148"
- "192.168.63.149"
- "192.168.63.101"
- "127.0.0.1"

api:
  advertiseAddress: $CP1_IP
  controlPlaneEndpoint: 192.168.63.101:8443

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://$CP1_IP:2379"
      advertise-client-urls: "https://$CP1_IP:2379"
      listen-peer-urls: "https://$CP1_IP:2380"
      initial-advertise-peer-urls: "https://$CP1_IP:2380"
      initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380,$CP1_HOSTNAME=https://$CP1_IP:2380"
      initial-cluster-state: existing
    serverCertSANs:
      - $CP1_HOSTNAME
      - $CP1_IP
    peerCertSANs:
      - $CP1_HOSTNAME
      - $CP1_IP

controllerManagerExtraArgs:
  node-monitor-grace-period: 10s
  pod-eviction-timeout: 10s

networking:
  podSubnet: 10.244.0.0/16
  
kubeProxy:
  config:
    mode: ipvs
    # mode: iptables
EOF
# Configure kubelet
$ kubeadm alpha phase certs all --config kubeadm-master.config
$ kubeadm alpha phase kubelet config write-to-disk --config kubeadm-master.config
$ kubeadm alpha phase kubelet write-env-file --config kubeadm-master.config
$ kubeadm alpha phase kubeconfig kubelet --config kubeadm-master.config
$ systemctl restart kubelet

# Add this node's etcd member to the cluster
$ CP0_IP="192.168.63.131"
$ CP0_HOSTNAME="lab1"
$ CP1_IP="192.168.63.148"
$ CP1_HOSTNAME="lab2"
$ KUBECONFIG=/etc/kubernetes/admin.conf kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member add ${CP1_HOSTNAME} https://${CP1_IP}:2380
$ kubeadm alpha phase etcd local --config kubeadm-master.config
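
To confirm the member actually joined, an optional check reusing the same etcdctl flags as the member add above:

$ KUBECONFIG=/etc/kubernetes/admin.conf kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member list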

# Pre-pull the images
# If this fails, it can be run again
$ kubeadm config images pull --config kubeadm-master.config

# Deploy the control-plane components
$ kubeadm alpha phase kubeconfig all --config kubeadm-master.config
$ kubeadm alpha phase controlplane all --config kubeadm-master.config
$ kubeadm alpha phase mark-master --config kubeadm-master.config

9. Configure the third master node (run the following on lab3)

# In 1.11, ipvs mode had problems on CentOS (my testing suggests this has since been fixed)
# See https://github.com/kubernetes/kubernetes/issues/65461

# Generate the config file
$ CP0_IP="192.168.63.131"
$ CP0_HOSTNAME="lab1"
$ CP1_IP="192.168.63.148"
$ CP1_HOSTNAME="lab2"
$ CP2_IP="192.168.63.149"
$ CP2_HOSTNAME="lab3"
# ⚠️ kubernetesVersion: if you are not deploying 1.11.2, set your version here
# ⚠️ imageRepository: change this if you have your own registry mirror
$ cat >kubeadm-master.config<<EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.2
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers

apiServerCertSANs:
- "lab1"
- "lab2"
- "lab3"
- "192.168.63.131"
- "192.168.63.148"
- "192.168.63.149"
- "192.168.63.101"
- "127.0.0.1"

api:
  advertiseAddress: $CP2_IP
  controlPlaneEndpoint: 192.168.63.101:8443

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://$CP2_IP:2379"
      advertise-client-urls: "https://$CP2_IP:2379"
      listen-peer-urls: "https://$CP2_IP:2380"
      initial-advertise-peer-urls: "https://$CP2_IP:2380"
      initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380,$CP1_HOSTNAME=https://$CP1_IP:2380,$CP2_HOSTNAME=https://$CP2_IP:2380"
      initial-cluster-state: existing
    serverCertSANs:
      - $CP2_HOSTNAME
      - $CP2_IP
    peerCertSANs:
      - $CP2_HOSTNAME
      - $CP2_IP

controllerManagerExtraArgs:
  node-monitor-grace-period: 10s
  pod-eviction-timeout: 10s

networking:
  podSubnet: 10.244.0.0/16
  
kubeProxy:
  config:
    mode: ipvs
    # mode: iptables
EOF
# Configure kubelet
$ kubeadm alpha phase certs all --config kubeadm-master.config
$ kubeadm alpha phase kubelet config write-to-disk --config kubeadm-master.config
$ kubeadm alpha phase kubelet write-env-file --config kubeadm-master.config
$ kubeadm alpha phase kubeconfig kubelet --config kubeadm-master.config
$ systemctl restart kubelet

# Add this node's etcd member to the cluster
$ CP0_IP="192.168.63.131"
$ CP0_HOSTNAME="lab1"
$ CP2_IP="192.168.63.149"
$ CP2_HOSTNAME="lab3"
$ KUBECONFIG=/etc/kubernetes/admin.conf kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member add ${CP2_HOSTNAME} https://${CP2_IP}:2380
$ kubeadm alpha phase etcd local --config kubeadm-master.config
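
As on lab2, an optional member list check with the same etcdctl flags:

$ KUBECONFIG=/etc/kubernetes/admin.conf kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member list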

# Pre-pull the images
# If this fails, it can be run again
$ kubeadm config images pull --config kubeadm-master.config

# Deploy the control-plane components
$ kubeadm alpha phase kubeconfig all --config kubeadm-master.config
$ kubeadm alpha phase controlplane all --config kubeadm-master.config
$ kubeadm alpha phase mark-master --config kubeadm-master.config

10. Configure kubectl (run on any master node)

$ rm -rf $HOME/.kube
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config

# List the nodes
$ kubectl get nodes

# Nodes only show Ready after the network plugin is installed and configured
# Remove the master taint so masters can also run application pods and take workloads;
# system components such as dashboard, heapster and EFK can then be deployed
$ kubectl taint nodes --all node-role.kubernetes.io/master-

11. Configure the network plugin (run on any master node)

# Download the config
$ cd ~ && mkdir flannel && cd flannel
$ wget https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml
# Edit kube-flannel.yml:
# the Network value here must match the podSubnet in the kubeadm config above
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }

# The default image is quay.io/coreos/flannel:v0.10.0-amd64; if you can pull it, keep it. Otherwise change the image in the yml to the Aliyun mirror:
image: registry.cn-shanghai.aliyuncs.com/gcr-k8s/flannel:v0.10.0-amd64

# If a node has multiple NICs, see kubernetes issue 39701:
# https://github.com/kubernetes/kubernetes/issues/39701
# For now you need to pass --iface in kube-flannel.yml to name the NIC on the cluster's internal network,
# otherwise DNS resolution may fail and containers may be unable to communicate.
# Download kube-flannel.yml locally and add --iface=<iface-name> to the flanneld args:
    containers:
      - name: kube-flannel
        image: registry.cn-shanghai.aliyuncs.com/gcr-k8s/flannel:v0.10.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=eth1

⚠️⚠️⚠️ The value of --iface=eth1 must be the name of your actual NIC
# Apply it
$ kubectl apply -f kube-flannel.yml

# Check
$ kubectl get pods --namespace kube-system
$ kubectl get svc --namespace kube-system

# Nodes only show Ready after the network plugin is installed and configured
# If you have not already done so in step 10, remove the master taint so masters can run
# application pods and system components such as dashboard, heapster and EFK:
# kubectl taint nodes --all node-role.kubernetes.io/master-

12. Join the worker nodes to the cluster (run on all node machines)

# This is the command returned by the successful master init
$ kubeadm join 192.168.63.101:8443 --token xxx.xxxxxx --discovery-token-ca-cert-hash sha256:xxxxxxx
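
On any master, an optional check that the node registered (it becomes Ready once flannel is running on it):

$ kubectl get nodes -o wide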

13. Check the service IPVS configuration:

$ ipvsadm -ln

Basic tests

Test inter-container communication and DNS

Once the network plugin is configured, kubeadm deploys coredns automatically.
The tests below can be run on any node where kubectl is configured.

Start

kubectl run nginx --replicas=2 --image=nginx:alpine --port=80
kubectl expose deployment nginx --type=NodePort --name=example-service-nodeport
kubectl expose deployment nginx --name=example-service

Check status

kubectl get deploy
kubectl get pods
kubectl get svc
kubectl describe svc example-service

DNS resolution

kubectl run curl --image=radial/busyboxplus:curl -i --tty
nslookup kubernetes
nslookup example-service
curl example-service

Access tests

# 10.96.59.56 is the ClusterIP obtained from the svc listing
curl "10.96.59.56:80"

# 32223 is the NodePort obtained from the svc listing
http://192.168.63.148:32223/
http://192.168.63.149:32223/

Clean up

kubectl delete svc example-service example-service-nodeport
kubectl delete deploy nginx curl

High-availability test

Shut down any one master node and verify the cluster still passes the basic tests above. Do not shut down two nodes at once: an etcd cluster of three members tolerates at most one member being down.

# Check component status
kubectl get pod --all-namespaces -o wide
kubectl get pod --all-namespaces -o wide | grep lab1
kubectl get pod --all-namespaces -o wide | grep lab2
kubectl get pod --all-namespaces -o wide | grep lab3
kubectl get nodes -o wide
kubectl get deploy
kubectl get pods
kubectl get svc

# Access test
CURL_POD=$(kubectl get pods | grep curl | grep Running | cut -d ' ' -f1)
kubectl exec -ti $CURL_POD -- sh --tty
nslookup kubernetes
nslookup example-service
curl example-service

Tips

1. Forgot the node join command printed when the first master was initialized?

Simple method

kubeadm token create --print-join-command

Alternative method

token=$(kubeadm token generate)
kubeadm token create $token --print-join-command --ttl=0

2. Switching the kube-proxy mode

To switch kube-proxy from iptables to ipvs, edit the kube-proxy ConfigMap directly:

kubectl edit configmap kube-proxy -n kube-system
    ipvs:
      minSyncPeriod: 0s
      scheduler: ""
      syncPeriod: 30s
    kind: KubeProxyConfiguration
    metricsBindAddress: 127.0.0.1:10249
    mode: "ipvs"       # 加上这个或iptables
    nodePortAddresses: null

The kube-proxy pods must then be restarted to pick up the change; see the sketch below.
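
A minimal sketch for that restart, assuming the default kubeadm label k8s-app=kube-proxy on the kube-proxy pods (the DaemonSet recreates them with the new mode):

kubectl -n kube-system delete pod -l k8s-app=kube-proxy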

⚠️ Make sure everything above works before continuing.


Deploying the Kubernetes Dashboard

1. Download the official Dashboard deployment yaml

wget https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

2. Change the images in the yaml

Replace k8s.gcr.io with registry.cn-hangzhou.aliyuncs.com/google_containers. Make the same change for every image reference in later yaml files, because k8s.gcr.io is blocked in mainland China.

3. Modify the Dashboard Service in the yaml so the service is reachable from outside

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

Change it to:

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 31111
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort

4. Start the Dashboard

kubectl apply -f kubernetes-dashboard.yaml

5. Access the Dashboard

URL: https://<Your-IP>:31111/
Note: it must be https.

6. Create a user that can access the Dashboard

Create a file account.yaml with the following content:

# Create Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
# Create ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
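
Apply it, so the ServiceAccount and ClusterRoleBinding exist before fetching the token:

kubectl apply -f account.yaml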

7. Get the token to log in to the Dashboard

kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')

Sample output:

Name:         admin-user-token-f6tct
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name=admin-user
              kubernetes.io/service-account.uid=81cb9047-7087-11e8-95da-00163e0c5bd1

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1025 bytes
namespace:  11 bytes
token:      <long string>

References