k8s证书配置,dns,dashboard

一.签发证书

TLS双向认证需要预先自建CA来签发证书。权威CA机构签发的证书通常不适用:大部分k8s集群部署在内网中,内网一般采用私有IP地址通讯,而权威CA通常只签署域名证书,无法把证书签署到私有IP上。

master:192.168.23.128
node1:192.168.23.129
node2:192.168.23.131
node3:192.168.23.130

1.自签CA

对于私有证书签发首先要自签署一个CA根证书
创建证书存放的目录,
创建CA私钥
自签CA

[root@master ~]# mkdir /etc/kubernetes/ssl && cd /etc/kubernetes/ssl
[root@master ssl]# openssl genrsa -out ca-key.pem 2048
[root@master ssl]# openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca"
[root@master ssl]# ls
ca-key.pem  ca.pem

2.签署apiserver 证书

自签 CA 后就需要使用这个根 CA 签署 apiserver 相关的证书了,首先先修改 openssl 的配置。

# vim openssl.cnf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
IP.1 = 10.254.0.1   #k8s 集群service ip
IP.2 = 192.168.23.128  #k8s master ip

然后开始签署apiserver相关的证书

# 生成 apiserver 私钥
openssl genrsa -out apiserver-key.pem 2048
# 生成签署请求
openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config openssl.cnf
# 使用自建 CA 签署
openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile openssl.cnf

3.生成集群管理证书

openssl genrsa -out admin-key.pem 2048
openssl req -new -key admin-key.pem -out admin.csr -subj "/CN=kube-admin"
openssl x509 -req -in admin.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin.pem -days 365

4.签署node证书

先修改一下 openssl 配置

[root@master ssl]#  cp openssl.cnf worker-openssl.cnf
[root@master ssl]# cat worker-openssl.cnf 
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
IP.1 = 192.168.23.129
IP.2 = 192.168.23.131
IP.3 = 192.168.23.130

生成各个结点的证书并且拷贝到每个节点的目录下

[root@master ssl]# for i in {node1,node2,node3}
> do
> openssl  genrsa -out $i-worker-key.pem 2048
>  openssl req -new -key $i-worker-key.pem -out $i-worker.csr -subj "/CN=$i" -config worker-openssl.cnf
> openssl x509 -req -in $i-worker.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out $i-worker.pem -days 365 -extensions v3_req -extfile worker-openssl.cnf
> ssh root@$i "mkdir /etc/kubernetes/ssl;chown kube:kube -R /etc/kubernetes/ssl"
> scp /etc/kubernetes/ssl/ca.pem /etc/kubernetes/ssl/$i* root@$i:/etc/kubernetes/ssl
> done

二.配置k8s

1.配置master

apiserver文件

KUBE_API_ADDRESS="--bind-address=192.168.23.128 --insecure-bind-address=127.0.0.1 "

# The port on the local server to listen on.
KUBE_API_PORT="--secure-port=6443 --insecure-port=8080"

# Port minions listen on
# KUBELET_PORT="--kubelet-port=10250"

# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.23.128:2379"

# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

# default admission control policies
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
#KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"

# Add your own!
KUBE_API_ARGS="--tls-cert-file=/etc/kubernetes/ssl/apiserver.pem --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem --client-ca-file=/etc/kubernetes/ssl/ca.pem --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem"

config文件

KUBE_MASTER="--master=https://192.168.23.128:6443"

scheduler文件

KUBE_SCHEDULER_ARGS="--kubeconfig=/etc/kubernetes/cm-kubeconfig.yaml --master=http://127.0.0.1:8080"

controller-manager

KUBE_CONTROLLER_MANAGER_ARGS="--service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem  --root-ca-file=/etc/kubernetes/ssl/ca.pem --master=http://127.0.0.1:8080 --kubeconfig=/etc/kubernetes/cm-kubeconfig.yaml"

创建一个/etc/kubernetes/cm-kubeconfig.yaml 文件

apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    certificate-authority: /etc/kubernetes/ssl/ca.pem
users:
- name: controllermanager
  user:
    client-certificate: /etc/kubernetes/ssl/apiserver.pem
    client-key: /etc/kubernetes/ssl/apiserver-key.pem
contexts:
- context:
    cluster: local
    user: controllermanager
  name: kubelet-context
current-context: kubelet-context

重启服务

systemctl  restart  etcd kube-apiserver.service kube-controller-manager.service kube-scheduler.service

2.配置node节点(以node1为例)

config文件

KUBE_MASTER="--master=https://192.168.23.128:6443"

kubelet文件

KUBELET_ADDRESS="--address=192.168.23.129"

# The port for the info server to serve on
KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=node1"

# location of the api-server
KUBELET_API_SERVER="--api-servers=https://192.168.23.128:6443"

# pod infrastructure container
#KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=kubernetes/pause:latest"
# Add your own!
KUBELET_ARGS="--cluster_dns=10.254.0.3 --cluster_domain=cluster.local --tls-cert-file=/etc/kubernetes/ssl/node1-worker.pem --tls-private-key-file=/etc/kubernetes/ssl/node1-worker-key.pem --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml --allow-privileged=true"

proxy文件

KUBE_PROXY_ARGS="--proxy-mode=iptables --master=https://192.168.23.128:6443 --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml"

创建一个文件worker-kubeconfig.yaml

apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    server: https://192.168.23.128:6443
    certificate-authority: /etc/kubernetes/ssl/ca.pem
users:
- name: kubelet
  user:
    client-certificate: /etc/kubernetes/ssl/node1-worker.pem
    client-key: /etc/kubernetes/ssl/node1-worker-key.pem
contexts:
- context:
    cluster: local
    user: kubelet
  name: kubelet-context
current-context: kubelet-context

node1

#重启服务
systemctl  restart kubelet kube-proxy
#查看 状态
systemctl  status  kubelet kube-proxy  -l
#验证证书
curl https://192.168.23.128:6443/api/v1/nodes --cert /etc/kubernetes/ssl/node1-worker.pem --key /etc/kubernetes/ssl/node1-worker-key.pem --cacert /etc/kubernetes/ssl/ca.pem

curl这一行命令建议手动输入:直接复制粘贴可能带入不可见字符,导致提示CA证书校验失败。
PS:如果提示 certificate signed by unknown authority,请检查kubelet配置文件中证书相关的配置项。

三.配置dns

skydns-rc.yaml

apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-dns-v9
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v9
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-dns
    version: v9
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v9
        kubernetes.io/cluster-service: "true"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      - name: etcd
        image: test-registry:5000/etcd
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        command:
        - /usr/local/bin/etcd
        - -data-dir
        - /var/etcd/data
        - -listen-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -advertise-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -initial-cluster-token
        - skydns-etcd
        volumeMounts:
        - name: etcd-storage
          mountPath: /var/etcd/data
      - name: kube2sky
        image: test-registry:5000/kube2sky
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        args:
        - -domain=cluster.local                        #设置k8s集群中Service所属的域名
        - -kube_master_url=https://192.168.23.128:6443   #k8s中master的ip地址和apiserver中配置的端口号
        - -kubecfg_file=/etc/kubernetes/worker-kubeconfig.yaml                      
        volumeMounts:
        - mountPath: /etc/kubernetes/ssl
          name: ssl-certs-kubernetes
        - mountPath: /etc/ssl/certs
          name: ssl-certs-host
        - mountPath: /etc/kubernetes/worker-kubeconfig.yaml
          name: config

      - name: skydns
        image: test-registry:5000/skydns
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        args:
        - -machines=http://localhost:4001
        - -addr=0.0.0.0:53
        - -domain=cluster.local
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
      volumes:
      - name: etcd-storage
        emptyDir: {}
      - hostPath:
          path: /etc/kubernetes/ssl
        name: ssl-certs-kubernetes
      - hostPath:
          path: /etc/pki/tls/certs
        name: ssl-certs-host
      - hostPath:
          path: /etc/kubernetes/worker-kubeconfig.yaml
        name: config


skydns-svc.yaml

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.0.3           #/etc/kubernetes/kubelet中已经设定好clusterIP
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

启动rc和svc:kubectl create -f skydns-rc.yaml -f skydns-svc.yaml(注意 -f 不支持逗号分隔多个文件,需要重复指定)

排错

kubectl  describe pod name --namespace=kube-system
kubectl  logs podname -c containersname --namespace=kube-system
docker run  test-registry:5000/kube2sky --help 

检测

启动一个带有nslookup命令的容器解析同一命名空间内的service。
进入容器查看etcd里面获取到service的信息

[root@master build]# kubectl  exec -it kube-dns-v9-b8a4z --namespace=kube-system -c etcd etcdctl ls /skydns/local/cluster
/skydns/local/cluster/default
/skydns/local/cluster/svc
/skydns/local/cluster/kube-system

四.配置dashboard

kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kubernetes-dashboard
  template:
    metadata:
      labels:
        app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: index.tenxcloud.com/google_containers/kubernetes-dashboard-amd64:v1.4.1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          - --apiserver-host=https://192.168.23.128:6443
          - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
        volumeMounts:
        - mountPath: /etc/kubernetes/ssl
          name: ssl-certs-kubernetes
        - mountPath: /etc/ssl/certs
          name: ssl-certs-host
        - mountPath: /etc/kubernetes/worker-kubeconfig.yaml
          name: config
      volumes:
      - hostPath:
          path: /etc/kubernetes/ssl
        name: ssl-certs-kubernetes
      - hostPath:
          path: /etc/pki/tls/certs
        name: ssl-certs-host
      - hostPath:
          path: /etc/kubernetes/worker-kubeconfig.yaml
        name: config
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 80
    nodePort: 30010
    targetPort: 9090
  selector:
    app: kubernetes-dashboard

访问方法 http://nodeip:30010

推荐阅读更多精彩内容