k8s Advanced (3)

7. Deploying Redis with NFS-backed persistence

7.1 Building the Redis image

Configuration file and startup scripts

root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# tree 
.
├── build-command.sh
├── Dockerfile
├── redis-4.0.14.tar.gz
├── redis.conf
└── run_redis.sh

root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# grep -v "^#" redis.conf |grep -v "^$"
bind 0.0.0.0
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes
save 900 1
save 5 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error no
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data/redis-data
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
requirepass 123456
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
slave-lazy-flush no
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble no
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes

root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# cat run_redis.sh 
#!/bin/bash

/usr/sbin/redis-server /usr/local/redis/redis.conf

tail -f  /etc/hosts
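Because redis.conf sets daemonize yes, redis-server forks into the background, and it is the trailing tail -f /etc/hosts that keeps the container's PID 1 alive. A minimal alternative sketch, assuming you would rather run Redis in the foreground (daemonize can be overridden on the command line):

#!/bin/bash
# Foreground alternative: Redis itself becomes PID 1, so the container
# lifecycle tracks the Redis process and stop signals reach it directly.
exec /usr/sbin/redis-server /usr/local/redis/redis.conf --daemonize no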

root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t harbor.openscp.com/base/redis:${TAG} .
sleep 3
docker push  harbor.openscp.com/base/redis:${TAG}

root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# cat Dockerfile 
#Redis Image
FROM harbor.openscp.com/base/centos:centos7.9.2009


ADD redis-4.0.14.tar.gz /usr/local/src
RUN yum install -y vim wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop
RUN ln -sv /usr/local/src/redis-4.0.14 /usr/local/redis && cd /usr/local/redis && make && cp src/redis-cli /usr/sbin/ && cp src/redis-server  /usr/sbin/ && mkdir -pv /data/redis-data 
ADD redis.conf /usr/local/redis/redis.conf 
ADD run_redis.sh /usr/local/redis/run_redis.sh

EXPOSE 6379

CMD ["/usr/local/redis/run_redis.sh"]

Build the image

root@k8s-ansible-client:~/yaml/20211016/dockerfile/redis# bash build-command.sh v1
...
Successfully tagged harbor.openscp.com/base/redis:v1
The push refers to repository [harbor.openscp.com/base/redis]
4b87fefa01f9: Pushed 
7cf99aab4830: Pushed 
498f7e20f28c: Pushed 
2853d7e8868a: Pushed 
4f43a69196ea: Pushed 
174f56854903: Mounted from base/tomcat-app1 
v1: digest: sha256:449ef47457b4161707f77f458e0e416be66f9f7fa7082d5afe71c982539d0936 size: 1581

Create a PV

root@k8s-ansible-client:~/yaml/20211016/redis# cat redis-pv.yaml 
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-datadir-pv-1
  namespace: pop
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/pop/redis-datadir-1 
    server: 10.10.0.26

root@k8s-ansible-client:~/yaml/20211016/redis# kubectl apply -f redis-pv.yaml 
persistentvolume/redis-datadir-pv-1 created
root@k8s-ansible-client:~/yaml/20211016/redis# kubectl get pv -n pop
NAME                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                  STORAGECLASS   REASON   AGE
redis-datadir-pv-1   5Gi        RWO            Retain           Available                                                  39s

Create a PVC

root@k8s-ansible-client:~/yaml/20211016/redis# cat redis-pvc.yaml 
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: redis-datadir-pvc-1 
  namespace: pop
spec:
  volumeName: redis-datadir-pv-1 
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
root@k8s-ansible-client:~/yaml/20211016/redis# kubectl apply -f redis-pvc.yaml 
persistentvolumeclaim/redis-datadir-pvc-1 created
root@k8s-ansible-client:~/yaml/20211016/redis# kubectl get pvc -n pop
NAME                  STATUS   VOLUME               CAPACITY   ACCESS MODES   STORAGECLASS   AGE
redis-datadir-pvc-1   Bound    redis-datadir-pv-1   5Gi        RWO                           13s

7.2 Deploying Redis

The Redis Deployment manifest

root@k8s-ansible-client:~/yaml/20211016/redis# cat redis.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: devops-redis 
  name: deploy-devops-redis
  namespace: pop
spec:
  replicas: 1 
  selector:
    matchLabels:
      app: devops-redis
  template:
    metadata:
      labels:
        app: devops-redis
    spec:
      containers:
        - name: redis-container
          image: harbor.openscp.com/base/redis:v1
          imagePullPolicy: Always
          volumeMounts:
          - mountPath: "/data/redis-data/"
            name: redis-datadir
      volumes:
        - name: redis-datadir
          persistentVolumeClaim:
            claimName: redis-datadir-pvc-1 

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: devops-redis
  name: srv-devops-redis
  namespace: pop
spec:
  type: NodePort
  ports:
  - name: http
    port: 6379 
    targetPort: 6379
    nodePort: 30379
  selector:
    app: devops-redis
  sessionAffinity: ClientIP
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800

Launch a Redis pod via the Deployment

root@k8s-ansible-client:~/yaml/20211016/redis# kubectl apply -f redis.yaml 
deployment.apps/deploy-devops-redis created
service/srv-devops-redis created

root@k8s-ansible-client:~/yaml/20211016/redis# kubectl get pods,deploy -n pop
NAME                                      READY   STATUS    RESTARTS   AGE
pod/deploy-devops-redis-d9fd6594c-fvmmg   1/1     Running   0          44s

NAME                                  READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/deploy-devops-redis   1/1     1            1           44s

Verify

root@k8s-ansible-client:~/yaml/20211016/redis# kubectl exec -it pod/deploy-devops-redis-d9fd6594c-fvmmg bash -n pop
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@deploy-devops-redis-d9fd6594c-fvmmg /]# df -Th
Filesystem                           Type     Size  Used Avail Use% Mounted on
overlay                              overlay   20G  9.9G  8.8G  53% /
tmpfs                                tmpfs     64M     0   64M   0% /dev
tmpfs                                tmpfs    2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-ubuntu--lv    ext4      20G  9.9G  8.8G  53% /etc/hosts
10.10.0.26:/data/pop/redis-datadir-1 nfs4      20G   15G  4.2G  78% /data/redis-data
shm                                  tmpfs     64M     0   64M   0% /dev/shm
tmpfs                                tmpfs    3.2G   12K  3.2G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                                tmpfs    2.0G     0  2.0G   0% /proc/acpi
tmpfs                                tmpfs    2.0G     0  2.0G   0% /proc/scsi
tmpfs                                tmpfs    2.0G     0  2.0G   0% /sys/firmware
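The NFS export is mounted at /data/redis-data, which matches the dir setting in redis.conf, so RDB snapshots land on the NFS server. A hedged sketch for checking that data actually survives a pod rebuild (the pod name below is the one from this session and changes on every rebuild):

# write a key and force a snapshot (password 123456 comes from redis.conf)
kubectl exec -n pop deploy-devops-redis-d9fd6594c-fvmmg -- /usr/sbin/redis-cli -a 123456 set testkey hello
kubectl exec -n pop deploy-devops-redis-d9fd6594c-fvmmg -- /usr/sbin/redis-cli -a 123456 bgsave
# delete the pod; the Deployment recreates it with the same PVC
kubectl delete pod -n pop deploy-devops-redis-d9fd6594c-fvmmg
# in the new pod, the key should still be present
kubectl exec -n pop <new-pod-name> -- /usr/sbin/redis-cli -a 123456 get testkey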

8. Running a one-master, multi-slave MySQL setup on StatefulSet

Reference: the official Kubernetes documentation.

8.1 Introduction to StatefulSet

A StatefulSet manages stateful services: it is a controller that gives each Pod a unique identity and can guarantee the order of deployment and scaling.

Pod consistency: covers ordering (start/stop order) and network identity. This identity belongs to the Pod itself and is independent of the node it is scheduled to.
Stable ordering: for a StatefulSet with N replicas, each Pod is assigned a unique integer ordinal in the range [0, N).
Stable network identity: each Pod's hostname follows the pattern (StatefulSet name)-(ordinal).
Stable storage: a volume is created for each Pod via volumeClaimTemplates; deleting Pods or scaling down does not delete the associated volumes.

If stateless services are like livestock (cattle or sheep that can simply be "sent off" when the time comes), then stateful services are like pets: a pet is not sent away at some point, it is looked after for its whole life.

8.2 StatefulSet实现原理

Like the ReplicaSet and Deployment resources, StatefulSet is implemented as a controller. Its management is handled by three cooperating components: StatefulSetController, StatefulSetControl and StatefulPodControl. StatefulSetController receives add/update/delete events from both the PodInformer and the StatefulSetInformer and pushes them onto a work queue.

In its Run method, StatefulSetController starts several goroutines; these take pending StatefulSet resources off the queue and synchronize them, which is how Kubernetes keeps each StatefulSet's actual state in line with its spec.

8.3 Components of a StatefulSet

Headless Service: defines the Pods' network identity (DNS domain).

volumeClaimTemplates: a storage claim template. It automatically creates a PVC of the specified name and size for each Pod; the PVCs are then satisfied by a StorageClass or, as in this article, by pre-created PVs.

StatefulSet: defines the workload itself; in this section, a StatefulSet named mysql with three Pod replicas, each of which gets its own stable DNS name.

Why is a headless service needed?
With a Deployment, Pod names carry a random suffix and are unordered. In a StatefulSet, the Pods must be ordered and no Pod can be casually replaced by another: after a rebuild the Pod keeps the same name, while its IP changes, so Pods are identified by name. The Pod name is the Pod's unique identifier, and it must be persistent and stable. This is what the headless service provides: a unique, resolvable name for each Pod.
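A small sketch of what that looks like in practice, assuming the cluster DNS domain pop.local used throughout this article:

# each StatefulSet pod gets a stable DNS record via the headless Service,
# of the form <pod>.<headless-svc>.<namespace>.svc.<cluster-domain>
nslookup mysql-0.mysql.pop.svc.pop.local
nslookup mysql-1.mysql.pop.svc.pop.local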

Why is volumeClaimTemplate needed?
Stateful replica sets need persistent storage, and the defining property of a distributed system is that each node holds different data, so the nodes cannot share one storage volume: each needs its own dedicated storage. A volume defined in a Deployment's Pod template is shared by all replicas, and the data is therefore identical, because every replica is stamped from the same template. Since each Pod of a StatefulSet needs its own dedicated volume, the volume cannot come from the Pod template; instead, StatefulSet uses volumeClaimTemplates, a claim template that generates a distinct PVC for each Pod and binds it to a PV, giving every Pod private storage. That is why volumeClaimTemplate exists.
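The naming is visible later in this section: a template named data on a StatefulSet named mysql yields one PVC per Pod, named <template>-<statefulset>-<ordinal>, i.e. data-mysql-0, data-mysql-1 and so on:

# after the StatefulSet below is created
kubectl get pvc -n pop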

8.4 Building the MySQL one-master, multi-slave environment

Define the MySQL PVs

root@k8s-ansible-client:~/yaml/20211016/mysql# cat mysql-pv.yaml 
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-1
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-1 
    server: 10.10.0.26
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-2
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-2
    server: 10.10.0.26
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-3
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-3
    server: 10.10.0.26
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-4
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-4
    server: 10.10.0.26
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-5
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-5
    server: 10.10.0.26

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-6
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/pop/mysql-datadir-6
    server: 10.10.0.26


Apply the yaml file

root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl apply -f mysql-pv.yaml 
persistentvolume/mysql-datadir-1 created
persistentvolume/mysql-datadir-2 created
persistentvolume/mysql-datadir-3 created
persistentvolume/mysql-datadir-4 created
persistentvolume/mysql-datadir-5 created
persistentvolume/mysql-datadir-6 created

root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get pv -n pop
NAME                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                     STORAGECLASS   REASON   AGE
mysql-datadir-1      10Gi       RWO            Retain           Available                                                     9s
mysql-datadir-2      10Gi       RWO            Retain           Available                                                     9s
mysql-datadir-3      10Gi       RWO            Retain           Available                                                     9s
mysql-datadir-4      10Gi       RWO            Retain           Available                                                     9s
mysql-datadir-5      10Gi       RWO            Retain           Available                                                     9s
mysql-datadir-6      10Gi       RWO            Retain           Available                                                     9s

Define the MySQL configuration in a ConfigMap

root@k8s-ansible-client:~/yaml/20211016/mysql# cat mysql-configmap.yaml 
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  namespace: pop
  labels:
    app: mysql
data:
  master.cnf: |
    # Apply this config only on the master.
    [mysqld]
    log-bin
    log_bin_trust_function_creators=1
    lower_case_table_names=1
  slave.cnf: |
    # Apply this config only on slaves.
    [mysqld]
    super-read-only
    log_bin_trust_function_creators=1

Apply the yaml file

root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl apply -f mysql-configmap.yaml 
configmap/mysql created
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get configMap -n pop
NAME               DATA   AGE
kube-root-ca.crt   1      22h
mysql              2      9s

Define the MySQL Services

root@k8s-ansible-client:~/yaml/20211016/mysql# cat mysql-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  namespace: pop
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  namespace: pop
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql

Apply the yaml file

root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl apply -f mysql-svc.yaml 
service/mysql created
service/mysql-read created
root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get svc -n pop
NAME               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
mysql              ClusterIP   None            <none>        3306/TCP         8s
mysql-read         ClusterIP   10.68.181.29    <none>        3306/TCP         8s
srv-devops-redis   NodePort    10.68.251.172   <none>        6379:30379/TCP   22h
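The two Services serve different purposes: mysql is headless (clusterIP: None) and exists to give every StatefulSet Pod a stable per-Pod DNS record, while mysql-read is a normal ClusterIP Service that load-balances reads across all Pods (including the master). A hedged usage sketch, run from any pod that has a mysql client:

# writes must target the master explicitly through its per-pod DNS name
mysql -h mysql-0.mysql.pop.svc.pop.local -u root
# reads can go through the load-balanced Service
mysql -h mysql-read.pop.svc.pop.local -u root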

Define the MySQL StatefulSet manifest.
Images used:
mysql: docker pull mysql:5.7.30
xtrabackup: docker pull anjia0532/google-samples.xtrabackup:1.0

root@k8s-ansible-client:~/yaml/20211016/mysql# cat mysql-statefulset.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
  namespace: pop
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: harbor.openscp.com/base/mysql:5.7.30 
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: harbor.openscp.com/base/xtrabackup:1 
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: harbor.openscp.com/base/mysql:5.7.30 
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: harbor.openscp.com/base/xtrabackup:1
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql
          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing slave.
            mv xtrabackup_slave_info change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm xtrabackup_binlog_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi
          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
            echo "Initializing replication from clone position"
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
            mysql -h 127.0.0.1 <<EOF
          $(<change_master_to.sql.orig),
            MASTER_HOST='mysql-0.mysql',
            MASTER_USER='root',
            MASTER_PASSWORD='',
            MASTER_CONNECT_RETRY=10;
          START SLAVE;
          EOF
          fi
          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi

Apply the yaml file

root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl apply -f mysql-statefulset.yaml 
statefulset.apps/mysql created

You can watch the startup progress with:

root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get pods -l app=mysql --watch -n pop
NAME      READY   STATUS     RESTARTS   AGE
mysql-0   2/2     Running    0          22s
mysql-1   0/2     Init:0/2   0          4s
mysql-1   0/2     Init:1/2   0          16s
mysql-1   0/2     Init:1/2   0          28s
mysql-1   0/2     PodInitializing   0          36s
mysql-1   1/2     Running           0          37s
mysql-1   2/2     Running           0          42s
mysql-2   0/2     Pending           0          0s
mysql-2   0/2     Pending           0          0s
mysql-2   0/2     Pending           0          2s
mysql-2   0/2     Init:0/2          0          2s
mysql-2   0/2     Init:1/2          0          17s
mysql-2   0/2     Init:1/2          0          30s
mysql-2   0/2     PodInitializing   0          37s
mysql-2   1/2     Error             0          38s
mysql-2   1/2     Running           1 (1s ago)   39s
mysql-2   2/2     Running           1 (6s ago)   44s

After a while, all 3 Pods should be in the Running state:

root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get pods -l app=mysql -n pop
NAME      READY   STATUS    RESTARTS       AGE
mysql-0   2/2     Running   0              3m46s
mysql-1   2/2     Running   0              3m28s
mysql-2   2/2     Running   1 (2m8s ago)   2m46s

Verify
Create a database named test on the MySQL master (mysql-0):

root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl exec -it mysql-0 -n pop bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Defaulted container "mysql" out of: mysql, xtrabackup, init-mysql (init), clone-mysql (init)
root@mysql-0:/# mysql -u root -p
Enter password: 
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 226
Server version: 5.7.30-log MySQL Community Server (GPL)

Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> CREATE DATABASE test;
Query OK, 1 row affected (0.01 sec)

mysql> show databases;
+------------------------+
| Database               |
+------------------------+
| information_schema     |
| mysql                  |
| performance_schema     |
| sys                    |
| test                   |
| xtrabackup_backupfiles |
+------------------------+
6 rows in set (0.01 sec)

mysql>

On the slaves (mysql-1, mysql-2), check that the test database exists:

root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl exec -it mysql-1 -n pop bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Defaulted container "mysql" out of: mysql, xtrabackup, init-mysql (init), clone-mysql (init)
root@mysql-1:/# mysql -u root -p
Enter password: 
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 313
Server version: 5.7.30 MySQL Community Server (GPL)

Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> show databases;
+------------------------+
| Database               |
+------------------------+
| information_schema     |
| mysql                  |
| performance_schema     |
| sys                    |
| test                   |
| xtrabackup_backupfiles |
+------------------------+
6 rows in set (0.01 sec)

mysql> 
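Rather than eyeballing the database list, replication health can be checked directly on each slave; a sketch (container name mysql as defined in the StatefulSet, empty root password per MYSQL_ALLOW_EMPTY_PASSWORD):

# both threads should report Yes
kubectl exec -n pop mysql-1 -c mysql -- mysql -h 127.0.0.1 -e "SHOW SLAVE STATUS\G" | grep -E "Slave_IO_Running|Slave_SQL_Running"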

Check the PV bindings

root@k8s-ansible-client:~/yaml/20211016/mysql# kubectl get pv -n pop
NAME                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                     STORAGECLASS   REASON   AGE
mysql-datadir-1      10Gi       RWO            Retain           Bound       pop/data-mysql-0                                  115m
mysql-datadir-2      10Gi       RWO            Retain           Bound       pop/data-mysql-1                                  115m
mysql-datadir-3      10Gi       RWO            Retain           Available                                                     115m
mysql-datadir-4      10Gi       RWO            Retain           Available                                                     115m
mysql-datadir-5      10Gi       RWO            Retain           Bound       pop/data-mysql-2                                  115m
mysql-datadir-6      10Gi       RWO            Retain           Available                                                     115m
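Only three of the six PVs are bound, so the StatefulSet can be scaled up without provisioning more storage first; each new Pod claims one of the remaining Available PVs:

# mysql-3 and mysql-4 will create data-mysql-3 and data-mysql-4 PVCs
kubectl scale statefulset mysql -n pop --replicas=5
kubectl get pvc -n pop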

The exported directory on the NFS server:

root@k8s-ansible-client:/data/pop/mysql-datadir-1/mysql# ls
auto.cnf    client-cert.pem  ibdata1      ibtmp1              mysql-0-bin.000002  mysql-0-bin.index   public_key.pem   sys
ca-key.pem  client-key.pem   ib_logfile0  mysql               mysql-0-bin.000003  performance_schema  server-cert.pem  test
ca.pem      ib_buffer_pool   ib_logfile1  mysql-0-bin.000001  mysql-0-bin.000004  private_key.pem     server-key.pem   xtrabackup_backupfiles
root@k8s-ansible-client:/data/pop/mysql-datadir-1/mysql# pwd
/data/pop/mysql-datadir-1/mysql

9. Deploying Jenkins

9.1 Building the image

# Dockerfile
root@k8s-ansible-client:~/yaml/20211016/dockerfile/jenkins# cat Dockerfile 
#Jenkins Version 2.190.1
FROM harbor.openscp.com/base/jdk-base:v8.212

ADD jenkins-2.190.1.war /apps/jenkins/
ADD run_jenkins.sh /usr/bin/


EXPOSE 8080 

CMD ["/usr/bin/run_jenkins.sh"]

# startup script
root@k8s-ansible-client:~/yaml/20211016/dockerfile/jenkins# cat run_jenkins.sh 
#!/bin/bash
cd /apps/jenkins && java -server -Xms1024m -Xmx1024m -Xss512k -jar jenkins-2.190.1.war --webroot=/apps/jenkins/jenkins-data --httpPort=8080

# image build script
root@k8s-ansible-client:~/yaml/20211016/dockerfile/jenkins# cat build-command.sh 
#!/bin/bash
docker build -t  harbor.openscp.com/base/jenkins:v2.190.1 .
echo "镜像制作完成,即将上传至Harbor服务器"
sleep 1
docker push harbor.openscp.com/base/jenkins:v2.190.1
echo "镜像上传完成"

root@k8s-ansible-client:~/yaml/20211016/dockerfile/jenkins# ls
build-command.sh  Dockerfile  jenkins-2.190.1.war  run_jenkins.sh

Build the image

root@k8s-ansible-client:~/yaml/20211016/dockerfile/jenkins# bash build-command.sh 
Sending build context to Docker daemon  78.25MB
Step 1/5 : FROM harbor.openscp.com/base/jdk-base:v8.212
 ---> 5ff82b3545df
Step 2/5 : ADD jenkins-2.190.1.war /apps/jenkins/
 ---> 0ff146588622
Step 3/5 : ADD run_jenkins.sh /usr/bin/
 ---> f1845218b8e9
Step 4/5 : EXPOSE 8080
 ---> Running in b9385da9b115
Removing intermediate container b9385da9b115
 ---> 423d3faa4ee3
Step 5/5 : CMD ["/usr/bin/run_jenkins.sh"]
 ---> Running in 517b84d483af
Removing intermediate container 517b84d483af
 ---> ee7f6eb14d7a
Successfully built ee7f6eb14d7a
Successfully tagged harbor.openscp.com/base/jenkins:v2.190.1
Image build complete; uploading to Harbor
The push refers to repository [harbor.openscp.com/base/jenkins]
1435dd08975d: Pushed 
38926d9a4b7c: Pushed 
38dbe7a8225d: Mounted from base/tomcat-app1 
4cdbfe6aa3f6: Mounted from base/tomcat-app1 
3aec209f0edd: Mounted from base/tomcat-app1 
174f56854903: Mounted from base/redis 
v2.190.1: digest: sha256:a8c1486adf54c960f7a8bd1c33c86d164973115fbadd497f3f9ca151dd150db7 size: 1576
Image upload complete

9.2 Persistent storage for Jenkins on NFS

Create the PVs on NFS

root@k8s-ansible-client:~/yaml/20211016/jenkins# cat jenkins-pv.yaml 
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jenkins-datadir-pv
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.10.0.26
    path: /data/pop/jenkins-data 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jenkins-root-datadir-pv
  namespace: pop
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.10.0.26
    path: /data/pop/jenkins-root-data

root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl apply -f jenkins-pv.yaml 
persistentvolume/jenkins-datadir-pv created
persistentvolume/jenkins-root-datadir-pv created

root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl get pv -n pop
NAME                      CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                     STORAGECLASS   REASON   AGE
jenkins-datadir-pv        10Gi       RWO            Retain           Available                                                     11s
jenkins-root-datadir-pv   10Gi       RWO            Retain           Available                                                     11s

Create the PVCs

root@k8s-ansible-client:~/yaml/20211016/jenkins# cat jenkins-pvc.yaml 
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-datadir-pvc
  namespace: pop
spec:
  volumeName: jenkins-datadir-pv
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-root-data-pvc
  namespace: pop
spec:
  volumeName: jenkins-root-datadir-pv 
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl apply -f jenkins-pvc.yaml 
persistentvolumeclaim/jenkins-datadir-pvc created
persistentvolumeclaim/jenkins-root-data-pvc created

root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl get pvc -n pop
NAME                    STATUS   VOLUME                    CAPACITY   ACCESS MODES   STORAGECLASS   AGE
jenkins-datadir-pvc     Bound    jenkins-datadir-pv        10Gi       RWO                           5s
jenkins-root-data-pvc   Bound    jenkins-root-datadir-pv   10Gi       RWO                           5s

Create the Jenkins environment

root@k8s-ansible-client:~/yaml/20211016/jenkins# cat jenkins.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: pop-jenkins
  name: pop-jenkins-deployment
  namespace: pop
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pop-jenkins
  template:
    metadata:
      labels:
        app: pop-jenkins
    spec:
      containers:
      - name: pop-jenkins-container
        image: harbor.openscp.com/base/jenkins:v2.190.1
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        volumeMounts:
        - mountPath: "/apps/jenkins/jenkins-data/"
          name: jenkins-datadir-magedu
        - mountPath: "/root/.jenkins"
          name: jenkins-root-datadir
      volumes:
        - name: jenkins-datadir-magedu
          persistentVolumeClaim:
            claimName: jenkins-datadir-pvc
        - name: jenkins-root-datadir
          persistentVolumeClaim:
            claimName: jenkins-root-data-pvc

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: pop-jenkins
  name: pop-jenkins-service
  namespace: pop
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 31080
  selector:
    app: pop-jenkins

root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl apply -f jenkins.yaml 
deployment.apps/pop-jenkins-deployment created
service/pop-jenkins-service created


# check pods and deployments
root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl get pods,deploy -n pop
NAME                                          READY   STATUS    RESTARTS      AGE
pod/deploy-devops-redis-d9fd6594c-fvmmg       1/1     Running   0             2d17h
pod/mysql-0                                   2/2     Running   0             41h
pod/mysql-1                                   2/2     Running   0             41h
pod/mysql-2                                   2/2     Running   1 (41h ago)   41h
pod/pop-jenkins-deployment-58d59b9bf5-llcqs   1/1     Running   0             27s

NAME                                     READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/deploy-devops-redis      1/1     1            1           2d17h
deployment.apps/pop-jenkins-deployment   1/1     1            1           28s

# check the services
root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl get svc -n pop
NAME                  TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
pop-jenkins-service   NodePort    10.68.139.115   <none>        80:31080/TCP     37s

Verify

[screenshot: the Jenkins web UI reachable via the NodePort]

Get the Jenkins admin password

root@k8s-ansible-client:~/yaml/20211016/jenkins# kubectl exec -it pod/pop-jenkins-deployment-58d59b9bf5-llcqs -n pop bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@pop-jenkins-deployment-58d59b9bf5-llcqs /]# cat /root/.jenkins/secrets/initialAdminPassword
c174844ecff54595ba7ddea4e4400abf
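Since /root/.jenkins is backed by the NFS export, the same password can also be read on the NFS server without entering the pod; a sketch assuming the paths used above:

# on the NFS server (10.10.0.26)
cat /data/pop/jenkins-root-data/secrets/initialAdminPassword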

Check the NFS-mounted volumes

# mounts inside the pod
[root@pop-jenkins-deployment-58d59b9bf5-llcqs /]# df -Th
Filesystem                             Type     Size  Used Avail Use% Mounted on
overlay                                overlay   20G  9.5G  9.2G  51% /
tmpfs                                  tmpfs     64M     0   64M   0% /dev
tmpfs                                  tmpfs    2.0G     0  2.0G   0% /sys/fs/cgroup
10.10.0.26:/data/pop/jenkins-root-data nfs4     295G  806M  279G   1% /root/.jenkins
/dev/mapper/ubuntu--vg-ubuntu--lv      ext4      20G  9.5G  9.2G  51% /etc/hosts
shm                                    tmpfs     64M     0   64M   0% /dev/shm
10.10.0.26:/data/pop/jenkins-data      nfs4     295G  806M  279G   1% /apps/jenkins/jenkins-data
tmpfs                                  tmpfs    3.2G   12K  3.2G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                                  tmpfs    2.0G     0  2.0G   0% /proc/acpi
tmpfs                                  tmpfs    2.0G     0  2.0G   0% /proc/scsi
tmpfs                                  tmpfs    2.0G     0  2.0G   0% /sys/firmware

# directory contents on the NFS server
root@k8s-ansible-client:/data/pop/jenkins-data# ls
 bootstrap              dc-license.txt   help             jsbundles                      LogFileOutputStream.class       'MainDialog$1$1.class'   META-INF     WEB-INF
 ColorFormatter.class   executable       images          'LogFileOutputStream$1.class'  'Main$FileAndDescription.class'  'MainDialog$1.class'     robots.txt   winstone.jar
 css                    favicon.ico      JNLPMain.class  'LogFileOutputStream$2.class'   Main.class                       MainDialog.class        scripts
root@k8s-ansible-client:/data/pop/jenkins-data# cd ../jenkins-root-data/
root@k8s-ansible-client:/data/pop/jenkins-root-data# ls
config.xml                     jenkins.install.InstallUtil.installingPlugins  jobs              nodes       secret.key.not-so-secret  userContent
hudson.model.UpdateCenter.xml  jenkins.install.UpgradeWizard.state            logs              plugins     secrets                   users
identity.key.enc               jenkins.telemetry.Correlator.xml               nodeMonitors.xml  secret.key  updates
root@k8s-ansible-client:/data/pop/jenkins-root-data# 

10. k8s hands-on cases

10.1 Web site case

Build an nginx + php + wordpress + mysql stack

10.1.1 Building the PHP image

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# ls
build-command.sh  Dockerfile  run_php.sh  www.conf
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# cat Dockerfile 
#PHP Base Image
FROM harbor.openscp.com/base/centos:centos7.9.2009

RUN yum install -y vim wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop &&  rm -rf /etc/localtime && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && useradd  www -u 2020 && useradd nginx -u 2021
RUN yum install -y  https://mirrors.tuna.tsinghua.edu.cn/remi/enterprise/remi-release-7.rpm && yum install  php56-php-fpm php56-php-mysql -y 
ADD www.conf /opt/remi/php56/root/etc/php-fpm.d/www.conf
#RUN useradd nginx -u 2019
ADD run_php.sh /usr/local/bin/run_php.sh
EXPOSE 9000

CMD ["/usr/local/bin/run_php.sh"]

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t harbor.openscp.com/base/wordpress-php-5.6:${TAG} .
echo "镜像制作完成,即将上传至Harbor服务器"
sleep 1
docker push harbor.openscp.com/base/wordpress-php-5.6:${TAG}
echo "镜像上传完成"

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# cat run_php.sh 
#!/bin/bash

/opt/remi/php56/root/usr/sbin/php-fpm
tail -f /etc/hosts

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# grep -v "^;" www.conf |grep -v "^$"
[www]
user = nginx
group = nginx
listen = 0.0.0.0:9000
pm = dynamic
pm.max_children = 50
pm.start_servers = 5
pm.min_spare_servers = 5
pm.max_spare_servers = 35
slowlog = /opt/remi/php56/root/var/log/php-fpm/www-slow.log
php_admin_value[error_log] = /opt/remi/php56/root/var/log/php-fpm/www-error.log
php_admin_flag[log_errors] = on
php_value[session.save_handler] = files
php_value[session.save_path]    = /opt/remi/php56/root/var/lib/php/session
php_value[soap.wsdl_cache_dir]  = /opt/remi/php56/root/var/lib/php/wsdlcache

Run the script to build the image

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# chmod a+x *.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/php# bash build-command.sh v1
...
Successfully tagged harbor.openscp.com/base/wordpress-php-5.6:v1
Image build complete; uploading to Harbor
The push refers to repository [harbor.openscp.com/base/wordpress-php-5.6]
18125a50a52c: Pushed 
017c00d221f8: Pushed 
3bab9e7267e6: Pushed 
7fc5345cbe01: Mounted from base/nginx-web1 
174f56854903: Mounted from base/jenkins 
v1: digest: sha256:d5a6abed76905f428d164520a5c728e99625d88bc1dbc6c62d2bc6fe384a6714 size: 1369
Image upload complete

10.1.2 Building the NGINX image

First build the nginx base image

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx-base# cat Dockerfile 
#Nginx Base Image
FROM harbor.openscp.com/base/centos:centos7.9.2009

RUN yum install -y vim wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop &&  rm -rf /etc/localtime && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && useradd  www -u 2020 && useradd nginx -u 2021
ADD nginx-1.14.2.tar.gz /usr/local/src/
RUN cd /usr/local/src/nginx-1.14.2 && ./configure --prefix=/apps/nginx  && make && make install && ln -sv  /apps/nginx/sbin/nginx /usr/sbin/nginx  &&rm -rf /usr/local/src/nginx-1.14.2.tar.gz 
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx-base# cat build-command.sh 
#!/bin/bash
docker build -t harbor.openscp.com/base/nginx-base-wordpress:v1.14.2  .
sleep 1
docker push  harbor.openscp.com/base/nginx-base-wordpress:v1.14.2

Run the script to build the nginx base image

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx-base# bash build-command.sh
...
Successfully built b73c4576e520
Successfully tagged harbor.openscp.com/base/nginx-base-wordpress:v1.14.2
The push refers to repository [harbor.openscp.com/base/nginx-base-wordpress]
154a43bb903c: Pushed 
3265817f225b: Mounted from base/nginx-web1 
7fc5345cbe01: Mounted from base/wordpress-php-5.6 
174f56854903: Mounted from base/wordpress-php-5.6 
v1.14.2: digest: sha256:52412ed50aff876003c4834f00a8d60d7624ab770444ff8f2cac6fd21712ced3 size: 1164

Build the application nginx image

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# ls
build-command.sh  Dockerfile  index.html  nginx.conf  run_nginx.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# cat Dockerfile 
#FROM harbor.magedu.local/pub-images/nginx-base-wordpress:v1.14.2 
FROM harbor.openscp.com/base/nginx-base-wordpress:v1.14.2

ADD nginx.conf /apps/nginx/conf/nginx.conf
ADD run_nginx.sh /apps/nginx/sbin/run_nginx.sh
RUN mkdir -pv /home/nginx/wordpress
RUN chown nginx.nginx /home/nginx/wordpress/ -R

EXPOSE 80 443

CMD ["/apps/nginx/sbin/run_nginx.sh"] 
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# cat index.html 
nginx web1
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# cat nginx.conf 
user  nginx nginx;
worker_processes  auto;

events {
    worker_connections  1024;
}


http {
    include       mime.types;
    default_type  application/octet-stream;

    sendfile        on;

    keepalive_timeout  65;
    client_max_body_size 10M;
    client_body_buffer_size 16k;
    client_body_temp_path  /apps/nginx/tmp   1 2 2;
    gzip  on;


    server {
        listen       80;
        server_name  blogs.openscp.com;

        location / {
            root    /home/nginx/wordpress;
            index   index.php index.html index.htm;
        }

        location ~ \.php$ {
            root           /home/nginx/wordpress;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  $document_root$fastcgi_script_name;
             include        fastcgi_params;
        }
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }

}
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# cat run_nginx.sh 
#!/bin/bash
/apps/nginx/sbin/nginx
tail -f /etc/hosts
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t harbor.openscp.com/base/wordpress-nginx:${TAG} .
echo "镜像制作完成,即将上传至Harbor服务器"
sleep 1
docker push  harbor.openscp.com/base/wordpress-nginx:${TAG}
echo "镜像上传完成"

Run the script to build the application nginx image

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# chmod a+x *.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/nginx# bash build-command.sh v1
...
Successfully tagged harbor.openscp.com/base/wordpress-nginx:v1
Image build complete; uploading to Harbor
The push refers to repository [harbor.openscp.com/base/wordpress-nginx]
2e4717b51034: Pushed 
e32627e01bb4: Pushed 
7a8e3034ed91: Pushed 
b79d4f07216e: Pushed 
25e48aff2729: Mounted from base/nginx-base-wordpress 
5b5981584815: Mounted from base/nginx-base-wordpress 
c6a91dc597a0: Mounted from base/nginx-base-wordpress 
174f56854903: Mounted from base/nginx-base-wordpress 
v1: digest: sha256:98d50b474682ef18369a6468a1e488d0baafa3324099fded4dc9ff754a84250d size: 1992
Image upload complete

10.1.3 Deploying WordPress

The yaml manifest, with NFS-backed persistent storage

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/wordpress# cat wordpress.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: wordpress-app
  name: wordpress-app-deployment
  namespace: pop
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wordpress-app
  template:
    metadata:
      labels:
        app: wordpress-app
    spec:
      containers:
      - name: wordpress-app-nginx
        image: harbor.openscp.com/base/wordpress-nginx:v1
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
        - containerPort: 443
          protocol: TCP
          name: https
        volumeMounts:
        - name: wordpress
          mountPath: /home/nginx/wordpress
          readOnly: false

      - name: wordpress-app-php
        image: harbor.openscp.com/base/wordpress-php-5.6:v1
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 9000
          protocol: TCP
          name: http
        volumeMounts:
        - name: wordpress
          mountPath: /home/nginx/wordpress
          readOnly: false

      volumes:
      - name: wordpress
        nfs:
          server: 10.10.0.26
          path: /data/pop/wordpress 


---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: wordpress-app
  name: wordpress-app-spec
  namespace: pop
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30031
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30033
  selector:
    app: wordpress-app
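Note that the nginx and php containers are declared in the same Pod, which is why nginx.conf can use fastcgi_pass 127.0.0.1:9000: containers in a Pod share one network namespace, and here they also share the WordPress files through the common nfs volume. A quick hedged check (ss is available because the images install iproute):

# from the nginx container, php-fpm's listener is visible on localhost
kubectl exec -n pop deploy/wordpress-app-deployment -c wordpress-app-nginx -- ss -tln | grep 9000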

Using the MySQL master-slave cluster above, create a database (wordpress) and an account (user wordpress, password 123456):

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/wordpress# kubectl exec -it mysql-0 -n pop bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Defaulted container "mysql" out of: mysql, xtrabackup, init-mysql (init), clone-mysql (init)
root@mysql-0:/# mysql -u root -p
Enter password: 
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 91544
Server version: 5.7.30-log MySQL Community Server (GPL)

Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> create database wordpress;
Query OK, 1 row affected (0.01 sec)

mysql> 
mysql> grant all privileges on wordpress.* to wordpress@'%' identified by '123456';
Query OK, 0 rows affected, 1 warning (0.05 sec)

mysql> flush privileges;
Query OK, 0 rows affected (0.05 sec)

mysql>
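Before installing WordPress, the new account can be verified from inside the cluster; a hedged sketch reusing the mysql client in the mysql-0 pod:

# should list the (still empty) wordpress database if the grant works
kubectl exec -n pop mysql-0 -c mysql -- mysql -h mysql-0.mysql -u wordpress -p123456 -e "SHOW DATABASES;"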

Apply the yaml to build the WordPress environment

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/wordpress# kubectl apply -f wordpress.yaml 
deployment.apps/wordpress-app-deployment created
service/wordpress-app-spec created

root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/wordpress# kubectl get pods,deploy -n pop
NAME                                            READY   STATUS    RESTARTS      AGE
pod/deploy-devops-redis-d9fd6594c-fvmmg         1/1     Running   0             2d18h
pod/mysql-0                                     2/2     Running   0             42h
pod/mysql-1                                     2/2     Running   0             42h
pod/mysql-2                                     2/2     Running   1 (42h ago)   42h
pod/wordpress-app-deployment-5f98868c5b-4775l   2/2     Running   0             4s

NAME                                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/deploy-devops-redis        1/1     1            1           2d18h
deployment.apps/wordpress-app-deployment   1/1     1            1           4s
root@k8s-ansible-client:~/yaml/20211016/dockerfile/web/wordpress# kubectl get svc -n pop
NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
mysql                ClusterIP   None            <none>        3306/TCP                     44h
mysql-read           ClusterIP   10.68.181.29    <none>        3306/TCP                     44h
wordpress-app-spec   NodePort    10.68.208.19    <none>        80:30031/TCP,443:30033/TCP   115s

Download the WordPress package from https://cn.wordpress.org/download/

# on the NFS server
root@k8s-ansible-client:/data/pop/wordpress# wget https://cn.wordpress.org/latest-zh_CN.tar.gz
# unpack the archive
root@k8s-ansible-client:/data/pop/wordpress# tar -zxvf latest-zh_CN.tar.gz
root@k8s-ansible-client:/data/pop/wordpress# ls
index.html  latest-zh_CN.tar.gz  wordpress
root@k8s-ansible-client:/data/pop/wordpress# rm -fr index.html latest-zh_CN.tar.gz 
root@k8s-ansible-client:/data/pop/wordpress# mv wordpress/* .
root@k8s-ansible-client:/data/pop/wordpress# ls
index.php    readme.html  wp-activate.php  wp-blog-header.php    wp-config-sample.php  wp-cron.php  wp-links-opml.php  wp-login.php  wp-settings.php  wp-trackback.php
license.txt  wordpress    wp-admin         wp-comments-post.php  wp-content            wp-includes  wp-load.php        wp-mail.php   wp-signup.php    xmlrpc.php
root@k8s-ansible-client:/data/pop/wordpress# rm -fr wordpress/

Install WordPress

[screenshot: the WordPress installer]

Note: if you run into permission problems, give the nginx user ownership of the wordpress directory (see the sketch below).
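A sketch of that fix, run on the NFS server; uid/gid 2021 matches the nginx user created in both the nginx and php images (useradd nginx -u 2021):

chown -R 2021:2021 /data/pop/wordpress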

The database name, user and password are the ones created above; the connection address is mysql-0.mysql.pop.svc.pop.local.

[screenshots: WordPress database settings and the finished installation]

Log in and publish a post

[screenshots: a post published on the new WordPress site]

10.2 Microservices case

Build a dubbo + zookeeper microservice environment

10.2.1 Building the images

consumer

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# ls
build-command.sh  Dockerfile  dubbo-demo-consumer-2.1.5  dubbo-demo-consumer-2.1.5-assembly.tar.gz  run_java.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# cat Dockerfile 
#Dubbo consumer
FROM harbor.openscp.com/base/jdk-base:v8.212

RUN yum install -y vim wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop&&  rm -rf /etc/localtime && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && useradd nginx -u 2001
RUN yum install file -y
RUN mkdir -p /apps/dubbo/consumer 
ADD dubbo-demo-consumer-2.1.5  /apps/dubbo/consumer
ADD run_java.sh /apps/dubbo/consumer/bin 
RUN chown nginx.nginx /apps -R
RUN chmod a+x /apps/dubbo/consumer/bin/*.sh

CMD ["/apps/dubbo/consumer/bin/run_java.sh"]

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# cat run_java.sh 
#!/bin/bash
su - nginx -c "/apps/dubbo/consumer/bin/start.sh"
tail -f /etc/hosts

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# cat build-command.sh 
#!/bin/bash
docker build -t harbor.openscp.com/base/dubbo-demo-consumer:v1  .
sleep 3
docker push harbor.openscp.com/base/dubbo-demo-consumer:v1

# the registry (zookeeper) address must be configured
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# grep -v "^#" dubbo-demo-consumer-2.1.5/conf/dubbo.properties
dubbo.container=log4j,spring
dubbo.application.name=demo-consumer
dubbo.application.owner=
dubbo.registry.address=zookeeper://zookeeper1.default.svc.pop.local:2181 | zookeeper://zookeeper2.default.svc.pop.local:2181 | zookeeper://zookeeper3.default.svc.pop.local:2181
dubbo.monitor.protocol=registry
dubbo.log4j.file=logs/dubbo-demo-consumer.log
dubbo.log4j.level=WARN

Run the script to build the image

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# chmod a+x *.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/consumer# bash build-command.sh
...
Successfully tagged harbor.openscp.com/base/dubbo-demo-consumer:v1
The push refers to repository [harbor.openscp.com/base/dubbo-demo-consumer]
6138693121b5: Pushed 
4fae946992b0: Pushed 
1429d5765bc5: Pushed 
d75b5de4df30: Pushed 
c67ef8dd101d: Pushed 
a53b249d3f92: Pushed 
bc1760d32325: Pushed 
38dbe7a8225d: Mounted from base/jenkins 
4cdbfe6aa3f6: Mounted from base/jenkins 
3aec209f0edd: Mounted from base/jenkins 
174f56854903: Mounted from base/wordpress-nginx 
v1: digest: sha256:413825db8a03f0eed51fd3056041059bc35fd79bd18b4fd7285a9fedbfc3913f size: 2628

dubboadmin

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# ls
build-command.sh  catalina.sh  Dockerfile  dubboadmin.war  dubboadmin.zip  logging.properties  run_tomcat.sh  server.xml
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# cat Dockerfile 
#Dubbo dubboadmin
FROM harbor.openscp.com/base/tomcat-base:v8.5.43

RUN yum install -y vim unzip wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop&&  rm -rf /etc/localtime && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
ADD server.xml /apps/tomcat/conf/server.xml
ADD logging.properties /apps/tomcat/conf/logging.properties
ADD catalina.sh /apps/tomcat/bin/catalina.sh
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
ADD dubboadmin.war  /data/tomcat/webapps/dubboadmin.war
RUN cd /data/tomcat/webapps && unzip dubboadmin.war && rm -rf dubboadmin.war && chown -R nginx.nginx /data /apps

EXPOSE 8080 8443

CMD ["/apps/tomcat/bin/run_tomcat.sh"]

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t harbor.openscp.com/base/dubboadmin:${TAG}  .
sleep 3
docker push  harbor.openscp.com/base/dubboadmin:${TAG}

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# cat run_tomcat.sh 
#!/bin/bash

su - nginx -c "/apps/tomcat/bin/catalina.sh start"
su - nginx -c "tail -f /etc/hosts"

# the registry (zookeeper) address must be configured
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# cat dubboadmin/WEB-INF/dubbo.properties 
dubbo.registry.address=zookeeper://zookeeper1.default.svc.pop.local:2181 | zookeeper://zookeeper2.default.svc.pop.local:2181 | zookeeper://zookeeper3.default.svc.pop.local:2181
dubbo.admin.root.password=root
dubbo.admin.guest.password=guest

Run the script to build the image

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# chmod a+x *.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/dubboadmin# bash build-command.sh v1
...
Successfully tagged harbor.openscp.com/base/dubboadmin:v1
The push refers to repository [harbor.openscp.com/base/dubboadmin]
98e0d611e025: Pushed 
6dc138ab6f03: Pushed 
82e5ee6a3c84: Pushed 
5ddfa7134fcc: Pushed 
44868cb8c702: Pushed 
aaefdcea4576: Pushed 
2ff59fc7c569: Pushed 
6dc737cf89d1: Pushed 
d5123d987925: Mounted from base/tomcat-app1 
afa3eb2a2173: Mounted from base/tomcat-app1 
7136febc3401: Mounted from base/tomcat-app1 
38dbe7a8225d: Mounted from base/dubbo-demo-consumer 
4cdbfe6aa3f6: Mounted from base/dubbo-demo-consumer 
3aec209f0edd: Mounted from base/dubbo-demo-consumer 
174f56854903: Mounted from base/dubbo-demo-consumer 
v1: digest: sha256:2f4e1148295e244c0188c2dd8048d673bbda3c0a11b421d1be25b42e81ca19b6 size: 3466

provider: configure the registry address

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# ls
build-command.sh  Dockerfile  dubbo-demo-provider-2.1.5  dubbo-demo-provider-2.1.5-assembly.tar.gz  run_java.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# cat Dockerfile 
#Dubbo provider
FROM harbor.openscp.com/base/jdk-base:v8.212

RUN yum install -y vim wget tree file nc lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop&&  rm -rf /etc/localtime && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && useradd nginx -u 2001
RUN mkdir -p /apps/dubbo/provider
ADD dubbo-demo-provider-2.1.5/  /apps/dubbo/provider
ADD run_java.sh /apps/dubbo/provider/bin 
RUN chown nginx.nginx /apps -R
RUN chmod a+x /apps/dubbo/provider/bin/*.sh

CMD ["/apps/dubbo/provider/bin/run_java.sh"]

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# cat build-command.sh 
#!/bin/bash
docker build -t harbor.openscp.com/base/dubbo-demo-provider:v1  .
sleep 3
docker push harbor.openscp.com/base/dubbo-demo-provider:v1

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# cat run_java.sh 
#!/bin/bash
su - nginx -c "/apps/dubbo/provider/bin/start.sh"
tail -f /etc/hosts

# the registry (zookeeper) address must be configured
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# grep -v "^#" dubbo-demo-provider-2.1.5/conf/dubbo.properties 
dubbo.container=log4j,spring
dubbo.application.name=demo-provider
dubbo.application.owner=
dubbo.registry.address=zookeeper://zookeeper1.default.svc.pop.local:2181 | zookeeper://zookeeper2.default.svc.pop.local:2181 | zookeeper://zookeeper3.default.svc.pop.local:2181
dubbo.monitor.protocol=registry
dubbo.protocol.name=dubbo
dubbo.protocol.port=20880
dubbo.log4j.file=logs/dubbo-demo-provider.log
dubbo.log4j.level=WARN

Run the script to build the image

root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# chmod a+x *.sh
root@k8s-ansible-client:~/yaml/20211016/dockerfile/dubbo/provider# bash build-command.sh
...
Successfully tagged harbor.openscp.com/base/dubbo-demo-provider:v1
The push refers to repository [harbor.openscp.com/base/dubbo-demo-provider]
c32486dc8751: Pushed 
719e351a9ee9: Pushed 
8f08fdf8a695: Pushed 
ee248e4a3744: Pushed 
fad539a3eb64: Pushed 
9ff991a489ff: Pushed 
38dbe7a8225d: Mounted from base/dubboadmin 
4cdbfe6aa3f6: Mounted from base/dubboadmin 
3aec209f0edd: Mounted from base/dubboadmin 
174f56854903: Mounted from base/dubboadmin 
v1: digest: sha256:5eeebdbbe537f385bbed92b7f24335af11c3f6d38285741390d2151c361010e8 size: 2416

10.2.2 Deploying the microservice environment

The yaml manifests

root@k8s-ansible-client:~/yaml/20211016/dubbo# cat provider.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: pop-provider
  name: pop-provider-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pop-provider
  template:
    metadata:
      labels:
        app: pop-provider
    spec:
      containers:
      - name: pop-provider-container
        image: harbor.openscp.com/base/dubbo-demo-provider:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 20880
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: pop-provider
  name: pop-provider-spec
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 20880
    #nodePort: 30001
  selector:
    app: pop-provider

root@k8s-ansible-client:~/yaml/20211016/dubbo# cat dubboadmin.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: pop-dubboadmin
  name: pop-dubboadmin-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pop-dubboadmin
  template:
    metadata:
      labels:
        app: pop-dubboadmin
    spec:
      containers:
      - name: pop-dubboadmin-container
        image: harbor.openscp.com/base/dubboadmin:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: pop-dubboadmin
  name: pop-dubboadmin-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30080
  selector:
    app: pop-dubboadmin

root@k8s-ansible-client:~/yaml/20211016/dubbo# cat consumer.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: pop-consumer
  name: pop-consumer-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pop-consumer
  template:
    metadata:
      labels:
        app: pop-consumer
    spec:
      containers:
      - name: pop-consumer-container
        image: harbor.openscp.com/base/dubbo-demo-consumer:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: pop-consumer
  name: pop-consumer-server
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    #nodePort: 30001
  selector:
    app: pop-consumer

Start the provider

root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl apply -f provider.yaml 
deployment.apps/pop-provider-deployment created
service/pop-provider-spec created

root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl get pods,deploy 
NAME                                           READY   STATUS             RESTARTS           AGE
pod/alpine-test                                1/1     Running            50 (53m ago)       28d
pod/kube100-site                               2/2     Running            0                  14d
pod/nginx                                      0/1     CrashLoopBackOff   1611 (3m21s ago)   4d
pod/nginx-test-001                             1/1     Running            26 (90m ago)       15d
pod/nginx-test1                                1/1     Running            50 (63m ago)       28d
pod/nginx-test2                                1/1     Running            50 (63m ago)       28d
pod/nginx-test3                                1/1     Running            50 (63m ago)       28d
pod/pop-provider-deployment-6dfd4d78db-gl4rt   1/1     Running            0                  20s
pod/zookeeper1-cdbb7fbc-5pgdg                  1/1     Running            1 (6d3h ago)       6d3h
pod/zookeeper2-f4944446d-2xnjd                 1/1     Running            0                  6d3h
pod/zookeeper3-589f6bc7-2mnz6                  1/1     Running            0                  6d3h

NAME                                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/pop-provider-deployment   1/1     1            1           20s
deployment.apps/zookeeper1                1/1     1            1           6d3h
deployment.apps/zookeeper2                1/1     1            1           6d3h
deployment.apps/zookeeper3                1/1     1            1           6d3h
root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl get svc
NAME                TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                                        AGE
kubernetes          ClusterIP   10.68.0.1      <none>        443/TCP                                        29d
pop-provider-spec   NodePort    10.68.4.34     <none>        80:31797/TCP                                   25s
zookeeper1          NodePort    10.68.42.189   <none>        2181:32181/TCP,2888:30923/TCP,3888:30168/TCP   6d3h
zookeeper2          NodePort    10.68.78.146   <none>        2181:32182/TCP,2888:31745/TCP,3888:30901/TCP   6d3h
zookeeper3          NodePort    10.68.199.44   <none>        2181:32183/TCP,2888:32488/TCP,3888:31621/TCP   6d3h
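Before looking at ZooKeeper, a quick sanity check that the provider is listening on its dubbo port; a sketch, assuming nc is installed on the client machine:

# forward the provider's dubbo port (20880) to the local machine
kubectl port-forward deployment/pop-provider-deployment 20880:20880 &
# a successful connection means the provider process is up
nc -vz 127.0.0.1 20880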

Verify: use the Windows client ZooInspector to inspect the data in ZooKeeper


[screenshots: ZooInspector showing the registered provider in ZooKeeper]

The provider's registration information is now visible in ZooKeeper.
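Without a Windows client, the same check can be done from inside one of the zookeeper pods; a sketch, assuming the image ships zkCli.sh under bin/ (adjust the path to your image):

# list the dubbo registry nodes from zookeeper1
kubectl exec -it pod/zookeeper1-cdbb7fbc-5pgdg -- bin/zkCli.sh -server 127.0.0.1:2181 ls /dubbo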

Start the consumer

root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl apply -f consumer.yaml 
deployment.apps/pop-consumer-deployment created
service/pop-consumer-server created

Verify by tailing the provider logs

root@k8s-ansible-client:~/yaml/20211010/03/dockerfile# kubectl exec -it pod/pop-provider-deployment-6dfd4d78db-gl4rt bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead
[root@pop-provider-deployment-6dfd4d78db-gl4rt logs]# tail -f *.log
...
==> stdout.log <==
    at org.jboss.netty.channel.socket.nio.NioServerSocketPipelineSink.handleAcceptedSocket(NioServerSocketPipelineSink.java:137)
    at org.jboss.netty.channel.socket.nio.NioServerSocketPipelineSink.eventSunk(NioServerSocketPipelineSink.java:76)
    at org.jboss.netty.channel.Channels.write(Channels.java:632)
    at org.jboss.netty.handler.codec.oneone.OneToOneEncoder.handleDownstream(OneToOneEncoder.java:70)
    at com.alibaba.dubbo.remoting.transport.netty.NettyHandler.writeRequested(NettyHandler.java:99)
    at org.jboss.netty.channel.Channels.write(Channels.java:611)
    at org.jboss.netty.channel.Channels.write(Channels.java:578)
    at org.jboss.netty.channel.AbstractChannel.write(AbstractChannel.java:251)
    at com.alibaba.dubbo.remoting.transport.netty.NettyChannel.send(NettyChannel.java:98)
    ... 6 more



[00:57:55] Hello world0, request from consumer: /172.20.213.27:35450
[00:57:57] Hello world1, request from consumer: /172.20.213.27:35450
[00:57:59] Hello world2, request from consumer: /172.20.213.27:35450
[00:58:01] Hello world3, request from consumer: /172.20.213.27:35450
[00:58:03] Hello world4, request from consumer: /172.20.213.27:35450
[00:58:05] Hello world5, request from consumer: /172.20.213.27:35450
[00:58:07] Hello world6, request from consumer: /172.20.213.27:35450
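The consumer side can be checked the same way; kubectl logs accepts a deployment reference directly:

kubectl logs -f deployment/pop-consumer-deployment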

Start dubboadmin

root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl apply -f dubboadmin.yaml 
deployment.apps/pop-dubboadmin-deployment created
service/pop-dubboadmin-service created
root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl get pods -o wide
NAME                                        READY   STATUS             RESTARTS           AGE    IP              NODE             NOMINATED NODE   READINESS GATES
alpine-test                                 1/1     Running            50 (76m ago)       28d    172.20.108.65   192.168.20.236   <none>           <none>
kube100-site                                2/2     Running            0                  14d    172.20.213.6    192.168.20.253   <none>           <none>
nginx                                       0/1     CrashLoopBackOff   1617 (4m28s ago)   4d     172.20.213.20   192.168.20.253   <none>           <none>
nginx-test-001                              1/1     Running            26 (113m ago)      15d    172.20.191.10   192.168.20.147   <none>           <none>
nginx-test1                                 1/1     Running            50 (86m ago)       28d    172.20.191.2    192.168.20.147   <none>           <none>
nginx-test2                                 1/1     Running            50 (86m ago)       28d    172.20.213.3    192.168.20.253   <none>           <none>
nginx-test3                                 1/1     Running            50 (86m ago)       28d    172.20.191.3    192.168.20.147   <none>           <none>
pop-consumer-deployment-54b54559d7-2dd2k    1/1     Running            0                  10m    172.20.213.27   192.168.20.253   <none>           <none>
pop-dubboadmin-deployment-75f8d75df-drcz6   1/1     Running            0                  38s    172.20.108.91   192.168.20.236   <none>           <none>
pop-provider-deployment-6dfd4d78db-gl4rt    1/1     Running            0                  23m    172.20.191.33   192.168.20.147   <none>           <none>
zookeeper1-cdbb7fbc-5pgdg                   1/1     Running            1 (6d3h ago)       6d3h   172.20.191.27   192.168.20.147   <none>           <none>
zookeeper2-f4944446d-2xnjd                  1/1     Running            0                  6d3h   172.20.108.81   192.168.20.236   <none>           <none>
zookeeper3-589f6bc7-2mnz6                   1/1     Running            0                  6d3h   172.20.191.28   192.168.20.147   <none>           <none>
root@k8s-ansible-client:~/yaml/20211016/dubbo# kubectl get svc
NAME                     TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                                        AGE
kubernetes               ClusterIP   10.68.0.1       <none>        443/TCP                                        29d
pop-consumer-server      NodePort    10.68.122.230   <none>        80:30926/TCP                                   12m
pop-dubboadmin-service   NodePort    10.68.60.83     <none>        80:30080/TCP                                   2m23s
pop-provider-spec        NodePort    10.68.4.34      <none>        80:31797/TCP                                   24m
zookeeper1               NodePort    10.68.42.189    <none>        2181:32181/TCP,2888:30923/TCP,3888:30168/TCP   6d3h
zookeeper2               NodePort    10.68.78.146    <none>        2181:32182/TCP,2888:31745/TCP,3888:30901/TCP   6d3h
zookeeper3               NodePort    10.68.199.44    <none>        2181:32183/TCP,2888:32488/TCP,3888:31621/TCP   6d3h

Verify: log in to the console; both the username and the password are root


[screenshot: dubboadmin console after login]
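A quick reachability check from the shell (node IP and NodePort taken from the kubectl get svc output above; that dubboadmin protects the console with HTTP Basic auth is an assumption consistent with the root/root login):

curl -s -u root:root -o /dev/null -w "%{http_code}\n" http://192.168.20.253:30080/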

11. Ingress

11.1 What is Ingress

Ingress exposes HTTP and HTTPS routes from outside the cluster to Services inside the cluster. Traffic routing is controlled by rules defined on the Ingress resource.

A simple flow diagram:


[diagram: Ingress traffic flow from client to Service]

An Ingress can be configured to give Services externally reachable URLs, load-balance traffic, terminate SSL/TLS, and offer name-based virtual hosting. An Ingress controller is usually responsible for fulfilling the Ingress through a load balancer, though it may also configure an edge router or other frontends to help handle the traffic.

Ingress does not expose arbitrary ports or protocols. Exposing services other than HTTP and HTTPS to the Internet typically uses a Service of type Service.Type=NodePort or Service.Type=LoadBalancer.

11.2 Ingress controller

The Ingress Controller watches the rules configured on Ingress API objects and converts them into Nginx configuration (the Kubernetes declarative API plus a control loop), then serves external traffic accordingly.

At its core it is a Deployment. There are many implementations, e.g. Nginx, Contour, HAProxy, Traefik, Istio. The YAML to write includes a Deployment, Service, ConfigMap, and ServiceAccount (auth), and the Service type can be NodePort or LoadBalancer.

The Ingress Controller turns each Ingress change into a piece of Nginx configuration, writes that configuration into the Nginx Pod via the Kubernetes API, and then reloads it. (Note: what gets written into nginx.conf is not the Service address but the addresses of the Service's backend Pods, which avoids an extra layer of load balancing through the Service.)

[diagram: Ingress Controller generating Nginx configuration from Ingress rules]

As the diagram shows, an incoming request is still intercepted first by the load balancer, e.g. nginx. The Ingress Controller learns from the Ingress objects which Service each domain maps to, obtains the Service's endpoint addresses and other details from the Kubernetes API, generates a configuration file from all of this, writes it into the load balancer in real time, and has the load balancer reload the rules. That is what implements service discovery, i.e. dynamic mapping.

This also explains why it is attractive to deploy the load balancer as a DaemonSet: since every request is intercepted by the load balancer first, running one on every node and listening on port 80 via hostPort removes the uncertainty about where the load balancer runs that other deployment modes have, and port 80 on any node resolves requests correctly; putting an nginx in front adds yet another layer of load balancing.

The Ingress Controller provides the proxying capability described by your Ingress objects. The reverse proxies commonly used in the industry, such as Nginx, HAProxy, Envoy, and Traefik, all maintain dedicated Ingress Controllers for Kubernetes.


[diagram: common Ingress Controller implementations]

11.3 Deploying Ingress

Ingress routes to backend Services, not to Pods directly.

The ingress controller interacts with the k8s cluster and is aware of changes in etcd.

Deploy nginx-ingress-controller. The deployment manifest (YAML) is documented at:
https://kubernetes.github.io/ingress-nginx/deploy/
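The manifest is applied below from a local copy saved as aa.yaml; a hedged example of fetching it (the release tag and path are assumptions, check the page above for the current command):

wget -O aa.yaml https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.4/deploy/static/provider/cloud/deploy.yaml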

root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl apply -f aa.yaml 
namespace/ingress-nginx created
serviceaccount/ingress-nginx created
configmap/ingress-nginx-controller created
clusterrole.rbac.authorization.k8s.io/ingress-nginx created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created
role.rbac.authorization.k8s.io/ingress-nginx created
rolebinding.rbac.authorization.k8s.io/ingress-nginx created
service/ingress-nginx-controller-admission created
service/ingress-nginx-controller created
deployment.apps/ingress-nginx-controller created
ingressclass.networking.k8s.io/nginx created
validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created
serviceaccount/ingress-nginx-admission created
clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
role.rbac.authorization.k8s.io/ingress-nginx-admission created
rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
job.batch/ingress-nginx-admission-create created
job.batch/ingress-nginx-admission-patch created

root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl get pods,deploy -n ingress-nginx
NAME                                           READY   STATUS      RESTARTS   AGE
pod/ingress-nginx-admission-create--1-n7f7b    0/1     Completed   0          21s
pod/ingress-nginx-admission-patch--1-cd57p     0/1     Completed   1          21s
pod/ingress-nginx-controller-cfb66bc7b-bflgv   1/1     Running     0          21s

NAME                                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/ingress-nginx-controller   1/1     1            1           21s
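To see what the controller has rendered from the Ingress rules, its live nginx configuration can be dumped; a sketch using the pod name above (recent ingress-nginx releases program backend endpoints via Lua, so not every upstream appears literally in the file):

kubectl exec -n ingress-nginx pod/ingress-nginx-controller-cfb66bc7b-bflgv -- cat /etc/nginx/nginx.conf | grep -E "server_name|location"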

11.3.1 Domain-based virtual hosts

root@k8s-ansible-client:~/yaml/20211016/ingress# cat ingress_single-host.yaml 
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-web
  namespace: pop
  annotations:
    kubernetes.io/ingress.class: "nginx" ## specify the Ingress Controller type
spec:
  rules: # routing rules
  - host: www.pop.com ## the host domain clients use to access the service
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: pop-tomcat-app1-service
            port:
               number: 80

root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl apply -f ingress_single-host.yaml 
ingress.networking.k8s.io/nginx-web created

root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl get ingress -n pop
NAME        CLASS    HOSTS         ADDRESS          PORTS   AGE
nginx-web   <none>   www.pop.com   192.168.20.253   80      2m44s

Verify

[screenshot: browser access to www.pop.com through the Ingress]
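The same check works from the shell: send the Host header through the controller's HTTP NodePort (read the port from the ingress-nginx-controller Service; <http-nodeport> below is a placeholder):

kubectl get svc -n ingress-nginx ingress-nginx-controller
curl -H "Host: www.pop.com" http://192.168.20.253:<http-nodeport>/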

11.3.2 Domain-based URL forwarding

root@k8s-ansible-client:~/yaml/20211016/ingress# cat ingress-url.yaml 
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-web
  namespace: pop
  annotations:
    kubernetes.io/ingress.class: "nginx" ## specify the Ingress Controller type
spec:
  rules:
  - host: www.pop.com
    http:
      paths:
      - path: /url1
        pathType: Prefix
        backend:
          service:
            name: pop-tomcat-app1-service
            port:
               number: 80
      - path: /url2
        pathType: Prefix
        backend:
          service:
            name: pop-tomcat-app2-service
            port:
               number: 80

root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl apply -f ingress-url.yaml 
ingress.networking.k8s.io/nginx-web created
root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl get ingress -n pop
NAME        CLASS    HOSTS         ADDRESS          PORTS   AGE
nginx-web   <none>   www.pop.com   192.168.20.253   80      9s

Verify

[screenshots: /url1 and /url2 routed to their respective backend services]
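From the shell, the same check exercises both paths (as before, <http-nodeport> is a placeholder for the controller's HTTP NodePort):

curl -H "Host: www.pop.com" http://192.168.20.253:<http-nodeport>/url1/
curl -H "Host: www.pop.com" http://192.168.20.253:<http-nodeport>/url2/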

11.3.3 HTTPS with multiple domains

Self-signed certificates

# domain www.pop.com
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 3560 -nodes -subj '/CN=www.pop.com'
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=www.pop.com'
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# openssl x509 -req -sha256 -days 3650 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
Signature ok
subject=CN = www.pop.com
Getting CA Private Key

# domain test.pop.net
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# openssl req -new -newkey rsa:4096 -keyout test.key -out test.csr -nodes -subj '/CN=test.pop.net'
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# openssl x509 -req -sha256 -days 3650 -in test.csr -CA ca.crt -CAkey ca.key -set_serial 01  -out test.crt
Signature ok
subject=CN = test.pop.net
Getting CA Private Key
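Before uploading, the generated certificates can be sanity-checked with openssl:

# confirm subject and validity window of each server certificate
openssl x509 -in server.crt -noout -subject -dates
openssl x509 -in test.crt -noout -subject -dates
# verify both certificates chain to the self-signed CA
openssl verify -CAfile ca.crt server.crt test.crt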

Upload the certificates to k8s

root@k8s-ansible-client:~/yaml/20211016/ingress/certs# kubectl  create secret generic www-tls-secret --from-file=tls.crt=server.crt --from-file=tls.key=server.key -n pop
secret/www-tls-secret created
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# kubectl  create secret generic test-tls-secret --from-file=tls.crt=test.crt --from-file=tls.key=test.key -n pop
secret/test-tls-secret created
root@k8s-ansible-client:~/yaml/20211016/ingress/certs# kubectl get secret -n pop
NAME                  TYPE                                  DATA   AGE
default-token-4nrw2   kubernetes.io/service-account-token   3      9d
test-tls-secret       Opaque                                2      11s
www-tls-secret        Opaque                                2      45s
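These secrets end up as type Opaque; kubectl also has a dedicated TLS secret type that Ingress consumes just as well, so an equivalent alternative would be:

kubectl create secret tls www-tls-secret --cert=server.crt --key=server.key -n pop
kubectl create secret tls test-tls-secret --cert=test.crt --key=test.key -n pop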

root@k8s-ansible-client:~/yaml/20211016/ingress# cat ingress-https-multi.yaml 
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-web
  namespace: pop
  annotations:
    kubernetes.io/ingress.class: "nginx" ## specify the Ingress Controller type
    nginx.ingress.kubernetes.io/ssl-redirect: 'true'
spec:
  tls:
  - hosts:
    - www.pop.com
    secretName: www-tls-secret
  - hosts:
    - test.pop.net
    secretName: test-tls-secret 
  rules:
  - host: www.pop.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: pop-tomcat-app1-service
            port:
               number: 80
  - host: test.pop.net
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: pop-tomcat-app2-service
            port:
               number: 80

root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl apply -f ingress-https-multi.yaml 
ingress.networking.k8s.io/nginx-web created
root@k8s-ansible-client:~/yaml/20211016/ingress# kubectl get ingress -n pop
NAME        CLASS    HOSTS                      ADDRESS          PORTS     AGE
nginx-web   <none>   www.pop.com,test.pop.net   192.168.20.253   80, 443   25s

Verify

[screenshots: HTTPS access to www.pop.com and test.pop.net with the self-signed certificates]
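The verification can also be scripted with curl, pinning both names to the ADDRESS shown above with --resolve and accepting the self-signed certificates with -k (if the controller terminates TLS on a NodePort rather than 443, substitute that port):

curl -kv --resolve www.pop.com:443:192.168.20.253 https://www.pop.com/
curl -kv --resolve test.pop.net:443:192.168.20.253 https://test.pop.net/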

12. Controlling Pod replica counts with the HPA controller

12.1 HPA overview

HPA (Horizontal Pod Autoscaler) is a Kubernetes (k8s below) resource object that dynamically scales the number of Pods in objects such as StatefulSets, ReplicationControllers, and ReplicaSets according to certain metrics, giving the services running on them a degree of self-adaptation to metric changes.

HPA currently supports four metric types: Resource, Object, External, and Pods. The stable autoscaling/v1 API supports scaling only on CPU; the beta autoscaling/v2beta2 API supports memory and custom metrics, and these can also be used with autoscaling/v1 via annotations.

12.2 How HPA scaling works

HPA is itself driven by a controller in k8s. The controller loops over every HPA at an interval (15s by default) and checks whether the metrics each HPA monitors have crossed the scaling threshold. Once a threshold is crossed, the controller asks k8s to modify the scale subresource of the scaled object (StatefulSet, ReplicationController, ReplicaSet), which controls the Pod count. k8s handles the request, updates the scale structure, and refreshes the object's Pod count; the modified object then adds or removes Pods through the list/watch mechanism, achieving dynamic scaling.

The kubectl autoscale command automatically controls the number of Pods running in the k8s cluster (horizontal autoscaling); the Pod count range and the trigger condition must be set up front.

k8s added the HPA (Horizontal Pod Autoscaler) controller in version 1.1 to automatically scale Pods based on resource (CPU/memory) utilization. Early versions could only use CPU utilization, collected by the Heapster component, as the trigger condition; since k8s 1.11, data collection is done by Metrics Server (gathering Pod CPU/memory), and the collected data is exposed through aggregated APIs such as metrics.k8s.io, custom.metrics.k8s.io, and external.metrics.k8s.io, which the HPA controller then queries to scale Pods against a target resource utilization.

The controller manager (kube-controller-manager) queries resource usage from the metrics APIs every 15s by default (tunable via --horizontal-pod-autoscaler-sync-period).

kube-controller-manager supports the following three metrics types:
-> predefined metrics (e.g. Pod CPU), computed as a utilization ratio
-> custom Pod metrics, computed as raw values
-> custom object metrics

Two metrics query methods are supported:
-> Heapster
-> a custom REST API

Multiple metrics are supported, i.e. Pod metric data can be gathered through several collection methods.


[diagram: HPA data flow between metrics-server, the aggregated API, and kube-controller-manager]

An operator creates an HPA through the master API; the HPA autoscales the deployment, managing the Pod count by calling kube-controller-manager. When a deployment governed by an HPA sees its Pods' resource utilization exceed the configured target, Pods are created through kube-controller-manager until utilization falls below the target again; the HPA obtains the resource utilization of the Pods inside the deployment it manages via the metrics API.

How it works:
The HPA fetches the Pod metrics of the deployment through the master API (Metrics Server records the collected data via the API server into etcd, and the HPA reads the Pod data back through the master API), then compares them with the value defined on the HPA; if resource utilization exceeds that value, new Pods are created through kube-controller-manager until utilization drops below it. kube-controller-manager queries the metrics resource usage every 15s by default.
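The scaling decision itself follows the documented HPA formula:

desiredReplicas = ceil( currentReplicas * currentMetricValue / desiredMetricValue )

For example, 2 replicas averaging 90% CPU against a 60% target give ceil(2 * 90 / 60) = 3, so the controller scales the deployment to 3 Pods.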

12.3 Deploy metrics-server

root@k8s-ansible-client:~/yaml/20211016/hpa# cat metrics-server-v0.4.4.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        image: harbor.openscp.com/base/metrics-server:v0.4.4
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          periodSeconds: 10
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100

Apply the YAML file

root@k8s-ansible-client:~/yaml/20211016/hpa# kubectl apply -f metrics-server-v0.4.4.yaml 
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created

root@k8s-ansible-client:~/yaml/20211016/hpa# kubectl top node
W1030 22:13:22.276731  170084 top_node.go:119] Using json format to get metrics. Next release will switch to protocol-buffers, switch early by passing --use-protocol-buffers flag
NAME             CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
192.168.20.147   229m         5%     1716Mi          53%       
192.168.20.189   137m         6%     1117Mi          86%       
192.168.20.201   251m         12%    1275Mi          99%       
192.168.20.236   243m         6%     1555Mi          48%       
192.168.20.249   309m         15%    1176Mi          91%       
192.168.20.253   275m         6%     1580Mi          48%
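Pod-level metrics work the same way, and the aggregated API that HPA consumes can be queried directly to confirm the pipeline end to end:

kubectl top pods -n pop
# raw view of what the metrics API returns for the pop namespace
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/namespaces/pop/pods" | head -c 500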

12.4 Autoscaling the Pod count with HPA

Command-line approach

kubectl autoscale deployment/nginx-deployment --min=2 --max=5 --cpu-percent=50 -n default
# autoscale the given deployment's Pod count: when Pod CPU utilization reaches 50%, scale out to at most 5 Pods, never fewer than 2
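The resulting HPA object takes the deployment's name by default and can be inspected with:

kubectl get hpa -n default
kubectl describe hpa nginx-deployment -n default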

YAML approach
Keep the HPA's YAML file separate from the service's YAML file.

root@k8s-ansible-client:~/yaml/20211016/hpa# cat hpa.yaml 
apiVersion: autoscaling/v1                # API version
kind: HorizontalPodAutoscaler             # object type
metadata:                                 # object metadata
  namespace: pop                        # namespace the HPA is created in
  name: pop-tomcat-app1-podautoscaler   # HPA object name
  labels:
    app: pop-tomcat-app1   # custom label
    version: v2beta1         # custom api-version label
spec:                        # object spec
  scaleTargetRef:            # target object to scale: Deployment, ReplicationController/ReplicaSet
    apiVersion: apps/v1      # API version of the target object
    #apiVersion: extensions/v1beta1 
    kind: Deployment         # target object type is Deployment
    name: pop-tomcat-app1-deployment   # name of the target Deployment
  minReplicas: 2                         # minimum number of Pods
  maxReplicas: 4                         # maximum number of Pods
  targetCPUUtilizationPercentage: 60     # target CPU utilization
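One caveat: targetCPUUtilizationPercentage is computed against the CPU requests of the target Pods, so the Deployment's containers must declare resources.requests.cpu, otherwise the HPA reports the metric as unknown. A hedged one-liner to add a request to the existing Deployment (the 500m value is an arbitrary example):

# set a CPU request on the target deployment so utilization can be computed
kubectl set resources deployment pop-tomcat-app1-deployment -n pop --requests=cpu=500m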

Apply the YAML. The deployment runs a single Pod by default; after the HPA is applied, a second Pod is started to satisfy minReplicas: 2. CPU-utilization-driven scaling is not exercised here.

root@k8s-ansible-client:~/yaml/20211010/03/tomcat-app1# kubectl get pods,deploy -n pop
NAME                                              READY   STATUS    RESTARTS     AGE
pod/deploy-devops-redis-d9fd6594c-fvmmg           1/1     Running   0            9d
pod/mysql-0                                       2/2     Running   0            8d
pod/mysql-1                                       2/2     Running   0            8d
pod/mysql-2                                       2/2     Running   1 (8d ago)   8d
pod/pop-tomcat-app1-deployment-54bb9d8f8c-jblz6   1/1     Running   0            4h20m
pod/pop-tomcat-app2-deployment-5676bf7c9-2sc7g    1/1     Running   0            4h21m

NAME                                         READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/deploy-devops-redis          1/1     1            1           9d
deployment.apps/pop-tomcat-app1-deployment   1/1     1            1           4h20m
deployment.apps/pop-tomcat-app2-deployment   1/1     1            1           4h21m

root@k8s-ansible-client:~/yaml/20211016/hpa# kubectl apply -f hpa.yaml 
horizontalpodautoscaler.autoscaling/pop-tomcat-app1-podautoscaler created

root@k8s-ansible-client:~/yaml/20211016/hpa# kubectl get pods,deploy -n pop
NAME                                              READY   STATUS    RESTARTS     AGE
pod/deploy-devops-redis-d9fd6594c-fvmmg           1/1     Running   0            9d
pod/mysql-0                                       2/2     Running   0            8d
pod/mysql-1                                       2/2     Running   0            8d
pod/mysql-2                                       2/2     Running   1 (8d ago)   8d
pod/pop-tomcat-app1-deployment-54bb9d8f8c-jblz6   1/1     Running   0            4h23m
pod/pop-tomcat-app1-deployment-54bb9d8f8c-psrr4   1/1     Running   0            7s
pod/pop-tomcat-app2-deployment-5676bf7c9-2sc7g    1/1     Running   0            4h24m

NAME                                         READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/deploy-devops-redis          1/1     1            1           9d
deployment.apps/pop-tomcat-app1-deployment   2/2     2            2           4h23m
deployment.apps/pop-tomcat-app2-deployment   1/1     1            1           4h24m
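To exercise the CPU-driven scaling path, a throwaway load generator plus a watch on the HPA works; a sketch, assuming pop-tomcat-app1-service (from the Ingress section above) fronts this deployment:

# generate continuous load against the service
kubectl run load-generator --rm -it --restart=Never --image=busybox -n pop -- /bin/sh -c "while true; do wget -q -O- http://pop-tomcat-app1-service; done"
# in another terminal, watch the HPA react
kubectl get hpa pop-tomcat-app1-podautoscaler -n pop -w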
