References

https://github.com/sanderploegsma/redis-cluster
https://www.cnblogs.com/cocowool/p/kubernetes_statefulset.html
https://www.cnblogs.com/puyangsky/p/6677308.html
https://www.cnblogs.com/Jack47/p/deploy-stateful-application-on-Kubernetes.html

Single-instance Redis

### Define a ConfigMap that holds the scripts and configuration files ###
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-configmap
  #labels:
  #  app: redis
data:
  ### Redis configuration file with basic settings ###
  redis.conf: |+
    appendonly yes
    protected-mode no
    maxmemory-policy volatile-lru    
---

### Declare the Deployment ###
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  labels:
    app: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - name: redis
        image: redis:5.0.3
        ### Memory that Redis is allowed to use ###
        resources:
          limits:
            memory: 2Gi
          requests:
            memory: 2Gi

        ports:
        - containerPort: 6379
          name: client
        - containerPort: 16379
          name: gossip
          
          
        ###### Health checks: start ######
        ### After the pod starts, check whether the application is actually up; only then is it added to the Service. Here we run redis-cli's ping to verify that Redis has started correctly. ###
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "redis-cli -h $(hostname) ping"
          ### Seconds to wait after container start before the first probe ###
          initialDelaySeconds: 15
          ### Probe timeout; defaults to 1 second, minimum 1 second ###
          timeoutSeconds: 5
        ### Liveness check: the pod may look fine while the application is hung or unresponsive; in that case the container is restarted. Again we run redis-cli's ping to verify that Redis is healthy. ###
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "redis-cli -h $(hostname) ping"
          ### Seconds to wait after container start before the first probe ###
          initialDelaySeconds: 20
          ### How often to probe; defaults to 10 seconds, minimum 1 second ###
          periodSeconds: 3
        ###### Health checks: end ######
          
          
        volumeMounts:
        ### Mount points ###
        - mountPath: /data
          ### Matches the volume named redis-pvc below ###
          name: redis-pvc
          ### Subdirectory; a sub-folder is created inside cephfs ###
          subPath: basesoft/redis/pvc
        - mountPath: /conf
          name: conf

      volumes:
      ### Volume names; they correspond to the volumeMounts names above ###
      - name: redis-pvc
        persistentVolumeClaim:
          ### Matches metadata.name in cephfs-pvc.yaml ###
          claimName: k8s-pvc
      ### Corresponds to the volumeMounts name "conf" above ###
      - name: conf
        ### Backed by a ConfigMap ###
        configMap:
          ### The ConfigMap's metadata.name is redis-configmap ###
          name: redis-configmap

---

### Create the redis Service ###
apiVersion: v1
kind: Service
metadata:
  name: redis
  labels:
    app: redis
spec:
  ports:
  - port: 6379
    targetPort: 6379
    nodePort: 30379 ### Node port so developers can connect directly ###
    name: client
  - port: 16379
    targetPort: 16379
    name: gossip
  ### Pin the Service's virtual (cluster) IP ###
  clusterIP: 10.98.239.103
  selector:
    app: redis
  ### Service type is NodePort ###
  type: NodePort
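
To bring up and verify the single-instance deployment, a minimal check could look like the following sketch (the manifest file name redis-single.yaml and the node address are assumptions; substitute the actual file name and any cluster node's IP):

kubectl apply -f redis-single.yaml
kubectl rollout status deployment/redis
redis-cli -h <node-ip> -p 30379 ping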

Redis Cluster

  1. Create the cephfs account and directories
ceph fs authorize cephfs client.redis-cluster /redis-cluster rw

mkdir /mnt/mycephfs/redis-cluster

mkdir /mnt/mycephfs/redis-cluster/data-redis-cluster-0
mkdir /mnt/mycephfs/redis-cluster/data-redis-cluster-1
mkdir /mnt/mycephfs/redis-cluster/data-redis-cluster-2
mkdir /mnt/mycephfs/redis-cluster/data-redis-cluster-3
mkdir /mnt/mycephfs/redis-cluster/data-redis-cluster-4
mkdir /mnt/mycephfs/redis-cluster/data-redis-cluster-5
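
To confirm the newly authorized client and obtain the key needed for the Secret in the next step, something like the following can be run on a node with Ceph admin credentials (both are standard ceph CLI calls):

ceph auth get client.redis-cluster
ceph auth get-key client.redis-cluster | base64
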
  2. Create the Secret holding the cephfs credentials
apiVersion: v1
kind: Secret
metadata:
  name: redis-cluster-ceph-secret
data:
### Obtained via: ceph auth get-key client.redis-cluster | base64 ###
  key: QVFDeUdFbGMvZUxaS3hBQTM0TWVOdklHY1Nzb1daVkxwbXBDWUE9PQ==
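
Assuming the Secret above is saved as redis-cluster-secret.yaml (the file name the PV comments later refer to), it can be applied and inspected like this:

kubectl apply -f redis-cluster-secret.yaml
kubectl get secret redis-cluster-ceph-secret -o yaml
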
  3. Create the PVs
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv-0
  labels:
    pv: redis-cluster-pv-0
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - node1:6789
      - node2:6789
      - node3:6789
    ### A cephfs subdirectory can be configured here and bound to a dedicated user for permission isolation ###
    path: /redis-cluster/data-redis-cluster-0
    ### Ceph account ###
    user: redis-cluster
    secretRef:
      ### Must match metadata.name in redis-cluster-secret.yaml ###
      name: redis-cluster-ceph-secret
    readOnly: false
  ### Reclaim policy: Retain = reclaimed manually, Recycle = must be scrubbed before reuse, Delete = the backing storage asset is deleted ###
  persistentVolumeReclaimPolicy: Recycle
--- 

apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv-1
  labels:
    pv: redis-cluster-pv-1
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - node1:6789
      - node2:6789
      - node3:6789
    ### A cephfs subdirectory can be configured here and bound to a dedicated user for permission isolation ###
    path: /redis-cluster/data-redis-cluster-1
    ### Ceph account ###
    user: redis-cluster
    secretRef:
      ### Must match metadata.name in redis-cluster-secret.yaml ###
      name: redis-cluster-ceph-secret
    readOnly: false
  ### Reclaim policy: Retain = reclaimed manually, Recycle = must be scrubbed before reuse, Delete = the backing storage asset is deleted ###
  persistentVolumeReclaimPolicy: Recycle
--- 

apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv-2
  labels:
    pv: redis-cluster-pv-2
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - node1:6789
      - node2:6789
      - node3:6789
    ### A cephfs subdirectory can be configured here and bound to a dedicated user for permission isolation ###
    path: /redis-cluster/data-redis-cluster-2
    ### Ceph account ###
    user: redis-cluster
    secretRef:
      ### Must match metadata.name in redis-cluster-secret.yaml ###
      name: redis-cluster-ceph-secret
    readOnly: false
  ### Reclaim policy: Retain = reclaimed manually, Recycle = must be scrubbed before reuse, Delete = the backing storage asset is deleted ###
  persistentVolumeReclaimPolicy: Recycle

--- 

apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv-3
  labels:
    pv: redis-cluster-pv-3
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - node1:6789
      - node2:6789
      - node3:6789
    ### A cephfs subdirectory can be configured here and bound to a dedicated user for permission isolation ###
    path: /redis-cluster/data-redis-cluster-3
    ### Ceph account ###
    user: redis-cluster
    secretRef:
      ### Must match metadata.name in redis-cluster-secret.yaml ###
      name: redis-cluster-ceph-secret
    readOnly: false
  ### Reclaim policy: Retain = reclaimed manually, Recycle = must be scrubbed before reuse, Delete = the backing storage asset is deleted ###
  persistentVolumeReclaimPolicy: Recycle
--- 

apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv-4
  labels:
    pv: redis-cluster-pv-4
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - node1:6789
      - node2:6789
      - node3:6789
    ### A cephfs subdirectory can be configured here and bound to a dedicated user for permission isolation ###
    path: /redis-cluster/data-redis-cluster-4
    ### Ceph account ###
    user: redis-cluster
    secretRef:
      ### Must match metadata.name in redis-cluster-secret.yaml ###
      name: redis-cluster-ceph-secret
    readOnly: false
  ### Reclaim policy: Retain = reclaimed manually, Recycle = must be scrubbed before reuse, Delete = the backing storage asset is deleted ###
  persistentVolumeReclaimPolicy: Recycle
  
--- 

apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv-5
  labels:
    pv: redis-cluster-pv-5
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  cephfs:
    monitors:
      - node1:6789
      - node2:6789
      - node3:6789
    ### A cephfs subdirectory can be configured here and bound to a dedicated user for permission isolation ###
    path: /redis-cluster/data-redis-cluster-5
    ### Ceph account ###
    user: redis-cluster
    secretRef:
      ### Must match metadata.name in redis-cluster-secret.yaml ###
      name: redis-cluster-ceph-secret
    readOnly: false
  ### Reclaim policy: Retain = reclaimed manually, Recycle = must be scrubbed before reuse, Delete = the backing storage asset is deleted ###
  persistentVolumeReclaimPolicy: Recycle
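
After applying the PV definitions (saved here as redis-cluster-pv.yaml, the file name the PVC comments refer to), the six volumes should be listed as Available:

kubectl apply -f redis-cluster-pv.yaml
kubectl get pv
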
  4. Create the PVCs
### Define the PVCs. A StatefulSet normally creates PVCs through a StorageClass, but there is no stable StorageClass for cephfs yet, so the PVs and PVCs are created manually and bound to each other ###
### Note: each PVC name must be {volumeClaimTemplates.metadata.name}-{StatefulSet name}-{ordinal}; in this example that gives data-redis-cluster-0, data-redis-cluster-1, ... ###
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: data-redis-cluster-0
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests: 
      storage: 1Gi
  selector:
    matchLabels:
      ### Matches the labels in redis-cluster-pv.yaml ###
      pv: redis-cluster-pv-0
---

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: data-redis-cluster-1
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests: 
      storage: 1Gi
  selector:
    matchLabels:
      ### Matches the labels in redis-cluster-pv.yaml ###
      pv: redis-cluster-pv-1
---


kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: data-redis-cluster-2
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests: 
      storage: 1Gi
  selector:
    matchLabels:
      ### Matches the labels in redis-cluster-pv.yaml ###
      pv: redis-cluster-pv-2
---


kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: data-redis-cluster-3
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests: 
      storage: 1Gi
  selector:
    matchLabels:
      ### Matches the labels in redis-cluster-pv.yaml ###
      pv: redis-cluster-pv-3
---


kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: data-redis-cluster-4
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests: 
      storage: 1Gi
  selector:
    matchLabels:
      ### Matches the labels in redis-cluster-pv.yaml ###
      pv: redis-cluster-pv-4
---


kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: data-redis-cluster-5
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests: 
      storage: 1Gi
  selector:
    matchLabels:
      ### Matches the labels in redis-cluster-pv.yaml ###
      pv: redis-cluster-pv-5
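
Once the claims are applied (the file name redis-cluster-pvc.yaml is an assumption), each one should bind to the PV whose label it selects:

kubectl apply -f redis-cluster-pvc.yaml
kubectl get pvc
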
  5. Create the StatefulSet and the Redis containers
### Define a ConfigMap that holds the scripts and configuration files ###
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-cluster
  labels:
    app: redis-cluster
data:
### Script that fixes up the IP mapping: a recreated pod gets a new IP, but the Redis cluster was formed from pod IPs, so the node's own entry in nodes.conf has to be rewritten ###
  fix-ip.sh: |
    #!/bin/sh
    CLUSTER_CONFIG="/data/nodes.conf"
    if [ -f ${CLUSTER_CONFIG} ]; then
      if [ -z "${POD_IP}" ]; then 
        echo "Unable to determine Pod IP address!"
        exit 1
      fi
      echo "Updating my IP to ${POD_IP} in ${CLUSTER_CONFIG}"
      sed -i.bak -e "/myself/ s/[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/${POD_IP}/" ${CLUSTER_CONFIG}
    fi
    exec "$@"    
  ### Redis configuration file with basic settings ###
  redis.conf: |+
    cluster-enabled yes
    cluster-require-full-coverage no
    cluster-node-timeout 15000
    cluster-config-file /data/nodes.conf
    cluster-migration-barrier 1
    appendonly yes
    protected-mode no
    maxmemory-policy volatile-lru    
---

### Create a headless Service (no clusterIP). The Service's DNS name is redis-cluster.default.svc.cluster.local ###
### Pod names are {StatefulSet.metadata.name}-{ordinal}, e.g. redis-cluster-0 in this example ###
### Each pod under the Service gets the DNS name {podName}.{Service domain}, e.g. redis-cluster-0.redis-cluster.default.svc.cluster.local ###
### Reference: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ ###
apiVersion: v1
kind: Service
metadata:
  name: redis-cluster
  labels:
    app: redis-cluster
spec:
  ports:
  - port: 6379
    targetPort: 6379
    name: client
  - port: 16379
    targetPort: 16379
    name: gossip
  ### Headless Service ###
  clusterIP: None 
  selector:
    app: redis-cluster
---


### Create the StatefulSet, used for stateful services ###
### Note: a StatefulSet normally creates its PVCs through a StorageClass; there is no stable StorageClass for cephfs yet, so the PVs and PVCs were created manually above and are bound by name ###
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-cluster
  labels:
    app: redis-cluster
spec:
  ### Name of the governing Service ###
  serviceName: redis-cluster
  ### Create 6 replicas ###
  replicas: 6
  selector:
    matchLabels:
      ### Label on the Redis pods ###
      app: redis-cluster
  template:
    metadata:
      labels:
        ### Label on the Redis pods ###
        app: redis-cluster
    spec:
      containers:
      - name: redis
        ### Redis image, version 5.0.3 ###
        image: redis:5.0.3
        ### CPU and memory Redis is allowed to use ###
        resources:
          limits:
            #cpu: "1"
            memory: 1Gi
          requests:
            #cpu: "1"
            memory: 1Gi
        ### Redis ports ###
        ports:
        - containerPort: 6379
          name: client
        - containerPort: 16379
          name: gossip
        ### Container command: fix-ip.sh patches the node IP, then launches redis-server with the /conf/redis.conf configuration ###
        command: ["/conf/fix-ip.sh", "redis-server", "/conf/redis.conf"]
        ### After the pod starts, check whether the application is actually up; only then is it added to the Service. Here we run redis-cli's ping to verify that Redis has started correctly. ###
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "redis-cli -h $(hostname) ping"
          ### Seconds to wait after container start before the first probe ###
          initialDelaySeconds: 15
          ### Probe timeout; defaults to 1 second, minimum 1 second ###
          timeoutSeconds: 5
        ### Liveness check: the pod may look fine while the application is hung or unresponsive; in that case the container is restarted. Again we run redis-cli's ping to verify that Redis is healthy. ###
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "redis-cli -h $(hostname) ping"
          ### Seconds to wait after container start before the first probe ###
          initialDelaySeconds: 20
          ### How often to probe; defaults to 10 seconds, minimum 1 second ###
          periodSeconds: 3
          
        ### Environment variables used to obtain the pod's IP ###
        env:
        ### ${POD_IP} is read by fix-ip.sh; it is populated from the pod's status ###
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        ### Volume mounts ###
        volumeMounts:
        - name: conf
          mountPath: /conf
          readOnly: false
        - name: data
          mountPath: /data
          readOnly: false
      ### Volume definitions ###
      volumes:
      ### Corresponds to the volumeMounts name "conf" above ###
      - name: conf
        ### Backed by a ConfigMap ###
        configMap:
          ### The ConfigMap's metadata.name is redis-cluster ###
          name: redis-cluster
          ### Default mode with execute permission so fix-ip.sh can be run ###
          defaultMode: 0755
  ### PVC template; the generated claim names are {volumeClaimTemplates.metadata.name}-{StatefulSet name}-{ordinal}, e.g. data-redis-cluster-0, data-redis-cluster-1, ... ###
  volumeClaimTemplates:
  - metadata:
      ### Template name ###
      name: data
      labels:
        name: redis-cluster
    spec:
      ### Access modes ###
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          ### Gi is binary (1024 Mi); G is decimal (1000 M) ###
          storage: 1Gi
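
After applying the StatefulSet manifest (the file name redis-cluster.yaml is an assumption), the six pods should start in order and their claims should report Bound; a quick sanity check:

kubectl apply -f redis-cluster.yaml
kubectl get pods -l app=redis-cluster -o wide
kubectl get pvc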

  6. Create the cluster
kubectl exec -it redis-cluster-0 -- redis-cli --cluster create --cluster-replicas 1 $(kubectl get pods -l app=redis-cluster -o jsonpath='{range.items[*]}{.status.podIP}:6379 ')
  7. Check the cluster
kubectl exec -it redis-cluster-0 -- redis-cli --cluster check 127.0.0.1:6379
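
For a closer look at slot coverage and the master/replica layout, redis-cli's built-in cluster commands can also be run from inside any of the pods, for example:

kubectl exec -it redis-cluster-0 -- redis-cli cluster info
kubectl exec -it redis-cluster-0 -- redis-cli cluster nodes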