Takeaway

This article walks through deploying ceph-csi on Kubernetes and dynamically expanding a PVC.
Environment version
[root@master kubernetes]# kubectl get node
NAME STATUS ROLES AGE VERSION
master Ready master 40d v1.18.0
node1 Ready node 40d v1.18.0
node2 Ready node 40d v1.18.0
[root@master kubernetes]# ceph version
ceph version 10.2.11 (e4b061b47f07f583c92a050d9e84b1813a35671e)
Configure ceph-csi
- Create a storage pool
ceph osd pool create kube
[root@master kubernetes]# ceph osd pool ls
rbd
kube
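Note that older Ceph releases (including the Jewel line used here) may insist on explicit placement-group counts when creating a pool. A minimal sketch, where 64 is only an assumption sized for a small test cluster:

# pg_num and pgp_num are assumptions; size them to your OSD count
ceph osd pool create kube 64 64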
- Create a new user
ceph auth get-or-create client.kube mon 'profile rbd' osd 'profile rbd pool=kube' mgr 'profile rbd pool=kube'
[client.kube]
        key = AQBnz11fclr********zmr8ifftAHQbTw==
# View the user
[root@master kubernetes]# ceph auth get client.kube
exported keyring for client.kube
[client.kube]
        key = AQB/8jlfbtSk*******04A/Xp/eWOEx67pw==
        caps mon = "allow r"
        caps osd = "allow class-read object_prefix rbd_children, allow rwx pool=kube"
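When you only need the key itself, for example to paste into the Secret created below, ceph auth get-key prints it without the keyring wrapper:

[root@master kubernetes]# ceph auth get-key client.kube
AQB/8jlfbtSk*******04A/Xp/eWOEx67pw==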
- Get ceph cluster information
[root@master kubernetes]# ceph mon dump
dumped monmap epoch 1
epoch 1
fsid 98564ee8-31bc-4ed6-9c31-cee****a8c
last_changed 2020-08-16 22:11:42.371294
created 2020-08-16 22:11:42.371294
0: 192.168.100.11:6789/0 mon.master
# We need the fsid (the ID of the Ceph cluster) and the monitor address: 192.168.100.11:6789
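If you just want the cluster ID on its own, ceph fsid prints exactly that:

[root@master kubernetes]# ceph fsid
98564ee8-31bc-4ed6-9c31-cee****a8c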
- Pull ceph-csi
git clone https://github.com/ceph/ceph-csi
- Modify the ConfigMap
cd ceph-csi/deploy/rbd/kubernetes
[root@master kubernetes]# cat csi-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "98564ee8-31bc-4ed6-9c31-cee***ca8c",
        "monitors": ["192.168.100.11:6789"]
      }
    ]
metadata:
  name: ceph-csi-config
# Fill in clusterID with the fsid and monitors with the monitor address, then apply it
kubectl apply -f csi-config-map.yaml
- Create the Secret
[root@master kubernetes]# cat csi-rbd-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
stringData:
  userID: kube
  userKey: AQB/8jlfbtSkIxAAb*******/Xp/eWOEx67pw==
# Apply it
kubectl apply -f csi-rbd-secret.yaml
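To double-check what actually landed in the Secret (stringData is stored base64-encoded under .data), decode the key back out:

kubectl get secret csi-rbd-secret -o jsonpath='{.data.userKey}' | base64 -d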
- RBAC authorization
# Replace the default namespace in all manifests with ceph-csi, and append it to the ServiceAccounts
$ sed -i "s/namespace: default/namespace: ceph-csi/g" $(grep -rl "namespace: default" ./)
$ sed -i -e "/^kind: ServiceAccount/{N;N;a\  namespace: ceph-csi" -e "}" $(egrep -rl "^kind: ServiceAccount" ./)
# Create the required ServiceAccount and RBAC ClusterRole/ClusterRoleBinding resources
$ kubectl create -f csi-provisioner-rbac.yaml
$ kubectl create -f csi-nodeplugin-rbac.yaml
# Create the PodSecurityPolicy resources
$ kubectl create -f csi-provisioner-psp.yaml
$ kubectl create -f csi-nodeplugin-psp.yaml
Deploy the CSI sidecars
# KMS-based encryption is not used here, so comment out the ceph-csi-encryption-kms-config
# volumes in the two manifests:

csi-rbdplugin-provisioner.yaml
137 #- name: ceph-csi-encryption-kms-config
138 #  mountPath: /etc/ceph-csi-encryption-kms-config/
178 #- name: ceph-csi-encryption-kms-config
179 #  configMap:
180 #    name: ceph-csi-encryption-kms-config

csi-rbdplugin.yaml
89  #- name: ceph-csi-encryption-kms-config
90  #  mountPath: /etc/ceph-csi-encryption-kms-config/
153 #- name: ceph-csi-encryption-kms-config
154 #  configMap:
155 #    name: ceph-csi-encryption-kms-config
- Deploy csi-rbdplugin-provisioner
kubectl create -f csi-rbdplugin-provisioner.yaml
# The sidecars here include external-provisioner, external-attacher, csi-resizer, and csi-rbdplugin.
- Deploy the RBD CSI driver
kubectl create -f csi-rbdplugin.yaml
# Each pod contains the CSI node-driver-registrar and the csi rbd driver
[root@master kubernetes]# kubectl get pod |grep csi-rbdplugin
csi-rbdplugin-c28gp 3/3 Running 0 8h
csi-rbdplugin-provisioner-7dfbc5fc7d-frwxz 6/6 Running 0 8h
csi-rbdplugin-provisioner-7dfbc5fc7d-q5ljg 6/6 Running 0 8h
csi-rbdplugin-provisioner-7dfbc5fc7d-v8v49 6/6 Running 0 8h
csi-rbdplugin-x5rkk 3/3 Running 0 8h
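Beyond grepping the pods, you can check the controller objects directly; this assumes the object names from the upstream manifests (a Deployment named csi-rbdplugin-provisioner and a DaemonSet named csi-rbdplugin):

kubectl get deployment csi-rbdplugin-provisioner
kubectl get daemonset csi-rbdplugin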
- Create the StorageClass
[root@master kubernetes]# cat storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: 98564ee8-31bc-4ed6-9c31-cee1e15eca8c
  pool: kube
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
  csi.storage.k8s.io/fstype: ext4
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
# clusterID corresponds to the fsid of the Ceph cluster
# imageFeatures determines the features of the created image; if not specified,
# it falls back to the feature list supported by the RBD kernel module
kubectl apply -f storageclass.yaml
[root@master kubernetes]# kubectl get sc |grep csi
csi-rbd-sc   rbd.csi.ceph.com   Delete   Immediate   true   8h
Try out ceph-csi
Through the PersistentVolume subsystem, Kubernetes gives users and administrators a set of APIs that abstract the details of how storage is provided from how it is consumed: a PV (PersistentVolume) is the actual piece of storage, while a PVC (PersistentVolumeClaim) is a user's request for storage.
cd ceph-csi/examples/rbd
# Create the PVC
kubectl apply -f pvc.yaml
[root@master rbd]# cat pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc
# Create the pod
kubectl apply -f pod.yaml
[root@master rbd]# cat pod.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: csi-rbd-demo-pod
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www/html
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: rbd-pvc
        readOnly: false
# Check the PVC and the automatically created PV
kubectl get pvc,pv
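The PV that the claim bound to is recorded in the claim's spec.volumeName, so you can jump straight from the PVC to its volume:

kubectl get pv $(kubectl get pvc rbd-pvc -o jsonpath='{.spec.volumeName}')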
- Pod test
kubectl exec -it csi-rbd-demo-pod bash
root@csi-rbd-demo-pod:/# cd /var/lib/www/
root@csi-rbd-demo-pod:/var/lib/www# ls -l
total 4
drwxrwxrwx 3 root root 4096 Sep 14 09:09 html
root@csi-rbd-demo-pod:/var/lib/www# echo "https://fuckcloudnative.io" > sealos.txt
root@csi-rbd-demo-pod:/var/lib/www# cat sealos.txt
https://fuckcloudnative.io
- View RBD images in the Ceph pool
[root@master rbd]# rbd ls -p kube
csi-vol-60e0cf1b-fd5d-11ea-82ec-c2a930d7d7e0
csi-vol-928bae1f-fd5e-11ea-82ec-c2a930d7d7e0
# There are two images because an earlier expansion test used one of them
[root@master rbd]# rbd info csi-vol-60e0cf1b-fd5d-11ea-82ec-c2a930d7d7e0 -p kube
rbd image 'csi-vol-60e0cf1b-fd5d-11ea-82ec-c2a930d7d7e0':
        size 2048 MB in 512 objects
        order 22 (4096 kB objects)
        block_name_prefix: rbd_data.25a0fa6552204
        format: 2
        features: layering
        flags:
[root@master rbd]# rbd info csi-vol-928bae1f-fd5e-11ea-82ec-c2a930d7d7e0 -p kube
rbd image 'csi-vol-928bae1f-fd5e-11ea-82ec-c2a930d7d7e0':
        size 3072 MB in 768 objects
        order 22 (4096 kB objects)
        block_name_prefix: rbd_data.25a0fb1419ae9
        format: 2
        features: layering

You can see that the feature restriction on the image took effect: only layering is enabled. The image is actually mapped to the node as a block device, which you can inspect with the rbd command:

[root@node1 ~]# rbd showmapped
id pool image                                        snap device
0  kube csi-vol-928bae1f-fd5e-11ea-82ec-c2a930d7d7e0 -    /dev/rbd0
[root@node1 ~]# lsblk -l |grep rbd
rbd0 253:0 0 3G 0 disk /var/lib/kubelet/pods/a6d918de-526b-43c0-ae2e-1a271dd4d471/volumes/kubernetes.io~csi/pvc-ca05d200-f7f7-4b1a-ac05-7be3eb71bd6f/mount
PVC expansion
- Expand the PVC
# Either modify the yaml file directly, change the storage size, and apply it again,
# or run kubectl edit pvc rbd-pvc, modify spec.resources.requests.storage, save and exit.
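A one-line alternative is kubectl patch; a minimal sketch, assuming we grow the claim from 1Gi to 2Gi:

kubectl patch pvc rbd-pvc -p '{"spec":{"resources":{"requests":{"storage":"2Gi"}}}}'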
- Verify the expansion
The CSI driver automatically expands the volume on the storage side; watch the PVC status change, which usually takes a few seconds.
You can also exec into the pod to check whether the mount point has grown, as shown below.
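A minimal way to verify both sides, using the claim and pod names from the example above:

# Watch the PVC until the new capacity is reported
kubectl get pvc rbd-pvc -w
# Then confirm the filesystem inside the pod grew as well
kubectl exec -it csi-rbd-demo-pod -- df -h /var/lib/www/html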