Introduction

This article describes how GlusterFS can be used to dynamically provision PVs for Kubernetes. GlusterFS provides the underlying storage, and Heketi provides a RESTful API for managing GlusterFS. Kubernetes PVs support three access modes: ReadWriteOnce, ReadOnlyMany, and ReadWriteMany.

The access mode is only a capability description and is not enforced; it is up to the storage provider to raise runtime errors when a PV is accessed in a way other than what the PVC declared. For example, even if the PVC access mode is set to ReadOnlyMany, a pod can still write to the volume after mounting it. If read-only access must actually be enforced, specify readOnly: true where the pod mounts the PVC.
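
For illustration, a minimal pod sketch that mounts a claim read-only (the pod name here is hypothetical; the claim name gluster1 matches the test PVC created later in this article):

apiVersion: v1
kind: Pod
metadata:
  name: readonly-demo   # hypothetical name, for illustration only
spec:
  containers:
  - name: app
    image: nginx:alpine
    volumeMounts:
    - name: data
      mountPath: /usr/share/nginx/html
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: gluster1
      readOnly: true    # enforced at mount time, unlike the PVC access mode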

Installation

The Vagrantfile used for the experiment:

# -*- mode: ruby -*-
# vi: set ft=ruby :

ENV["LC_ALL"] = "en_US.UTF-8"

Vagrant.configure("2") do |config|
  (1..3).each do |i|
    config.vm.define "lab#{i}" do |node|
      node.vm.box = "centos-7.4-docker-17"
      node.ssh.insert_key = false
      node.vm.hostname = "lab#{i}"
      node.vm.network "private_network", ip: "11.11.11.11#{i}"
      node.vm.provision "shell",
        inline: "echo hello from node #{i}"
      node.vm.provider "virtualbox" do |v|
        v.cpus = 2
        v.customize ["modifyvm", :id, "--name", "lab#{i}", "--memory", "3096"]
        file_to_disk = "lab#{i}_vdb.vdi"
        unless File.exist?(file_to_disk)
          # 50GB data disk
          v.customize ['createhd', '--filename', file_to_disk, '--size', 50 * 1024]
        end
        v.customize ['storageattach', :id, '--storagectl', 'IDE', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', file_to_disk]
      end
    end
  end
end
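
With the Vagrantfile in place, the three-node lab cluster can be brought up with:

vagrant up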

Environment Configuration Description

# Install the dm_thin_pool module on each node
modprobe dm_thin_pool

# Configure the module to load automatically at boot
cat >/etc/modules-load.d/glusterfs.conf<<EOF
dm_thin_pool
EOF

# Install glusterfs-fuse
yum install -y glusterfs-fuse
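
To verify that the module is actually loaded on a node:

lsmod | grep dm_thin_pool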

Install GlusterFS with Heketi

# Install the heketi client
# https://github.com/heketi/heketi/releases
# Download the release from GitHub
wget https://github.com/heketi/heketi/releases/download/v7.0.0/heketi-client-v7.0.0.linux.amd64.tar.gz
tar xf heketi-client-v7.0.0.linux.amd64.tar.gz
cp heketi-client/bin/heketi-cli /usr/local/bin

# check version
heketi-cli -v

# The following deployment steps are performed in this directory
cd heketi-client/share/heketi/kubernetes

# Deploy GlusterFS in K8S
kubectl create -f glusterfs-daemonset.json

# Check node
kubectl get nodes

# Label the nodes that will provide storage
kubectl label node lab1 lab2 lab3 storagenode=glusterfs
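
# Verify the label was applied to all three nodes (sanity check)
kubectl get nodes -L storagenode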

# Check glusterFS status
kubectl get pods -o wide

# Deploy Heketi Server
# Configure heketi server permissions
kubectl create -f heketi-service-account.json
kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account
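
# Confirm the role binding exists (sanity check)
kubectl get clusterrolebinding heketi-gluster-admin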

# Create the config Secret
kubectl create secret generic heketi-config-secret --from-file=./heketi.json

# Initialize the bootstrap deployment
kubectl create -f heketi-bootstrap.json

# Check the heketi bootstrap status
kubectl get pods -o wide
kubectl get svc

# Configure port forwarding to the heketi server
HEKETI_BOOTSTRAP_POD=$(kubectl get pods | grep deploy-heketi | awk '{print $1}')
kubectl port-forward $HEKETI_BOOTSTRAP_POD 58080:8080

# Test access from another terminal
curl http://localhost:58080/hello

# Configure glusterfs
# The hostnames/manage fields must match the node names from kubectl get nodes
# This experiment uses the same IP addresses as the K8S cluster nodes
cat >topology.json<<EOF
{
  "clusters": [{"nodes": [{"node": {
            "hostnames": {
              "manage": [
                "lab1"]."storage": [
                "11.11.11.111"]},"zone": 1}."devices": [{"name": "/dev/sdb"."destroydata": false}]}, {"node": {
            "hostnames": {
              "manage": [
                "lab2"]."storage": [
                "11.11.11.112"]},"zone": 1}."devices": [{"name": "/dev/sdb"."destroydata": false}]}, {"node": {
            "hostnames": {
              "manage": [
                "lab3"]."storage": [
                "11.11.11.113"]},"zone": 1}."devices": [{"name": "/dev/sdb"."destroydata": false
            }
          ]
        }
      ]
    }
  ]
}
EOF
export HEKETI_CLI_SERVER=http://localhost:58080
heketi-cli topology load --json=topology.json
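
# Verify the loaded topology; all three nodes and their /dev/sdb devices should be listed
heketi-cli topology info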

# Create a volume to store the heketi database
heketi-cli setup-openshift-heketi-storage
kubectl create -f heketi-storage.json

# check status
# When the job status changes to Completed, the following steps can be performed
kubectl get pods
kubectl get job
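
# Alternatively, block until the copy job finishes
# (job name assumed from the generated heketi-storage.json)
kubectl wait --for=condition=complete job/heketi-storage-copy-job --timeout=300s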

# Delete the intermediate resources generated during bootstrap
kubectl delete all,service,jobs,deployment,secret --selector="deploy-heketi"

# Deploy Heketi Server
kubectl create -f heketi-deployment.json

# Check the heketi server status
kubectl get pods -o wide
kubectl get svc

# check heketi status
# Configure port forwarding to the heketi server
HEKETI_POD=$(kubectl get pods | grep heketi | awk '{print $1}')
kubectl port-forward $HEKETI_POD 58080:8080
export HEKETI_CLI_SERVER=http://localhost:58080
heketi-cli cluster list
heketi-cli volume list
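
# Optional: create a throwaway 1GiB volume to confirm end-to-end provisioning,
# then remove it with: heketi-cli volume delete <volume-id>
heketi-cli volume create --size=1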

Test

# Create the StorageClass
# Authentication is not enabled,
# so restuser and restuserkey can be set to arbitrary values
HEKETI_SERVER=$(kubectl get svc | grep heketi | head -1 | awk '{print $3}')
echo $HEKETI_SERVER
cat >storageclass-glusterfs.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: gluster-heketi
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://$HEKETI_SERVER:8080"
  restauthenabled: "false"
  restuser: "will"
  restuserkey: "will"
  gidMin: "40000"
  gidMax: "50000"
  volumetype: "replicate:3"
EOF
kubectl create -f storageclass-glusterfs.yaml

# check
kubectl get sc
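
# Optional: make gluster-heketi the default StorageClass,
# so PVCs without an explicit class also use it
kubectl patch storageclass gluster-heketi -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'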

# Create a test PVC
cat >gluster-pvc-test.yaml<<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
 name: gluster1
 annotations:
   volume.beta.kubernetes.io/storage-class: gluster-heketi
spec:
 accessModes:
  - ReadWriteOnce
 resources:
   requests:
     storage: 5Gi
EOF
kubectl apply -f gluster-pvc-test.yaml
 
# check
kubectl get pvc
kubectl get pv
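
# If the PVC stays Pending, describe it to see provisioner errors
kubectl describe pvc gluster1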
 
# Create an nginx pod to test mounting the volume
cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
  - name: nginx-pod1
    image: nginx:alpine
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: gluster-vol1
      mountPath: /usr/share/nginx/html
  volumes:
  - name: gluster-vol1
    persistentVolumeClaim:
      claimName: gluster1
EOF
kubectl apply -f nginx-pod.yaml
 
# check
kubectl get pods -o wide
 
# Modify file contents
kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from GlusterFS!!! > /usr/share/nginx/html/index.html'
 
# Access test
POD_IP=$(kubectl get pods -o wide | grep nginx-pod1 | awk '{print $(NF-1)}')
curl http://$POD_IP
 
# View the file contents on a glusterfs node
GLUSTERFS_POD=$(kubectl get pod | grep glusterfs | head -1 | awk '{print $1}')
kubectl exec -ti $GLUSTERFS_POD -- /bin/sh
mount | grep heketi
cat /var/lib/heketi/mounts/vg_56033aa8a9131e84faa61a6f4774d8c3/brick_1ac5f3a0730457cf3fcec6d881e132a2/brick/index.html
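
To clean up after the test, delete the pod and then the claim; since dynamically provisioned volumes default to the Delete reclaim policy, removing the PVC also removes the PV and the backing GlusterFS volume:

kubectl delete pod nginx-pod1
kubectl delete pvc gluster1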
