Basic Environment Configuration
# CentOS
yum install iscsi-initiator-utils
# Ubuntu
apt-get install open-iscsi
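After installing, make sure the iSCSI daemon is running on every node. A quick check, assuming systemd (the unit is named iscsid on CentOS and is also provided by the open-iscsi package on Ubuntu):

systemctl enable iscsid
systemctl start iscsid
systemctl status iscsid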
Install longhorn-system
apiVersion: v1
kind: Namespace
metadata:
  name: longhorn-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: longhorn-service-account
  namespace: longhorn-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: longhorn-role
rules:
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - "*"
- apiGroups: [""]
  resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims", "persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps"]
  verbs: ["*"]
- apiGroups: [""]
  resources: ["namespaces"]
  verbs: ["get", "list"]
- apiGroups: ["apps"]
  resources: ["daemonsets", "statefulsets", "deployments"]
  verbs: ["*"]
- apiGroups: ["batch"]
  resources: ["jobs", "cronjobs"]
  verbs: ["*"]
- apiGroups: ["scheduling.k8s.io"]
  resources: ["priorityclasses"]
  verbs: ["watch", "list"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses", "volumeattachments", "csinodes", "csidrivers"]
  verbs: ["*"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["longhorn.io"]
  resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings",
              "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status"]
  verbs: ["*"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: longhorn-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: longhorn-role
subjects:
- kind: ServiceAccount
  name: longhorn-service-account
  namespace: longhorn-system
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: Engine
  name: engines.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Engine
    listKind: EngineList
    plural: engines
    shortNames:
    - lhe
    singular: engine
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: Replica
  name: replicas.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Replica
    listKind: ReplicaList
    plural: replicas
    shortNames:
    - lhr
    singular: replica
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: Setting
  name: settings.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Setting
    listKind: SettingList
    plural: settings
    shortNames:
    - lhs
    singular: setting
  scope: Namespaced
  version: v1beta1
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: Volume
  name: volumes.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Volume
    listKind: VolumeList
    plural: volumes
    shortNames:
    - lhv
    singular: volume
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: EngineImage
  name: engineimages.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: EngineImage
    listKind: EngineImageList
    plural: engineimages
    shortNames:
    - lhei
    singular: engineimage
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: Node
  name: nodes.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: Node
    listKind: NodeList
    plural: nodes
    shortNames:
    - lhn
    singular: node
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    longhorn-manager: InstanceManager
  name: instancemanagers.longhorn.io
spec:
  group: longhorn.io
  names:
    kind: InstanceManager
    listKind: InstanceManagerList
    plural: instancemanagers
    shortNames:
    - lhim
    singular: instancemanager
  scope: Namespaced
  version: v1beta1
  subresources:
    status: {}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: longhorn-default-setting
  namespace: longhorn-system
data:
  default-setting.yaml: |-
    backup-target:
    backup-target-credential-secret:
    create-default-disk-labeled-nodes:
    default-data-path:
    replica-soft-anti-affinity:
    storage-over-provisioning-percentage:
    storage-minimal-available-percentage:
    upgrade-checker:
    default-replica-count:
    guaranteed-engine-cpu:
    default-longhorn-static-storage-class:
    backupstore-poll-interval:
    taint-toleration:
    priority-class:
    registry-secret:
    auto-salvage:
    disable-scheduling-on-cordoned-node:
    replica-zone-soft-anti-affinity:
    volume-attachment-recovery-policy:
    mkfs-ext4-parameters:
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: longhorn-psp
spec:
  privileged: true
  allowPrivilegeEscalation: true
  requiredDropCapabilities:
  - NET_RAW
  allowedCapabilities:
  - SYS_ADMIN
  hostNetwork: false
  hostIPC: false
  hostPID: true
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - configMap
  - downwardAPI
  - emptyDir
  - secret
  - projected
  - hostPath
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: longhorn-psp-role
  namespace: longhorn-system
rules:
- apiGroups:
  - policy
  resources:
  - podsecuritypolicies
  verbs:
  - use
  resourceNames:
  - longhorn-psp
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: longhorn-psp-binding
  namespace: longhorn-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: longhorn-psp-role
subjects:
- kind: ServiceAccount
  name: longhorn-service-account
  namespace: longhorn-system
- kind: ServiceAccount
  name: default
  namespace: longhorn-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: longhorn-manager
  name: longhorn-manager
  namespace: longhorn-system
spec:
  selector:
    matchLabels:
      app: longhorn-manager
  template:
    metadata:
      labels:
        app: longhorn-manager
    spec:
      containers:
      - name: longhorn-manager
        image: longhornio/longhorn-manager:v1.0.2
        imagePullPolicy: IfNotPresent
        securityContext:
          privileged: true
        command:
        - longhorn-manager
        - -d
        - daemon
        - --engine-image
        - longhornio/longhorn-engine:v1.0.2
        - --instance-manager-image
        - longhornio/longhorn-instance-manager:v1_20200514
        - --manager-image
        - longhornio/longhorn-manager:v1.0.2
        - --service-account
        - longhorn-service-account
        ports:
        - containerPort: 9500
          name: manager
        readinessProbe:
          tcpSocket:
            port: 9500
        volumeMounts:
        - name: dev
          mountPath: /host/dev/
        - name: proc
          mountPath: /host/proc/
        - name: varrun
          mountPath: /var/run/
          mountPropagation: Bidirectional
        - name: longhorn
          mountPath: /var/lib/longhorn/
          mountPropagation: Bidirectional
        - name: longhorn-default-setting
          mountPath: /var/lib/longhorn-setting/
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        # Should be: mount path of the longhorn-default-setting volume + the key of the ConfigMap data in 04-default-setting.yaml
        - name: DEFAULT_SETTING_PATH
          value: /var/lib/longhorn-setting/default-setting.yaml
      volumes:
      - name: dev
        hostPath:
          path: /dev/
      - name: proc
        hostPath:
          path: /proc/
      - name: varrun
        hostPath:
          path: /var/run/
      - name: longhorn
        hostPath:
          path: /var/lib/longhorn/
      - name: longhorn-default-setting
        configMap:
          name: longhorn-default-setting
      # imagePullSecrets:
      # - name: ""
      serviceAccountName: longhorn-service-account
  updateStrategy:
    rollingUpdate:
      maxUnavailable: "100%"
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: longhorn-manager
  name: longhorn-backend
  namespace: longhorn-system
spec:
  type: ClusterIP
  sessionAffinity: ClientIP
  selector:
    app: longhorn-manager
  ports:
  - name: manager
    port: 9500
    targetPort: manager
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: longhorn-ui
  name: longhorn-ui
  namespace: longhorn-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: longhorn-ui
  template:
    metadata:
      labels:
        app: longhorn-ui
    spec:
      containers:
      - name: longhorn-ui
        image: longhornio/longhorn-ui:v1.0.2
        imagePullPolicy: IfNotPresent
        securityContext:
          runAsUser: 0
        ports:
        - containerPort: 8000
          name: http
        env:
        - name: LONGHORN_MANAGER_IP
          value: "http://longhorn-backend:9500"
      # imagePullSecrets:
      # - name:
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: longhorn-ui
  name: longhorn-frontend
  namespace: longhorn-system
spec:
  type: ClusterIP
  selector:
    app: longhorn-ui
  ports:
  - name: http
    port: 80
    targetPort: http
    nodePort: null
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: longhorn-driver-deployer
  namespace: longhorn-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: longhorn-driver-deployer
  template:
    metadata:
      labels:
        app: longhorn-driver-deployer
    spec:
      initContainers:
      - name: wait-longhorn-manager
        image: longhornio/longhorn-manager:v1.0.2
        command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done']
      containers:
      - name: longhorn-driver-deployer
        image: longhornio/longhorn-manager:v1.0.2
        imagePullPolicy: IfNotPresent
        command:
        - longhorn-manager
        - -d
        - deploy-driver
        - --manager-image
        - longhornio/longhorn-manager:v1.0.2
        - --manager-url
        - http://longhorn-backend:9500/v1
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: SERVICE_ACCOUNT
          valueFrom:
            fieldRef:
              fieldPath: spec.serviceAccountName
        # Manually set the root directory for CSI
        #- name: KUBELET_ROOT_DIR
        #  value: /var/lib/rancher/k3s/agent/kubelet
        # For air-gap installation:
        # replace PREFIX with your private registry
        #- name: CSI_ATTACHER_IMAGE
        #  value: PREFIX/csi-attacher:v2.0.0
        #- name: CSI_PROVISIONER_IMAGE
        #  value: PREFIX/csi-provisioner:v1.4.0
        #- name: CSI_NODE_DRIVER_REGISTRAR_IMAGE
        #  value: PREFIX/csi-node-driver-registrar:v1.2.0
        #- name: CSI_RESIZER_IMAGE
        #  value: PREFIX/csi-resizer:v0.3.0
        # Manually specify the number of CSI attacher replicas
        #- name: CSI_ATTACHER_REPLICA_COUNT
        #  value: "3"
        # Manually specify the number of CSI provisioner replicas
        #- name: CSI_PROVISIONER_REPLICA_COUNT
        #  value: "3"
        #- name: CSI_RESIZER_REPLICA_COUNT
        #  value: "3"
      #imagePullSecrets:
      #- name:
      serviceAccountName: longhorn-service-account
      securityContext:
        runAsUser: 0
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn
provisioner: driver.longhorn.io
allowVolumeExpansion: true
parameters:
  numberOfReplicas: "2"
  staleReplicaTimeout: "2880"
  fromBackup: ""
#  diskSelector: "ssd,fast"
#  nodeSelector: "storage,fast"
#  recurringJobs: '[{"name":"snap", "task":"snapshot", "cron":"*/1 * * * *", "retain":1},
#                   {"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1,
#                    "labels": {"interval":"2m"}}]'
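Save the manifest above as longhorn.yaml (the uninstall step later refers to it by that name) and apply it, then watch the pods in the longhorn-system namespace come up:

kubectl apply -f longhorn.yaml
kubectl get pod -n longhorn-system -w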
Note:
- numberOfReplicas in the StorageClass is set to the number of nodes in the cluster (2 here, matching the two longhorn-manager pods below)
- /var/lib/longhorn, the hostPath volume in the DaemonSet, is the mount point for the distributed storage on each node and can be changed as required
(base) [root@node46 longhorn]# kubectl get pod -n longhorn-system
NAME READY STATUS RESTARTS AGE
csi-attacher-7cb499df6-c4xj9 1/1 Running 0 84m
csi-attacher-7cb499df6-fcxzl 1/1 Running 0 84m
csi-attacher-7cb499df6-rszfs 1/1 Running 0 84m
csi-provisioner-67846b4b55-28h9g 1/1 Running 0 84m
csi-provisioner-67846b4b55-6gv8b 1/1 Running 0 84m
csi-provisioner-67846b4b55-pn89w 1/1 Running 0 84m
csi-resizer-5cb8df7db9-ftd5p 1/1 Running 0 84m
csi-resizer-5cb8df7db9-mq29s 1/1 Running 0 84m
csi-resizer-5cb8df7db9-spgbv 1/1 Running 0 84m
engine-image-ei-ee18f965-q95kd 1/1 Running 0 84m
engine-image-ei-ee18f965-tgwfq 1/1 Running 0 84m
instance-manager-e-a359f588 1/1 Running 0 84m
instance-manager-e-d7d8a07a 1/1 Running 0 84m
instance-manager-r-5d0b3eac 1/1 Running 0 84m
instance-manager-r-afd7923f 1/1 Running 0 84m
longhorn-csi-plugin-6g92n 2/2 Running 0 84m
longhorn-csi-plugin-hsk9f 2/2 Running 0 84m
longhorn-driver-deployer-6b7d76659f-699mx 1/1 Running 0 84m
longhorn-manager-7gk8f 1/1 Running 0 84m
longhorn-manager-td4tm 1/1 Running 1 84m
longhorn-ui-68b99bd456-28npc 1/1 Running 0 84m
(base) [root@node46 longhorn]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
longhorn driver.longhorn.io Delete Immediate true 84m
(base) [root@node46 longhorn]#
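The longhorn-frontend Service is ClusterIP only, so the UI is not reachable from outside the cluster by default. One option is a port-forward (local port 8080 is an arbitrary choice here):

kubectl -n longhorn-system port-forward svc/longhorn-frontend 8080:80
# then open http://localhost:8080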
Test Longhorn
pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
  labels:
    type: longhorn
    app: example
spec:
  storageClassName: longhorn
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
mysql.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-mysql
  labels:
    app: example
spec:
  selector:
    matchLabels:
      app: example
      tier: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: example
        tier: mysql
    spec:
      containers:
      - image: mysql:5.6
        name: mysql
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: password
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-pv-claim
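Apply both manifests, then wait for the PVC to bind and the MySQL pod to start:

kubectl apply -f pvc.yaml
kubectl apply -f mysql.yaml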
Results
(base) [root@node46 longhorn]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
mysql-pv-claim Bound pvc-b52c4ed0-993f-4a21-a5ca-4633f5d9d2ee 5Gi RWO longhorn 84m
(base) [root@node46 longhorn]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-b52c4ed0-993f-4a21-a5ca-4633f5d9d2ee 5Gi RWO Delete Bound default/mysql-pv-claim longhorn 84m
(base) [root@node46 longhorn]# kubectl get pod
NAME READY STATUS RESTARTS AGE
my-mysql-689bccf96b-bgnqc 1/1 Running 0 55m
[root@node41 ~]# cd /var/lib/longhorn/
engine-binaries/ longhorn-disk.cfg replicas/
[root@node41 ~]# ls /var/lib/longhorn/replicas/pvc-b52c4ed0-993f-4a21-a5ca-4633f5d9d2ee-0f45bbb0/
revision.counter volume-head-000.img volume-head-000.img.meta volume.meta
[root@node41 ~]#
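To confirm the data actually persists, a minimal sketch: create a database inside the pod, delete the pod, and check the database is still there once the Deployment recreates it. The pod name below comes from the output above (the replacement pod's name will differ), and the root password is the one set in mysql.yaml:

kubectl exec -it my-mysql-689bccf96b-bgnqc -- mysql -uroot -ppassword -e 'CREATE DATABASE IF NOT EXISTS demo;'
kubectl delete pod my-mysql-689bccf96b-bgnqc
kubectl get pod   # wait for the replacement pod to be Running
kubectl exec -it <new-pod-name> -- mysql -uroot -ppassword -e 'SHOW DATABASES;'   # demo should still be listed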
Note:
- Longhorn is distributed block storage, unlike distributed file systems (MFS, LizardFS): a volume cannot store more data than the PV size it was created with (5Gi in the example above)
- Testing with the dd command (bs=1M,count=1024 or bs=500K,count=2048) shows that Longhorn's storage performance is not as good as a local disk's; see the sketch after this list
- Longhorn still significantly outperforms nfs-client-provisioner (2-3x)
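A minimal sketch of such a dd test, run inside the MySQL pod against the Longhorn-backed mount. The pod name comes from the output above; oflag=direct is an assumption added here to bypass the page cache:

kubectl exec -it my-mysql-689bccf96b-bgnqc -- dd if=/dev/zero of=/var/lib/mysql/ddtest.img bs=1M count=1024 oflag=direct
# for comparison, run the same dd against a local disk path on the node, e.g. /tmp
kubectl exec -it my-mysql-689bccf96b-bgnqc -- rm /var/lib/mysql/ddtest.img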
Uninstall Longhorn
uninstall.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: longhorn-uninstall-service-account
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: longhorn-uninstall-role
rules:
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - "*"
- apiGroups: [""]
  resources: ["pods", "persistentvolumes", "persistentvolumeclaims", "nodes"]
  verbs: ["*"]
- apiGroups: ["apps"]
  resources: ["daemonsets", "statefulsets", "deployments"]
  verbs: ["*"]
- apiGroups: ["batch"]
  resources: ["jobs", "cronjobs"]
  verbs: ["*"]
- apiGroups: ["scheduling.k8s.io"]
  resources: ["priorityclasses"]
  verbs: ["watch", "list"]
- apiGroups: ["storage.k8s.io"]
  resources: ["csidrivers"]
  verbs: ["*"]
- apiGroups: ["longhorn.io"]
  resources: ["volumes", "engines", "replicas", "settings", "engineimages", "nodes", "instancemanagers"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: longhorn-uninstall-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: longhorn-uninstall-role
subjects:
- kind: ServiceAccount
  name: longhorn-uninstall-service-account
  namespace: default
---
apiVersion: batch/v1
kind: Job
metadata:
  name: longhorn-uninstall
  namespace: default
spec:
  activeDeadlineSeconds: 900
  backoffLimit: 1
  template:
    metadata:
      name: longhorn-uninstall
    spec:
      containers:
      - name: longhorn-uninstall
        image: longhornio/longhorn-manager:v1.0.2
        imagePullPolicy: IfNotPresent
        command:
        - longhorn-manager
        - uninstall
        - --force
        env:
        - name: LONGHORN_NAMESPACE
          value: longhorn-system
      restartPolicy: OnFailure
      serviceAccountName: longhorn-uninstall-service-account
uninstall
kubectl apply -f uninstall.yaml
kubectl get job/longhorn-uninstall
...
After the Job completes, run:
kubectl delete -f longhorn.yaml
kubectl delete -f uninstall.yaml
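To confirm the uninstall finished cleanly, check that the namespace and the longhorn.io CRDs are gone (both commands should eventually return nothing or NotFound):

kubectl get ns longhorn-system
kubectl get crd | grep longhorn.io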