Deploying GlusterFS on Kubernetes as Kubernetes Persistent File Storage
Prepare four virtual machines running CentOS 7 and set up a Kubernetes cluster on them; this walkthrough uses kubeadm to install a single-master, multi-node cluster. Attach an additional disk to every worker node and leave it unformatted.
[root@k8s-node1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 100G 0 disk
├─sda1 8:1 0 300M 0 part /boot
├─sda2 8:2 0 3.9G 0 part
└─sda3 8:3 0 95.9G 0 part /
sdb 8:16 0 100G 0 disk
sr0 11:0 1 1024M 0 rom
IP | Hostname | Role |
192.168.87.100 | k8s-master | k8s control plane |
192.168.87.101 | k8s-node1 | k8s worker node |
192.168.87.102 | k8s-node2 | k8s worker node |
192.168.87.103 | k8s-node3 | k8s worker node |
1. Install the GlusterFS client on all nodes
yum install -y glusterfs glusterfs-fuse
2. Label the three worker nodes
kubectl label node k8s-node1 storagenode=glusterfs
kubectl label node k8s-node2 storagenode=glusterfs
kubectl label node k8s-node3 storagenode=glusterfs
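You can confirm the labels took effect with a selector query before moving on:
# should list k8s-node1, k8s-node2 and k8s-node3
kubectl get nodes -l storagenode=glusterfs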
3. Load the required kernel modules on all nodes
# Load the modules
modprobe dm_snapshot
modprobe dm_mirror
modprobe dm_thin_pool
# Verify the modules loaded successfully
lsmod | grep dm_snapshot
lsmod | grep dm_mirror
lsmod | grep dm_thin_pool
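These modules do not survive a reboot by themselves; one way to persist them on CentOS 7 is a systemd modules-load drop-in (a sketch, the file name below is arbitrary):
# Load the device-mapper modules automatically at boot
cat > /etc/modules-load.d/glusterfs.conf <<EOF
dm_snapshot
dm_mirror
dm_thin_pool
EOF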
4. Download the Heketi release package
The package contains the manifests needed to deploy GlusterFS. Download it, upload it to the master node, and extract it. None of the manifests specify a namespace by default; you can add your own namespace if desired.
https://github.com/heketi/heketi/releases/download/v10.4.0/heketi-client-v10.4.0-release-10.linux.amd64.tar.gz
tar -zxvf heketi-client-v10.4.0-release-10.linux.amd64.tar.gz
cd heketi-client/share/heketi/kubernetes/
5. Create the GlusterFS cluster
kubectl apply -f glusterfs-daemonset.json
[root@k8s-master kubernetes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
glusterfs-btgph 1/1 Running 0 2m54s
glusterfs-gn58j 1/1 Running 0 2m54s
glusterfs-q4p7j 1/1 Running 0 2m54s
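As an optional sanity check, the gluster CLI can be run inside one of the DaemonSet pods (pod name taken from the output above):
kubectl exec glusterfs-btgph -- gluster --version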
6. Create the Heketi service
Create the service account
kubectl apply -f heketi-service-account.json
Bind the Heketi service account to the required cluster role and create the config secret. Before creating the secret, edit heketi.json; the main change is setting the admin and user passwords.
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",
  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": false,
  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "123456"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "123456"
    }
  },
  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
    "executor": "kubernetes",
    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",
    "kubeexec": {
      "rebalance_on_expansion": true
    },
    "sshexec": {
      "rebalance_on_expansion": true,
      "keyfile": "/etc/heketi/private_key",
      "fstab": "/etc/fstab",
      "port": "22",
      "user": "root",
      "sudo": false
    }
  },
  "_backup_db_to_kube_secret": "Backup the heketi database to a Kubernetes secret when running in Kubernetes. Default is off.",
  "backup_db_to_kube_secret": false
}
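Before packaging this file into a secret, it can be worth confirming that the edited JSON still parses (a quick check, assuming jq is installed; python -m json.tool works as well):
jq . heketi.json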
kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=cluster-admin --serviceaccount=default:heketi-service-account
kubectl create secret generic heketi-config-secret --from-file=./heketi.json
Bootstrap the Heketi deployment
[root@k8s-master kubernetes]# kubectl create -f heketi-bootstrap.json
[root@k8s-master kubernetes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
deploy-heketi-974d59f8c-4x29c 1/1 Running 0 20s
glusterfs-btgph 1/1 Running 0 19m
glusterfs-gn58j 1/1 Running 0 19m
glusterfs-q4p7j 1/1 Running 0 19m
7. Install heketi-cli
[root@k8s-master bin]# pwd
/root/glusterfs/heketi-client/bin
[root@k8s-master bin]# ls
heketi-cli
[root@k8s-master bin]# cp heketi-cli /usr/local/bin/
[root@k8s-master bin]# heketi-cli -v
heketi-cli v10.4.0-release-10
Copy heketi-cli to the worker nodes
scp /usr/local/bin/heketi-cli k8s-node1:/usr/local/bin/
scp /usr/local/bin/heketi-cli k8s-node2:/usr/local/bin/
scp /usr/local/bin/heketi-cli k8s-node3:/usr/local/bin/
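Optionally, verify the binary on each worker; this assumes the same SSH access already used by the scp commands above:
for n in k8s-node1 k8s-node2 k8s-node3; do ssh $n heketi-cli -v; done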
8. Configure topology-sample.json
cd heketi-client/share/heketi/kubernetes/
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "k8s-node1"
              ],
              "storage": [
                "192.168.87.101"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": true
            }
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "k8s-node2"
              ],
              "storage": [
                "192.168.87.102"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": true
            }
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "k8s-node3"
              ],
              "storage": [
                "192.168.87.103"
              ]
            },
            "zone": 1
          },
          "devices": [
            {
              "name": "/dev/sdb",
              "destroydata": true
            }
          ]
        }
      ]
    }
  ]
}
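Heketi expects each listed device to be a raw block device with no existing filesystem or LVM signatures. If a disk was used before, it can be cleared on the worker node first (destructive; run only against the spare /dev/sdb):
wipefs /dev/sdb      # list any existing signatures
wipefs -a /dev/sdb   # wipe them; destroys all data on the disk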
9. Get the current Heketi ClusterIP
[root@k8s-master kubernetes]# kubectl get svc|grep heketi
deploy-heketi ClusterIP 10.111.162.155 <none> 8080/TCP 9m43s
[root@k8s-master kubernetes]# curl http://10.111.162.155:8080/hello
Hello from Heketi
[root@k8s-master kubernetes]# export HEKETI_CLI_SERVER=http://10.111.162.155:8080
[root@k8s-master kubernetes]# echo $HEKETI_CLI_SERVER
http://10.111.162.155:8080
10. Create the GlusterFS cluster with heketi-cli
[root@k8s-master kubernetes]# heketi-cli -s $HEKETI_CLI_SERVER --user admin --secret '123456' topology load --json=topology-sample.json
Creating cluster ... ID: 954e388856aca0b82f3e8e65913229f1
Allowing file volumes on cluster.
Allowing block volumes on cluster.
Creating node k8s-node1 ... ID: 183d04f5e7ab60db3ccef563f173637d
Adding device /dev/sdb ... OK
Creating node k8s-node2 ... ID: 6c30ca747bdba21548ed6fc034c58c8b
Adding device /dev/sdb ... OK
Creating node k8s-node3 ... ID: bb53dc577a1615abb1c78bee9e25d075
Adding device /dev/sdb ... OK
[root@k8s-master kubernetes]# heketi-cli -s $HEKETI_CLI_SERVER --user admin --secret '123456' topology info
11. Persist the Heketi configuration
Install on all nodes:
yum install -y device-mapper*
Generate the Kubernetes resources that move the Heketi database onto GlusterFS
[root@k8s-master kubernetes]# heketi-cli -s $HEKETI_CLI_SERVER --user admin --secret '123456' setup-openshift-heketi-storage
Saving heketi-storage.json
Apply the generated resources
[root@k8s-master kubernetes]# kubectl apply -f heketi-storage.json
secret/heketi-storage-secret created
endpoints/heketi-storage-endpoints created
service/heketi-storage-endpoints created
job.batch/heketi-storage-copy-job created
Delete the intermediate bootstrap resources
kubectl delete all,svc,jobs,deployment,secret --selector="deploy-heketi"
Deploy the persistent Heketi
[root@k8s-master kubernetes]# kubectl apply -f heketi-deployment.json
secret/heketi-db-backup created
service/heketi created
deployment.apps/heketi created
Check the Heketi service after persistence and re-export the environment variable
[root@k8s-master kubernetes]# kubectl get svc|grep heketi
heketi ClusterIP 10.101.246.205 <none> 8080/TCP 54s
heketi-storage-endpoints ClusterIP 10.102.127.45 <none> 1/TCP 117s
[root@k8s-master kubernetes]# curl http://10.101.246.205:8080/hello
Hello from Heketi
[root@k8s-master kubernetes]# export HEKETI_CLI_SERVER=http://10.101.246.205:8080
[root@k8s-master kubernetes]# echo $HEKETI_CLI_SERVER
http://10.101.246.205:8080
View the cluster information
[root@k8s-master kubernetes]# heketi-cli -s $HEKETI_CLI_SERVER --user admin --secret '123456' topology info
Cluster Id: 86b78d5c5eb4fda20f5626188d46b77f

    File: true
    Block: true

    Volumes:

        Name: heketidbstorage
        Size: 2
        Id: 3e57d9407c10797ad062826f9b56010b
        Cluster Id: 86b78d5c5eb4fda20f5626188d46b77f
        Mount: 192.168.87.101:heketidbstorage
        Mount Options: backup-volfile-servers=192.168.87.103,192.168.87.102
        Durability Type: replicate
        Replica: 3
        Snapshot: Disabled

            Bricks:
                Id: e7461e38b5aabb14578d6f839f574852
                Path: /var/lib/heketi/mounts/vg_90684d0d12a5d9872cd582493fff4c4c/brick_e7461e38b5aabb14578d6f839f574852/brick
                Size (GiB): 2
                Node: 8c00bb155466360217390cfcb88f3e4c
                Device: 90684d0d12a5d9872cd582493fff4c4c

                Id: f54a19c0655c37e757718795830bdfd8
                Path: /var/lib/heketi/mounts/vg_0ba23d64a02d52829bb466379fd4d445/brick_f54a19c0655c37e757718795830bdfd8/brick
                Size (GiB): 2
                Node: 3124c7acb431d0fd30edd95271fbfe90
                Device: 0ba23d64a02d52829bb466379fd4d445

                Id: f8de34ac2f34339e00624437e8682e8f
                Path: /var/lib/heketi/mounts/vg_6d84568d990154e7aa2617889b11183d/brick_f8de34ac2f34339e00624437e8682e8f/brick
                Size (GiB): 2
                Node: 9c4635307b289dd66baa0dcd8c5347c4
                Device: 6d84568d990154e7aa2617889b11183d

    Nodes:

        Node Id: 3124c7acb431d0fd30edd95271fbfe90
        State: online
        Cluster Id: 86b78d5c5eb4fda20f5626188d46b77f
        Zone: 1
        Management Hostnames: k8s-node1
        Storage Hostnames: 192.168.87.101
        Devices:
            Id:0ba23d64a02d52829bb466379fd4d445 State:online Size (GiB):99 Used (GiB):2 Free (GiB):97
                Known Paths: /dev/sdb
                Bricks:
                    Id:f54a19c0655c37e757718795830bdfd8 Size (GiB):2 Path: /var/lib/heketi/mounts/vg_0ba23d64a02d52829bb466379fd4d445/brick_f54a19c0655c37e757718795830bdfd8/brick

        Node Id: 8c00bb155466360217390cfcb88f3e4c
        State: online
        Cluster Id: 86b78d5c5eb4fda20f5626188d46b77f
        Zone: 1
        Management Hostnames: k8s-node3
        Storage Hostnames: 192.168.87.103
        Devices:
            Id:90684d0d12a5d9872cd582493fff4c4c State:online Size (GiB):99 Used (GiB):2 Free (GiB):97
                Known Paths: /dev/sdb
                Bricks:
                    Id:e7461e38b5aabb14578d6f839f574852 Size (GiB):2 Path: /var/lib/heketi/mounts/vg_90684d0d12a5d9872cd582493fff4c4c/brick_e7461e38b5aabb14578d6f839f574852/brick

        Node Id: 9c4635307b289dd66baa0dcd8c5347c4
        State: online
        Cluster Id: 86b78d5c5eb4fda20f5626188d46b77f
        Zone: 1
        Management Hostnames: k8s-node2
        Storage Hostnames: 192.168.87.102
        Devices:
            Id:6d84568d990154e7aa2617889b11183d State:online Size (GiB):99 Used (GiB):2 Free (GiB):97
                Known Paths: /dev/sdb
                Bricks:
                    Id:f8de34ac2f34339e00624437e8682e8f Size (GiB):2 Path: /var/lib/heketi/mounts/vg_6d84568d990154e7aa2617889b11183d/brick_f8de34ac2f34339e00624437e8682e8f/brick
12. Create storageclass-gfs-heketi.yaml
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: kube-system
type: kubernetes.io/glusterfs
data:
  key: "MTIzNDU2"   # Replace with your own key, Base64-encoded.
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: "true"
    storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce","ReadOnlyMany","ReadWriteMany"]'
  name: glusterfs
parameters:
  clusterid: "86b78d5c5eb4fda20f5626188d46b77f"   # Replace with your own GlusterFS cluster ID.
  gidMax: "50000"
  gidMin: "40000"
  restauthenabled: "true"
  resturl: "http://10.101.246.205:8080"   # Gluster REST / Heketi service URL used to provision volumes on demand. Replace with your own URL.
  restuser: admin
  secretName: heketi-secret
  secretNamespace: kube-system
  volumetype: "replicate:3"   # Replace with your own volume type.
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
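The key value MTIzNDU2 above is simply the Base64 encoding of the admin password 123456 configured in heketi.json; if you chose a different password, regenerate the value:
echo -n "123456" | base64   # prints MTIzNDU2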
kubectl apply -f storageclass-gfs-heketi.yaml
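After applying, the new StorageClass should be listed (and marked as default because of the annotation above):
kubectl get storageclass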
13. Test a PVC: create pod-use-pvc.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-use-pvc
spec:
  containers:
  - name: pod-use-pvc
    image: busybox
    command:
    - sleep
    - "3600"
    volumeMounts:
    - name: gluster-volume
      mountPath: "/pv-data"
      readOnly: false
  volumes:
  - name: gluster-volume
    persistentVolumeClaim:
      claimName: pvc-gluster-heketi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-gluster-heketi
spec:
  accessModes: [ "ReadWriteOnce" ]
  storageClassName: "glusterfs"
  resources:
    requests:
      storage: 1Gi
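Apply the manifest (this step is implied by the pod status shown below):
kubectl apply -f pod-use-pvc.yaml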
[root@k8s-master kubernetes]# kubectl get pods
NAME READY STATUS RESTARTS AGE
glusterfs-btgph 1/1 Running 0 62m
glusterfs-gn58j 1/1 Running 0 62m
glusterfs-q4p7j 1/1 Running 0 62m
heketi-5dccbd5d49-zprtr 1/1 Running 0 12m
pod-use-pvc 1/1 Running 0 81s
[root@k8s-master kubernetes]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc-gluster-heketi Bound pvc-3f187cad-7952-424c-bff9-6bb0a391055f 1Gi RWO glusterfs 87s
[root@k8s-master kubernetes]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-3f187cad-7952-424c-bff9-6bb0a391055f 1Gi RWO Delete Bound default/pvc-gluster-heketi glusterfs 87s
[root@k8s-master kubernetes]#
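As a final check, not part of the original steps, you can write through the mount and read the data back; the file lands on the replicated GlusterFS volume:
kubectl exec pod-use-pvc -- sh -c 'echo hello-gluster > /pv-data/test.txt'
kubectl exec pod-use-pvc -- cat /pv-data/test.txt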