1. Resource preparation (run on all machines)

Hostname        Public IP        Private IP
k8s-master01    39.104.173.77    172.24.114.3
k8s-node01      39.104.179.210   172.24.114.4
k8s-node02      39.104.173.12    172.24.114.1
k8s-node03      39.104.177.2     172.24.114.2
  1. Change hostnames
# On VM 172.24.114.3, set the k8s-master01 node
hostnamectl set-hostname k8s-master01
bash # start a new shell so the name takes effect immediately

# On VM 172.24.114.4, set the k8s-node01 node
hostnamectl set-hostname k8s-node01
bash # start a new shell so the name takes effect immediately

# On VM 172.24.114.1, set the k8s-node02 node
hostnamectl set-hostname k8s-node02
bash # start a new shell so the name takes effect immediately

# On VM 172.24.114.2, set the k8s-node03 node
hostnamectl set-hostname k8s-node03
bash # start a new shell so the name takes effect immediately
  2. Turn off the firewall
systemctl stop firewalld
systemctl disable firewalld
  3. Allow iptables to see bridged traffic
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
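
  • A quick optional check that the settings above are active:
lsmod | grep br_netfilter # the module should be listed
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables # both should print 1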
  4. Set SELinux to permissive mode (effectively disabling it)
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
  5. Disable swap (Kubernetes requires swap to be off)
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab # comment out the swap entry so it stays off after reboot
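
  • Optional check: SELinux should now report Permissive and no swap devices should remain:
getenforce # expect Permissive
swapon --show # no output means swap is fully off
free -h # the Swap row should show 0B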
  6. Configure /etc/hosts
# Map the master and node IPs to hostnames; adjust to your own environment
cat >> /etc/hosts << EOF
172.24.114.3 k8s-master01
172.24.114.4 k8s-node01
172.24.114.1 k8s-node02
172.24.114.2 k8s-node03
EOF

6.1 Install docker

# Remove any previous docker versions
sudo yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine
# Install docker
sudo yum install -y yum-utils
sudo yum-config-manager \
    --add-repo \
    http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum makecache fast
sudo yum install docker-ce docker-ce-cli containerd.io -y
sudo systemctl start docker
sudo systemctl enable docker
sudo systemctl status docker

6.2 Change the docker cgroup driver

Running kubeadm init later may produce this warning:

[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker
cgroup driver. The recommended driver is "systemd".

# Create the config file (> rather than >> so repeated runs don't append duplicate JSON)
cat > /etc/docker/daemon.json << EOF
{
 "exec-opts":["native.cgroupdriver=systemd"]
}
EOF

# Restart docker
systemctl restart docker
systemctl status docker
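
# Optional check: docker should now report the systemd cgroup driver
docker info | grep -i 'cgroup driver'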
  7. Configure the Alibaba Cloud Kubernetes yum repo
    Error seen: [Errno -1] repomd.xml signature could not be verified for kubernetes Trying other mirror.
    Reference: https://github.com/kubernetes/kubernetes/issues/60134
    Fix: set repo_gpgcheck=0
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
#repo_gpgcheck=1
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
  8. Install kubelet, kubeadm, kubectl
sudo yum update -y # refresh metadata after setting repo_gpgcheck=0
# sudo yum install -y kubelet-1.19.4 kubeadm-1.19.4 kubectl-1.19.4 # a specific release can be pinned
# sudo yum install -y kubelet-1.20.0 kubeadm-1.20.0 kubectl-1.20.0 # a specific release can be pinned
sudo yum install -y kubelet kubeadm kubectl # without a version, the latest release is installed
sudo systemctl enable --now kubelet
sudo systemctl start kubelet
#sudo systemctl status kubelet -- kubelet is not healthy yet at this point; it comes up on the master after kubeadm init, and on each worker after kubeadm join


  • Verify the tools are installed
yum list installed | grep kubelet
yum list installed | grep kubeadm
yum list installed | grep kubectl

kubelet --version # check the installed version; here it reports Kubernetes v1.23.3

2. Create the cluster with kubeadm

  • Initialize the cluster with kubeadm # run on the master

  • Be sure to set --apiserver-advertise-address to the master's own IP, here 172.24.114.3

# --apiserver-advertise-address is the master node's IP (172.24.114.3 here); change it to your own master IP
# --pod-network-cidr 10.244.0.0/16 must match the Network value in the flannel config applied later
kubeadm init --apiserver-advertise-address 172.24.114.3 \
--image-repository registry.aliyuncs.com/google_containers \
--pod-network-cidr 10.244.0.0/16 \
--service-cidr 10.96.0.0/12
#--kubernetes-version v1.23.3 \ # this line is optional; without it the latest version is used
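  • Optional: if image pulls make kubeadm init slow or time out, the control-plane images can be pre-pulled first:
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers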
  • If this step fails, the master IP was probably wrong or --apiserver-advertise-address
    172.24.114.3 was omitted; reset first:
kubeadm reset
  • Answer y when prompted
  • Once kubeadm reset finishes, rerun the kubeadm init command above
# Configure the kubectl client to access the cluster
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
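
# Quick check: kubectl should now reach the API server
kubectl cluster-info
kubectl get nodes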

Note: run the following on a worker node

# Add the worker to the cluster ---> run on worker node k8s-node01
# This command is generated by kubeadm init; keep your own copy
kubeadm join 172.24.114.3:6443 --token bofh8w.5r6qwmvargj3d0do \
    --discovery-token-ca-cert-hash sha256:0724d03bf5ca008808b4dc9c68643c90e54d36733a487dc7d730dca35a952b89 
[root@iZ0jlhvtxignmaozy30vffZ ~]# kubectl get nodes
NAME                      STATUS     ROLES    AGE     VERSION
iz0jlhvtxignmaozy30vffz   NotReady   master   8m19s   v1.19.4
iz0jlhvtxignmaozy30vfgz   NotReady   <none>   21s     v1.19.4

  11. Add the flannel pod network --> run on the master node
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
  • If the URL above is slow or unresponsive, copy the content below into kube-flannel.yml
  • cat kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
       #image: flannelcni/flannel-cni-plugin:v1.0.1 for ppc64le and mips64le (dockerhub limitations may apply)
        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
       #image: flannelcni/flannel:v0.16.3 for ppc64le and mips64le (dockerhub limitations may apply)
        image: rancher/mirrored-flannelcni-flannel:v0.16.3
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
       #image: flannelcni/flannel:v0.16.3 for ppc64le and mips64le (dockerhub limitations may apply)
        image: rancher/mirrored-flannelcni-flannel:v0.16.3
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
kubectl apply -f ./kube-flannel.yml # after this, node status moves from NotReady to Ready
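
# Optional check: watch the flannel pods start and the nodes turn Ready
kubectl get pods -n kube-system -l app=flannel
kubectl get nodes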

12. View the cluster from the control plane (k8s-master01)

[root@iZ0jlhvtxignmaozy30vffZ ~]# kubectl get nodes 
NAME                      STATUS   ROLES    AGE     VERSION
iz0jlhvtxignmaozy30vffz   Ready    master   11m     v1.19.4
iz0jlhvtxignmaozy30vfgz   Ready    <none>   3m14s   v1.19.4
[root@iZ0jlhvtxignmaozy30vffZ ~]# kubectl get pod -A
NAMESPACE     NAME                                              READY   STATUS    RESTARTS   AGE
kube-system   coredns-6d56c8448f-n7f9k                          1/1     Running   0          20m
kube-system   coredns-6d56c8448f-tz6m4                          1/1     Running   0          20m
kube-system   etcd-iz0jlhvtxignmaozy30vffz                      1/1     Running   0          20m
kube-system   kube-apiserver-iz0jlhvtxignmaozy30vffz            1/1     Running   0          20m
kube-system   kube-controller-manager-iz0jlhvtxignmaozy30vffz   1/1     Running   0          20m
kube-system   kube-flannel-ds-7j7jn                             1/1     Running   0          9m51s
kube-system   kube-flannel-ds-fdkbv                             1/1     Running   0          9m51s
kube-system   kube-proxy-fdz49                                  1/1     Running   0          12m
kube-system   kube-proxy-kgzcp                                  1/1     Running   0          20m
kube-system   kube-scheduler-iz0jlhvtxignmaozy30vffz            1/1     Running   0          20m

13. On the worker nodes (k8s-node01, k8s-node02, k8s-node03), configure admin.conf so kubectl get ... works on the workers


# Copy the file from k8s-master01 to k8s-node01, k8s-node02 and k8s-node03
# 172.24.114.4 is k8s-node01
scp /etc/kubernetes/admin.conf root@172.24.114.4:/etc/kubernetes/

# 172.24.114.1 is k8s-node02
scp /etc/kubernetes/admin.conf root@172.24.114.1:/etc/kubernetes/

# 172.24.114.2 is k8s-node03
scp /etc/kubernetes/admin.conf root@172.24.114.2:/etc/kubernetes/

# Run on k8s-node01, k8s-node02 and k8s-node03
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile

Query cluster information from k8s-node01, for example:
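
# On k8s-node01, the same queries as on the master should now work
kubectl get nodes
kubectl get pod -A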

3. Add a new worker node

14. Run steps 0-9 above, in order, on the new node

15. Join the new worker node to the cluster

# On the new worker node
kubeadm join 172.24.114.3:6443 --token <token>  --discovery-token-ca-cert-hash sha256:<hash>

kubeadm token list # cluster created less than 24 hours ago: list an existing <token>
or
kubeadm token create   # cluster older than 24 hours (tokens expire): create a new <token>

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
   openssl dgst -sha256 -hex | sed 's/^.* //'  # compute <hash>
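
# Alternatively, run this on the master to print a ready-made join command (fresh token plus hash):
kubeadm token create --print-join-command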

16. Configure the kubectl client on the new node
Follow step 13 above.

4. Install NFS

Hostname        Public IP        Private IP
k8s-master01    39.104.173.77    172.24.114.3
k8s-node01      39.104.179.210   172.24.114.4
k8s-node02      39.104.173.12    172.24.114.1
k8s-node03      39.104.177.2     172.24.114.2

4.0 Set up NFS (k8s-master01, 172.24.114.3, is used here)

       server: 172.24.114.3 
       path: /data/rabbitmq

4.1 Run on the host that provides NFS storage (k8s-master01 here)

yum install -y nfs-utils # run this command on every node, master and workers alike

echo "/data/rabbitmq *(insecure,rw,sync,no_root_squash)" > /etc/exports

# Create the shared directory (the nfs service itself is started below)
mkdir -p /data/rabbitmq

# On the master
chmod -R 777 /data/rabbitmq

# Re-export the shares so the config takes effect
exportfs -r

# Check that the export is active
exportfs

systemctl enable rpcbind && systemctl start rpcbind

systemctl enable nfs && systemctl start nfs

4.2 Run on the worker hosts (k8s-node01, k8s-node02, k8s-node03)

yum install -y nfs-utils # run this command on every node, master and workers alike

showmount -e 172.24.114.3 # verify the workers can see the NFS export on the master
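
# Optional end-to-end check on a worker: mount the export by hand, write a file, then unmount
mount -t nfs 172.24.114.3:/data/rabbitmq /mnt
touch /mnt/nfs-test && ls -l /mnt && rm /mnt/nfs-test
umount /mnt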

5. Configure StorageClass storage

vim nfs-storage.yaml
## Create a StorageClass
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: nfs-provisioner  # must match the PROVISIONER_NAME env var under spec.template.spec.containers in the Deployment below
parameters:
  archiveOnDelete: "true"  ## whether the PV's contents are archived when the PV is deleted

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
          # resources:
          #    limits:
          #      cpu: 10m
          #    requests:
          #      cpu: 10m
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs-provisioner
            - name: NFS_SERVER
              value: 172.24.114.3 ## your own NFS server address
            - name: NFS_PATH  
              value: /data/rabbitmq  ## directory exported by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.24.114.3
            path: /data/rabbitmq
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
  
kubectl apply -f nfs-storage.yaml
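
To confirm dynamic provisioning works end to end, a throwaway PVC can be created (a minimal sketch; the name test-claim is arbitrary):

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: nfs-storage
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Mi
EOF
kubectl get pvc test-claim   # should reach STATUS Bound within a few seconds
kubectl delete pvc test-claim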

6. Install Helm

6.1 Download page: https://github.com/helm/helm/releases
6.2 Download the package helm-v3.6.3-linux-amd64.tar.gz (pick one of the two commands below)

wget https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz

# curl -L https://get.helm.sh/helm-v3.6.3-linux-amd64.tar.gz -o helm-v3.6.3-linux-amd64.tar.gz

6.3 Unpack the archive

[root@k8s-master01 ~]# tar -zxvf helm-v3.6.3-linux-amd64.tar.gz 
[root@k8s-master01 ~]# cd linux-amd64/
[root@k8s-master01 linux-amd64]# ls
 helm  LICENSE  README.md
[root@k8s-master01 linux-amd64]# cp helm /usr/local/bin/

[root@k8s-master01 ~]# helm version
version.BuildInfo{Version:"v3.6.2", GitCommit:"ee407bdf364942bcb8e8c665f82e15aa28009b71", GitTreeState:"clean", GoVersion:"go1.16.5"}

7. Install a highly available rabbitmq-ha cluster with Helm

7.0 Helm docs (Chinese): https://helm.sh/zh/docs/helm/helm_show_values/
7.1 Add a third-party chart repository to Helm:

[root@k8s-master01 ~]# helm repo add stable http://mirror.azure.cn/kubernetes/charts/
"stable" already exists with the same configuration, skipping

[root@k8s-master01 ~]# helm repo add aliyuncs https://apphub.aliyuncs.com  # Alibaba Cloud charts repo

[root@k8s-master01 ~]# helm repo list
NAME    	URL                                      
bitnami 	https://charts.bitnami.com/bitnami       
dandydev	https://dandydeveloper.github.io/charts  
stable  	http://mirror.azure.cn/kubernetes/charts/

7.2 A few actively maintained repos worth adding (no need to run these here)

[root@k8s-master01 ~]# helm repo add aliyuncs https://apphub.aliyuncs.com  # Alibaba Cloud charts repo
[root@k8s-master01 ~]# helm repo add stable http://mirror.azure.cn/kubernetes/charts  # Microsoft mirror
[root@k8s-master01 ~]# helm repo add bitnami https://charts.bitnami.com/bitnami  # has most charts
[root@k8s-master01 ~]# helm repo add harbor https://helm.goharbor.io # Harbor
[root@k8s-master01 ~]# helm repo add gpu-helm-charts https://nvidia.github.io/gpu-monitoring-tools/helm-charts    # NVIDIA DCGM
[root@k8s-master01 ~]# helm repo add elastic https://helm.elastic.co # Elastic: elasticsearch and friends
[root@k8s-master01 ~]# helm repo add stablecharts https://charts.helm.sh/stable # no longer updated, but still has useful charts
[root@k8s-master01 ~]# helm repo update

7.3 Once the repos are added, charts can be searched like this:

[root@k8s-master01 ~]# helm search repo rabbitmq-ha --versions
NAME                	CHART VERSION	APP VERSION	DESCRIPTION                                       
aliyuncs/rabbitmq-ha	1.39.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.38.2       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.38.1       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.36.4       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.36.3       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.36.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.34.1       	3.7.19     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.34.0       	3.7.19     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.33.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.32.4       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.32.3       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.32.2       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.32.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.31.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.30.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.29.1       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.29.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.28.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.27.2       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.27.1       	3.7.12     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.47.1       	3.8.7      	DEPRECATED - Highly available RabbitMQ cluster,...
stable/rabbitmq-ha  	1.47.0       	3.8.7      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.46.6       	3.8.7      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.46.5       	3.8.6      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.46.4       	3.8.5      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.46.3       	3.8.5      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.46.2       	3.8.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.46.1       	3.8.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.46.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.45.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.44.4       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.44.3       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.44.2       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.44.1       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.44.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.43.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.42.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.41.1       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.41.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.40.2       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.40.1       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.40.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.39.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.38.2       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.38.1       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.38.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.37.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.36.5       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.36.4       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.36.3       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.36.2       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.36.1       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.36.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.35.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.34.1       	3.7.19     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.34.0       	3.7.19     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.33.1       	3.7.19     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.33.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.32.4       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.32.3       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.32.2       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.32.1       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.32.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.31.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.30.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.29.1       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.29.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.28.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.27.2       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.27.1       	3.7.12     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.27.0       	3.7.12     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.26.0       	3.7.12     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.25.0       	3.7.12     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.24.0       	3.7.12     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.23.0       	3.7.12     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.22.2       	3.7.12     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.22.1       	3.7.12     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.22.0       	3.7.12     	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.21.1       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.21.0       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.20.1       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.20.0       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.19.0       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.18.1       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.18.0       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.17.0       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.16.1       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.16.0       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.15.0       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.14.4       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.14.3       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.14.2       	3.7.8      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.14.1       	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.14.0       	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.13.1       	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.13.0       	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.12.1       	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.12.0       	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.11.1       	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.11.0       	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.10.0       	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.9.1        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.9.0        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.8.2        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.8.1        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.8.0        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.7.2        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.7.0        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.6.3        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.6.2        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.6.1        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.6.0        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.5.2        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.5.1        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.5.0        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.4.0        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.3.5        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.3.4        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.3.3        	3.7.4      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.3.2        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.3.1        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.3.0        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.2.0        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.1.0        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.0.8        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.0.7        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.0.6        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.0.5        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.0.4        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.0.3        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.0.2        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.0.1        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	1.0.0        	3.7.3      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	0.1.1        	3.7.0      	Highly available RabbitMQ cluster, the open sou...
stable/rabbitmq-ha  	0.1.0        	3.7.0      	Highly available RabbitMQ cluster, the open sou...

The Alibaba Cloud charts repo is recommended:

[root@k8s-master01 ~]# helm search repo aliyuncs/rabbitmq-ha --versions
NAME                	CHART VERSION	APP VERSION	DESCRIPTION                                       
aliyuncs/rabbitmq-ha	1.39.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.38.2       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.38.1       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.36.4       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.36.3       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.36.0       	3.8.0      	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.34.1       	3.7.19     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.34.0       	3.7.19     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.33.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.32.4       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.32.3       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.32.2       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.32.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.31.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.30.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.29.1       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.29.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.28.0       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.27.2       	3.7.15     	Highly available RabbitMQ cluster, the open sou...
aliyuncs/rabbitmq-ha	1.27.1       	3.7.12     	Highly available RabbitMQ cluster, the open sou...

7.4 Pull and customize the specified version of the rabbitmq-ha chart

[root@k8s-master01 ~]# mkdir -p /root/tools
[root@k8s-master01 ~]# cd /root/tools/
[root@k8s-master01 tools]# helm pull aliyuncs/rabbitmq-ha --version=1.36.4
[root@k8s-master01 tools]# ll
total 20
-rw-r--r-- 1 root root 18573 Apr 21 10:04 rabbitmq-ha-1.36.4.tgz
[root@k8s-master01 tools]# tar -xvf rabbitmq-ha-1.36.4.tgz 
rabbitmq-ha/Chart.yaml
rabbitmq-ha/values.yaml
rabbitmq-ha/templates/NOTES.txt
rabbitmq-ha/templates/_helpers.tpl
rabbitmq-ha/templates/alerts.yaml
rabbitmq-ha/templates/configmap.yaml
rabbitmq-ha/templates/ingress.yaml
rabbitmq-ha/templates/pdb.yaml
rabbitmq-ha/templates/role.yaml
rabbitmq-ha/templates/rolebinding.yaml
rabbitmq-ha/templates/secret.yaml
rabbitmq-ha/templates/service-discovery.yaml
rabbitmq-ha/templates/service.yaml
rabbitmq-ha/templates/serviceaccount.yaml
rabbitmq-ha/templates/servicemonitor.yaml
rabbitmq-ha/templates/statefulset.yaml
rabbitmq-ha/.helmignore
rabbitmq-ha/OWNERS
rabbitmq-ha/README.md
rabbitmq-ha/ci/prometheus-exporter-values.yaml
rabbitmq-ha/ci/prometheus-plugin-values.yaml
[root@k8s-master01 tools]# 

[root@k8s-master01 tools]# ll
total 24
drwxr-xr-x 4 root root  4096 Apr 21 10:04 rabbitmq-ha
-rw-r--r-- 1 root root 18573 Apr 21 10:04 rabbitmq-ha-1.36.4.tgz

[root@k8s-master01 tools]# cd rabbitmq-ha

[root@k8s-master01 rabbitmq-ha]# ll
total 72
-rwxr-xr-x 1 root root   525 Nov 21  2019 Chart.yaml
drwxr-xr-x 2 root root  4096 Apr 21 10:04 ci
-rwxr-xr-x 1 root root    59 Nov 21  2019 OWNERS
-rwxr-xr-x 1 root root 39700 Nov 21  2019 README.md
drwxr-xr-x 2 root root  4096 Apr 21 10:04 templates
-rwxr-xr-x 1 root root 15630 Nov 21  2019 values.yaml

  • Edit values.yaml inside the rabbitmq-ha chart
    Change the following:
    managementUsername: management
    managementPassword: rabbitmq

    service.type: NodePort

    persistentVolume:
      enabled: true
      storageClass: "nfs-storage"

[root@k8s-master01 rabbitmq-ha]# vi values.yaml 
# make the changes listed above

[root@k8s-master01 rabbitmq-ha]# kubectl create namespace rmq

[root@k8s-master01 rabbitmq-ha]# helm install rq . -n rmq
NAME: rq
LAST DEPLOYED: Thu Apr 21 10:24:55 2022
NAMESPACE: rmq
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
** Please be patient while the chart is being deployed **

  Credentials:

    Username            : guest
    Password            : $(kubectl get secret --namespace rmq rq-rabbitmq-ha -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)
    Management username : management
    Management password : $(kubectl get secret --namespace rmq rq-rabbitmq-ha -o jsonpath="{.data.rabbitmq-management-password}" | base64 --decode)
    ErLang Cookie       : $(kubectl get secret --namespace rmq rq-rabbitmq-ha -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)

  RabbitMQ can be accessed within the cluster on port 5672 at rq-rabbitmq-ha.rmq.svc.cluster.local

  To access the cluster externally execute the following commands:

    export NODE_IP=$(kubectl get nodes --namespace rmq -o jsonpath="{.items[0].status.addresses[?(@.type=='ExternalIP')].address}")
    export NODE_PORT_AMQP=$(kubectl get --namespace rmq -o jsonpath='{.spec.ports[?(@.name=="amqp")].nodePort}' services rq-rabbitmq-ha)
    export NODE_PORT_STATS=$(kubectl get --namespace rmq -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}' services rq-rabbitmq-ha)  
  To Access the RabbitMQ AMQP port:

    amqp://$NODE_IP:$NODE_PORT_AMQP/ 

  To Access the RabbitMQ Management interface:

    http://$NODE_IP:$NODE_PORT_STATS/

8. Test the RabbitMQ high-availability cluster

8.1 Check the installed chart

[root@k8s-master01 ~]# helm list -A
NAME	NAMESPACE	REVISION	UPDATED                                	STATUS  	CHART             	APP VERSION
rq  	rmq      	1       	2022-04-21 10:24:55.917033922 +0800 CST	deployed	rabbitmq-ha-1.36.4	3.8.0 

8.2 Check the deployment status

[root@k8s-master01 ~]# kubectl get all -n rmq
NAME                   READY   STATUS    RESTARTS   AGE
pod/rq-rabbitmq-ha-0   1/1     Running   0          22h
pod/rq-rabbitmq-ha-1   1/1     Running   0          22h
pod/rq-rabbitmq-ha-2   1/1     Running   0          22h

NAME                               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                                         AGE
service/rq-rabbitmq-ha             NodePort    10.110.56.225   <none>        15672:31880/TCP,5672:30482/TCP,4369:32136/TCP   22h
service/rq-rabbitmq-ha-discovery   ClusterIP   None            <none>        15672/TCP,5672/TCP,4369/TCP                     22h

NAME                              READY   AGE
statefulset.apps/rq-rabbitmq-ha   3/3     22h

8.3 Get the access information


[root@k8s-master01 ~]# helm list -A
NAME	NAMESPACE	REVISION	UPDATED                                	STATUS  	CHART             	APP VERSION
rq  	rmq      	1       	2022-04-21 10:24:55.917033922 +0800 CST	deployed	rabbitmq-ha-1.36.4	3.8.0 

[root@k8s-master01 ~]# helm status rq -n rmq
NAME: rq
LAST DEPLOYED: Thu Apr 21 10:24:55 2022
NAMESPACE: rmq
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
** Please be patient while the chart is being deployed **

  Credentials:

    Username            : guest
    Password            : $(kubectl get secret --namespace rmq rq-rabbitmq-ha -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)
    Management username : management
    Management password : $(kubectl get secret --namespace rmq rq-rabbitmq-ha -o jsonpath="{.data.rabbitmq-management-password}" | base64 --decode)
    ErLang Cookie       : $(kubectl get secret --namespace rmq rq-rabbitmq-ha -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)

  RabbitMQ can be accessed within the cluster on port 5672 at rq-rabbitmq-ha.rmq.svc.cluster.local

  To access the cluster externally execute the following commands:

    export NODE_IP=$(kubectl get nodes --namespace rmq -o jsonpath="{.items[0].status.addresses[?(@.type=='ExternalIP')].address}")
    export NODE_PORT_AMQP=$(kubectl get --namespace rmq -o jsonpath='{.spec.ports[?(@.name=="amqp")].nodePort}' services rq-rabbitmq-ha)
    export NODE_PORT_STATS=$(kubectl get --namespace rmq -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}' services rq-rabbitmq-ha)  
  To Access the RabbitMQ AMQP port:

    amqp://$NODE_IP:$NODE_PORT_AMQP/ 

  To Access the RabbitMQ Management interface:

    http://$NODE_IP:$NODE_PORT_STATS/

8.4 Log in to the RabbitMQ management console

  • As shown above, the management console credentials are:
  • a. Management username : management
  • b. Management password : $(kubectl get secret --namespace rmq rq-rabbitmq-ha -o
    jsonpath="{.data.rabbitmq-management-password}" | base64 --decode)
# The Management password resolves to:
[root@k8s-master01 ~]# echo $(kubectl get secret --namespace rmq rq-rabbitmq-ha -o jsonpath="{.data.rabbitmq-management-password}" | base64 --decode)
rabbitmq

[root@k8s-master01 ~]# kubectl get svc -n rmq
NAME                       TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                                         AGE
rq-rabbitmq-ha             NodePort    10.110.56.225   <none>        15672:31880/TCP,5672:30482/TCP,4369:32136/TCP   22h
rq-rabbitmq-ha-discovery   ClusterIP   None            <none>        15672/TCP,5672/TCP,4369/TCP                     22h

Hostname        Public IP        Private IP
k8s-master01    39.104.173.77    172.24.114.3
k8s-node01      39.104.179.210   172.24.114.4
k8s-node02      39.104.173.12    172.24.114.1
k8s-node03      39.104.177.2     172.24.114.2

c. Browser address (the public IP of any node works: k8s-master01, k8s-node01, k8s-node02 or k8s-node03): 39.104.173.77:31880
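
  • Optional API check from any shell (assumes the NodePort 31880 shown above; yours may differ):
curl -u management:rabbitmq http://39.104.173.77:31880/api/overview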
e. Create a user and assign a role
Exec into any one of the rq-rabbitmq-ha-0, rq-rabbitmq-ha-1 or rq-rabbitmq-ha-2 pods:

[root@k8s-master01 ~]# kubectl exec -it pod/rq-rabbitmq-ha-0 -n rmq -- bash

# Create a user
bash-5.0$ rabbitmqctl add_user  admin 123456
Adding user "admin" ...

# Grant permissions (configure, write, read on vhost "/")
bash-5.0$ rabbitmqctl set_permissions -p "/" admin ".*" ".*" ".*"
Setting permissions for user "admin" in vhost "/" ...

# Assign the administrator role:
bash-5.0$ rabbitmqctl set_user_tags admin administrator
Setting tags for user "admin" to [administrator] ...

bash-5.0$ rabbitmqctl list_users
Listing users ...
user	tags
management	[management]
admin	[administrator]
guest	[administrator]

bash-5.0$ rabbitmqctl cluster_status
Cluster status of node rabbit@rq-rabbitmq-ha-0.rq-rabbitmq-ha-discovery.rmq.svc.cluster.local ...
Basics

Cluster name: rabbit@rq-rabbitmq-ha-0.rq-rabbitmq-ha-discovery.rmq.svc.cluster.local

Disk Nodes

rabbit@rq-rabbitmq-ha-0.rq-rabbitmq-ha-discovery.rmq.svc.cluster.local
rabbit@rq-rabbitmq-ha-1.rq-rabbitmq-ha-discovery.rmq.svc.cluster.local
rabbit@rq-rabbitmq-ha-2.rq-rabbitmq-ha-discovery.rmq.svc.cluster.local

Running Nodes

rabbit@rq-rabbitmq-ha-0.rq-rabbitmq-ha-discovery.rmq.svc.cluster.local
rabbit@rq-rabbitmq-ha-1.rq-rabbitmq-ha-discovery.rmq.svc.cluster.local
rabbit@rq-rabbitmq-ha-2.rq-rabbitmq-ha-discovery.rmq.svc.cluster.local

Versions

rabbit@rq-rabbitmq-ha-0.rq-rabbitmq-ha-discovery.rmq.svc.cluster.local: RabbitMQ 3.8.0 on Erlang 22.1.5
rabbit@rq-rabbitmq-ha-1.rq-rabbitmq-ha-discovery.rmq.svc.cluster.local: RabbitMQ 3.8.0 on Erlang 22.1.5
rabbit@rq-rabbitmq-ha-2.rq-rabbitmq-ha-discovery.rmq.svc.cluster.local: RabbitMQ 3.8.0 on Erlang 22.1.5

f. Log back in from the browser

  • Username: admin
  • Password: 123456
    Congratulations, the installation is complete!

Appendix 1: If you'd rather not create the user and role inside a pod, edit values.yaml as below and the install does it all in one go

cat values.yaml  # use the following key: value entries as a reference
rabbitmqUsername: rmq
rabbitmqPassword: rabbitmq

managementUsername: management
managementPassword: rabbitmq


definitions:
  globalParameters: |-
    {
        "name": "rmq",
        "value": "rabbitmq"
    }
  users: |-
   {
     "name": "rmq",
     "password": "rabbitmq",
     "tags": "administrator"
   }
  vhosts: |-
   {
     "name": "/rabbit"
   }
  parameters: |-
   {
     "value": {
       "src-uri": "amqp://localhost",
       "src-queue": "source",
       "dest-uri": "amqp://localhost",
       "dest-queue": "destination",
       "add-forward-headers": false,
       "ack-mode": "on-confirm",
       "delete-after": "never"
     },
     "vhost": "/",
     "component": "shovel",
     "name": "rmq"
   }
  permissions: |-
   {
     "user": "rmq",
     "vhost": "/",
     "configure": ".*",
     "write": ".*",
     "read": ".*"
   }
  queues: |-
    {
       "name":"rmq",
       "vhost":"/",
       "durable":true,
       "auto_delete":false,
       "arguments":{}
    }
  exchanges: |-
    {
       "name":"rmq",
       "vhost":"/",
       "type":"direct",
       "durable":true,
       "auto_delete":false,
       "internal":false,
       "arguments":{}
    }
  bindings: |-
    {
       "source":"rmq",
       "vhost":"/rabbit",
       "destination":"rmq",
       "destination_type":"queue",
       "routing_key":"myKey",
       "arguments":{}
    }

service:
  annotations: {}
  clusterIP: None

  externalIPs: []

  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  type: NodePort


persistentVolume:
  enabled: true
  storageClass: "nfs-storage"
  name: data
  accessModes:
    - ReadWriteOnce
  size: 8Gi
  annotations:
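
  • If the release from section 7 is still installed, either remove it first (helm uninstall rq -n rmq) or roll the edited values out with an upgrade from the chart directory:
helm upgrade rq . -n rmq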
  • Installation run
[root@k8s-master01 rabbitmq-ha]# helm list -A
NAME	NAMESPACE	REVISION	UPDATED                                	STATUS  	CHART             	APP VERSION
rq  	rmq      	1       	2022-04-22 09:54:06.860727425 +0800 CST	deployed	rabbitmq-ha-1.36.4	3.8.0      

[root@k8s-master01 rabbitmq-ha]# kubectl create namespace rmq

[root@k8s-master01 rabbitmq-ha]# helm install rq . -n rmq

[root@k8s-master01 rabbitmq-ha]# helm status rq -n rmq
NAME: rq
LAST DEPLOYED: Fri Apr 22 09:54:06 2022
NAMESPACE: rmq
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
** Please be patient while the chart is being deployed **

  Credentials:

    Username            : rmq
    Password            : $(kubectl get secret --namespace rmq rq-rabbitmq-ha -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)
    Management username : management
    Management password : $(kubectl get secret --namespace rmq rq-rabbitmq-ha -o jsonpath="{.data.rabbitmq-management-password}" | base64 --decode)
    ErLang Cookie       : $(kubectl get secret --namespace rmq rq-rabbitmq-ha -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)

  RabbitMQ can be accessed within the cluster on port 5672 at rq-rabbitmq-ha.rmq.svc.cluster.local

  To access the cluster externally execute the following commands:

    export NODE_IP=$(kubectl get nodes --namespace rmq -o jsonpath="{.items[0].status.addresses[?(@.type=='ExternalIP')].address}")
    export NODE_PORT_AMQP=$(kubectl get --namespace rmq -o jsonpath='{.spec.ports[?(@.name=="amqp")].nodePort}' services rq-rabbitmq-ha)
    export NODE_PORT_STATS=$(kubectl get --namespace rmq -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}' services rq-rabbitmq-ha)  
  To Access the RabbitMQ AMQP port:

    amqp://$NODE_IP:$NODE_PORT_AMQP/ 

  To Access the RabbitMQ Management interface:

    http://$NODE_IP:$NODE_PORT_STATS/
    
[root@k8s-master01 rabbitmq-ha]# kubectl get all -n rmq
NAME                   READY   STATUS    RESTARTS      AGE
pod/rq-rabbitmq-ha-0   1/1     Running   1 (30m ago)   30m
pod/rq-rabbitmq-ha-1   1/1     Running   0             30m
pod/rq-rabbitmq-ha-2   1/1     Running   0             29m

NAME                               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                                         AGE
service/rq-rabbitmq-ha             NodePort    10.109.93.168   <none>        15672:31584/TCP,5672:32262/TCP,4369:32536/TCP   30m
service/rq-rabbitmq-ha-discovery   ClusterIP   None            <none>        15672/TCP,5672/TCP,4369/TCP                     30m

NAME                              READY   AGE
statefulset.apps/rq-rabbitmq-ha   3/3     30m

  • Browser login: Username: rmq; Password: rabbitmq

