1. Introduction to k8s


2. Deploying k8s

Clean up the virtual-machine environment (remove the previous Docker Swarm setup):

[root@server3 ~]# docker swarm leave
[root@server4 ~]# docker swarm leave
[root@server2 ~]# docker stack rm mystack    remove the old stack
[root@server2 ~]# docker swarm leave --force    force the manager node to leave the swarm
[root@server2 ~]# cd /etc/systemd/system/
[root@server2 system]# cd docker.service.d/
[root@server2 docker.service.d]# ls
10-machine.conf
[root@server2 docker.service.d]# rm -fr 10-machine.conf    remove the drop-in file; do the same on server3 and server4
[root@server2 docker.service.d]# systemctl daemon-reload    reload the service units; do the same on server3 and server4
[root@server2 docker.service.d]# systemctl restart docker    restart Docker; do the same on server3 and server4
[root@server2 docker.service.d]# docker volume prune    remove unused volumes; do the same on server3 and server4
[root@server2 docker.service.d]# docker network prune    remove unused networks; do the same on server3 and server4

Deployment:
Refer to the official documentation.

[root@server2 ~]# cd /etc/docker/    
[root@server2 docker]# vim daemon.json    edit the file
{
  "registry-mirrors": ["https://reg.westos.org"],
  "exec-opts": ["native.cgroupdriver=systemd"],      将cgroups改成systemd机制
  "log-driver": "json-file",
  "log-opts": {
     "max-size": "100m"
   },
   "storage-driver": "overlay2"
}
[root@server2 docker]# systemctl restart docker    restart the Docker service
[root@server2 docker]# docker info

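As a quick optional check, the active cgroup driver can be read directly from docker info; after the restart it should report systemd:

[root@server2 docker]# docker info -f '{{.CgroupDriver}}'    print only the cgroup driver field
systemd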

[root@server2 docker]# scp daemon.json server3:/etc/docker/    copy the file to server3
daemon.json                                   100%  218   293.2KB/s   00:00    
[root@server2 docker]# scp daemon.json server4:/etc/docker/    copy the file to server4
daemon.json                                   100%  218   382.3KB/s   00:00  
[root@server2 docker]# ssh server3 systemctl restart docker    restart Docker on server3 remotely
[root@server2 docker]# ssh server4 systemctl restart docker    restart Docker on server4 remotely
[root@server2 docker]# swapoff -a    disable all swap
[root@server2 docker]# vim /etc/fstab    comment out the swap entry, otherwise swap is re-enabled after a reboot

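The fstab edit simply comments out the swap line. A minimal sketch of what the result typically looks like (the device path is an example and will differ per host):

[root@server2 docker]# grep swap /etc/fstab
#/dev/mapper/rhel-swap   swap   swap   defaults   0 0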

Disable swap on server3 and server4 in the same way as on server2.
[root@server2 docker]# cd /etc/yum.repos.d/
[root@server2 yum.repos.d]# vim k8s.repo    create the k8s yum repository
[kubernetes]
name=kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
enabled=1
[root@server2 yum.repos.d]# yum install kubelet kubeadm kubectl -y    install the k8s components
[root@server2 yum.repos.d]# scp k8s.repo server3:/etc/yum.repos.d/    copy the k8s yum repo to server3
k8s.repo                                      100%  129   156.9KB/s   00:00    
[root@server2 yum.repos.d]# scp k8s.repo server4:/etc/yum.repos.d/    copy the k8s yum repo to server4
k8s.repo                                      100%  129   188.5KB/s   00:00 
[root@server3 ~]# yum install kubelet kubeadm kubectl -y    install the k8s components on server3
[root@server4 ~]# yum install kubelet kubeadm kubectl -y    install the k8s components on server4
[root@server2 yum.repos.d]# systemctl enable --now kubelet    enable kubelet at boot on server2
[root@server3 ~]# systemctl enable --now kubelet    enable kubelet at boot on server3
[root@server4 ~]# systemctl enable --now kubelet    enable kubelet at boot on server4; note that Docker must also be enabled at boot
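Since kubelet relies on the local container runtime, Docker should also start at boot on every node, for example:

[root@server2 yum.repos.d]# systemctl enable docker    repeat on server3 and server4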
[root@server2 yum.repos.d]# kubeadm config print init-defaults    show the default init configuration

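The detail that matters in the default configuration is the image repository: it points at k8s.gcr.io, which is usually unreachable from this environment, hence the Aliyun mirror used below. An approximate excerpt of the printed defaults:

apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
imageRepository: k8s.gcr.io
kubernetesVersion: 1.23.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12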

[root@server2 yum.repos.d]# kubeadm config images list --image-repository registry.aliyuncs.com/google_containers    list the images k8s needs
registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.4
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.4
registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.4
registry.aliyuncs.com/google_containers/kube-proxy:v1.23.4
registry.aliyuncs.com/google_containers/pause:3.6
registry.aliyuncs.com/google_containers/etcd:3.5.1-0
registry.aliyuncs.com/google_containers/coredns:v1.8.6
[root@server2 yum.repos.d]# kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers    pull the images

Create a new k8s project in the Harbor registry (reg.westos.org) via the web UI.

[root@server2 ~]# docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.23.4 reg.westos.org/k8s/kube-apiserver:v1.23.4    retag
[root@server2 ~]# docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.23.4 reg.westos.org/k8s/kube-proxy:v1.23.4    retag
[root@server2 ~]# docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.23.4 reg.westos.org/k8s/kube-controller-manager:v1.23.4    retag
[root@server2 ~]# docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.23.4 reg.westos.org/k8s/kube-scheduler:v1.23.4    retag
[root@server2 ~]# docker tag registry.aliyuncs.com/google_containers/etcd:3.5.1-0 reg.westos.org/k8s/etcd:3.5.1-0    retag
[root@server2 ~]# docker tag registry.aliyuncs.com/google_containers/coredns:v1.8.6 reg.westos.org/k8s/coredns:v1.8.6    retag
[root@server2 ~]# docker tag registry.aliyuncs.com/google_containers/pause:3.6 reg.westos.org/k8s/pause:3.6    retag
[root@server2 ~]# docker logout reg.westos.org    log out of the registry
[root@server2 ~]# docker login reg.westos.org    log in to the registry
Username: admin    use the admin user
Password: 
[root@server2 ~]# docker images | grep k8s | awk '{system(" docker push "$1":"$2" ")}'    push all the k8s images to the Harbor registry
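The awk one-liner just runs docker push for every local image whose name contains k8s, i.e. the reg.westos.org/k8s/* tags created above. An equivalent, more readable sketch of the same loop:

[root@server2 ~]# for img in $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '^reg.westos.org/k8s/'); do
>     docker push $img
> done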


[root@server2 ~]# kubeadm init --pod-network-cidr=10.244.0.0/16 --image-repository reg.westos.org/k8s    initialize the cluster; when using the flannel network add-on, --pod-network-cidr=10.244.0.0/16 must be specified as the pod subnet

[root@server2 ~]# vim .bash_profile    working as root this time
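When working as root, the line added to .bash_profile is normally the one suggested in the kubeadm init output, pointing kubectl at the admin kubeconfig:

export KUBECONFIG=/etc/kubernetes/admin.conf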

[root@server2 ~]# source .bash_profile    make the variable take effect
[root@server2 ~]# kubectl get node    check node status; server2 is not yet ready
NAME      STATUS     ROLES                  AGE   VERSION
server2   NotReady   control-plane,master   67m   v1.23.4
[root@server2 ~]# kubectl get pod -n kube-system    check the system pods; the coredns pods stay Pending until a network plugin is installed

Network plugin (flannel) documentation:

[root@server2 ~]# yum install wget -y
[root@server2 ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml    download the kube-flannel.yml deployment manifest
Note: if kube-flannel.yml cannot be downloaded because of network problems, add a host entry to /etc/hosts:
185.199.111.133  raw.githubusercontent.com
[root@server2 ~]# vim kube-flannel.yml

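Two parts of kube-flannel.yml are worth checking here: the two rancher/mirrored-flannelcni-* image references used by the DaemonSet, and the net-conf.json ConfigMap, whose Network field must match the --pod-network-cidr passed to kubeadm init. Roughly, the stock manifest contains:

        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
        ...
        image: rancher/mirrored-flannelcni-flannel:v0.17.0
        ...
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }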
For convenience, pull these two images on server1 and push them into the Harbor registry so the cluster nodes can pull them locally:

[root@server1 ~]# docker pull rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1    pull
[root@server1 ~]# docker pull rancher/mirrored-flannelcni-flannel:v0.17.0    pull
[root@server1 ~]# docker tag rancher/mirrored-flannelcni-flannel:v0.17.0 reg.westos.org/library/mirrored-flannelcni-flannel:v0.17.0    retag
[root@server1 ~]# docker tag rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1 reg.westos.org/library/mirrored-flannelcni-flannel-cni-plugin:v1.0.1    retag
[root@server1 ~]# docker push reg.westos.org/library/mirrored-flannelcni-flannel-cni-plugin:v1.0.1    push the image
[root@server1 ~]# docker push reg.westos.org/library/mirrored-flannelcni-flannel:v0.17.0    push the image
[root@server2 ~]# vim kube-flannel.yml    edit the manifest (update the image references)

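What the edit typically looks like (an assumed sketch, based on the tags pushed to Harbor above): the image fields are rewritten to pull from the local registry instead of Docker Hub, e.g.

        image: reg.westos.org/library/mirrored-flannelcni-flannel-cni-plugin:v1.0.1
        ...
        image: reg.westos.org/library/mirrored-flannelcni-flannel:v0.17.0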

[root@server2 ~]# source <(kubectl completion bash) && echo 'source <(kubectl completion bash)' >> ~/.bashrc    set up kubectl command completion, then re-open the shell
[root@server2 ~]# kubectl apply -f kube-flannel.yml    apply the deployment manifest
Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
[root@server2 ~]# kubectl get pod -n kube-system    check that all pods are ready
NAME                              READY   STATUS    RESTARTS      AGE
coredns-7b56f6bc55-hlts6          1/1     Running   0             8h
coredns-7b56f6bc55-k6dzz          1/1     Running   0             8h
etcd-server2                      1/1     Running   4 (33m ago)   8h
kube-apiserver-server2            1/1     Running   4 (33m ago)   8h
kube-controller-manager-server2   1/1     Running   4 (33m ago)   8h
kube-flannel-ds-gss9t             1/1     Running   0             4m18s
kube-proxy-xt2m9                  1/1     Running   4 (33m ago)   8h
kube-scheduler-server2            1/1     Running   4 (33m ago)   8h
[root@server3 ~]# kubeadm join 172.25.50.2:6443 --token l27d4e.lrzd20myvkauszmb --discovery-token-ca-cert-hash sha256:561970f88e9c1ab1ac60fffbdfadd2b16b51556327987a7ba08fa61328e4417f    on server3, run the join command generated by kubeadm init
[root@server4 ~]# kubeadm join 172.25.50.2:6443 --token l27d4e.lrzd20myvkauszmb --discovery-token-ca-cert-hash sha256:561970f88e9c1ab1ac60fffbdfadd2b16b51556327987a7ba08fa61328e4417f    on server4, run the join command generated by kubeadm init
[root@server2 ~]# kubectl get node    check the nodes; all of them are now Ready
NAME      STATUS   ROLES                  AGE     VERSION
server2   Ready    control-plane,master   8h      v1.23.4
server3   Ready    <none>                 2m57s   v1.23.4
server4   Ready    <none>                 2m38s   v1.23.4
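As a final check, the flannel and kube-proxy pods should be Running on all three nodes, which can be confirmed with:

[root@server2 ~]# kubectl get pod -n kube-system -o wide    the -o wide output shows which node each pod runs on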