一. 前言

无论从成本还是效率上考虑,k8s都极占优势,基本代表了未来趋势。官网推荐使用kubeadm部署,虽然方便,但掩盖了许多细节问题。k8s乍看仅仅是个容器编排工具,但涉及的相关知识面非常广泛:如果说掌握大数据的相关知识需要花N天,那么K8S基本不会少于2N天;要深入了解,更非一日之功。从二进制部署起步,来日方长。本文是学习笔记的整理,仅作参考。

二. 系统架构

（此处原文为系统架构图:三节点,k8s01/k8s02 为 master 主备,keepalived VIP 192.168.100.200,k8s02/k8s03 为 worker;原图未能随文保留）

OS版本: centos7.8
etcd版本:3.4.8
k8s版本: 1.18.6

三. 部署

1.preinstall check#k8s01&02&03 node
1.1 swapoff
1.2 disable selinux
1.3 close firewall
systemctl stop firewalld
systemctl disable firewalld
1.4 vi /etc/hosts
192.168.100.101 k8s01
192.168.100.102 k8s02
192.168.100.103 k8s03

2.install docker #k8s01&02&03 node
这步比较简单,安装最新版即可,网上资料比较多,不再赘述。

3.install&config keepalived+ipvs #k8s01&02 node
3.1 vi /etc/sysctl.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1

3.2 vi /etc/sysconfig/modules/ipvs.modules #optional

#!/bin/bash
# Load every IPVS-related kernel module needed by keepalived / kube-proxy.
# FIX: the original was pasted out of a here-document, leaving the dollar
# signs escaped as "\$".  Run as a standalone script, \${ipvs_modules} is
# the LITERAL string "${ipvs_modules}", so no module was ever probed.
# The escapes are removed and expansions are quoted.
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in ${ipvs_modules}; do
  # only probe modules that exist for the running kernel
  if /sbin/modinfo -F filename "${kernel_module}" > /dev/null 2>&1; then
    /sbin/modprobe "${kernel_module}"
  fi
done

3.3 chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

3.4 yum install keepalived ipvsadm
3.5 cat>/etc/keepalived/keepalived.conf #注意:第一个节点k8s01 priority 100,第二个节点k8s02小于100,其它配置一样

global_defs {
   router_id LVS_DEVEL
}
# VRRP instance holding the API-server VIP 192.168.100.200.
# Both masters run state BACKUP with nopreempt so the VIP does not flap
# back when a failed node recovers; k8s01 uses priority 100, k8s02 must
# use a lower value (see the note in step 3.5).
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33
    virtual_router_id 80
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass just0kk
    }
    virtual_ipaddress {
        192.168.100.200
    }
}
# IPVS virtual server load-balancing the three kube-apiservers on :6443.
virtual_server 192.168.100.200 6443 {
    delay_loop 6
    # FIX: "loadbalance" is not a valid lb_algo keyword (valid values are
    # rr|wrr|lc|wlc|lblc|lblcr|sh|dh); keepalived rejects the stanza.
    # Round-robin is the straightforward choice here.
    lb_algo rr
    lb_kind DR
    net_mask 255.255.255.0
    persistence_timeout 0
    protocol TCP
    # SSL_GET health check: probe the apiserver's HTTPS /healthz endpoint.
    real_server 192.168.100.101 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.100.102 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.100.103 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

3.6 systemctl start keepalived
4.config cfssl工具,k8s01 node only for create ca files
网络下载二进制文件拷贝/usr/bin && chmod +x
[root@k8s01 bin]# ls -lt cfss*
-rwxr-xr-x 1 root root 6595195 Aug 2 16:10 cfssl-certinfo
-rwxr-xr-x 1 root root 2277873 Aug 2 16:09 cfssljson
-rwxr-xr-x 1 root root 10376657 Aug 2 16:09 cfssl
5.config 免密登录,k8s01 node,for scp&ssh
ssh-keygen -t rsa
ssh-copy-id -i .ssh/id_rsa.pub k8s02
ssh-copy-id -i .ssh/id_rsa.pub k8s03
6 upload&unzip cni files to /opt/cni/bin #k8s01&02&03 node
https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz
7.upload&unzip all nodes etcd&k8s binary files to /usr/bin #k8s01&02&03 node
kubernetes-server-linux-amd64.tar.gz #ver:1.18.6
etcd-v3.4.8-linux-amd64.tar.gz
8.config&execute k8s_config.sh #k8s01 node
所有的配置逻辑都在k8s_config.sh,详见代码及注释。
sh /home/k8s_config.sh master #注意:这一步由于网络问题docker pull可能会失败,如果报错请手动pull成功(也可以pull国内镜像registry.cn-beijing.aliyuncs.com/zhoujun/pause-amd64:3.1再tag)后再进行后续步骤

9.start service
systemctl start etcd #order by k8s01&02&03 node
systemctl start kube-apiserver #primary(k8s01)
systemctl start kube-controller-manager #primary(k8s01)
systemctl start kube-scheduler #primary(k8s01)
systemctl start kube-apiserver #standby(k8s02)
systemctl start kube-controller-manager #standby(k8s02)
systemctl start kube-scheduler #standby(k8s02)
systemctl start kubelet kube-proxy #all worker nodes(k8s02&03)

四. 测试

1 check node
[root@k8s01 kubernetes]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s02 NotReady 16s v1.18.6
k8s03 NotReady 6m8s v1.18.6
2 安装网络插件
[root@k8s01 kubernetes]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
3 check
[root@k8s01 kubernetes]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s02 Ready 5m6s v1.18.6
k8s03 Ready 10m v1.18.6
[root@k8s01 home]# kubectl get pods -A #确保状态为Running,如果有问题,需及时解决
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system kube-flannel-ds-amd64-hlcvv 1/1 Running 0 3m
kube-system kube-flannel-ds-amd64-w8kb9 1/1 Running 0 3m9s
[root@k8s03 kubernetes]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 898M 0 898M 0% /dev
tmpfs 910M 0 910M 0% /dev/shm
tmpfs 910M 9.7M 901M 2% /run
tmpfs 910M 0 910M 0% /sys/fs/cgroup
/dev/mapper/centos-root 17G 2.4G 15G 15% /
/dev/sda1 1014M 150M 865M 15% /boot
tmpfs 182M 0 182M 0% /run/user/0
tmpfs 910M 12K 910M 1% /var/lib/kubelet/pods/a1982f17-b2de-4afe-b447-057aecc54456/volumes/kubernetes.io~secret/flannel-token-nvtrx
overlay 17G 2.4G 15G 15% /var/lib/docker/overlay2/be116172dd5dc0906d8d127c970d74c461fafb97d0783f2b10c8102b1f571bc7/merged
shm 64M 0 64M 0% /var/lib/docker/containers/d39f4512d83034e6b65790ff963e49a9d674084503c457e4a949a8f9d5f02065/mounts/shm
overlay 17G 2.4G 15G 15% /var/lib/docker/overlay2/581baa49c51918ac91a398f8c32b6a34708d2332d11670ae4acb67119f8fc1a4/merged
[root@k8s03 kubernetes]# ip link
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 00:0c:29:84:36:60 brd ff:ff:ff:ff:ff:ff
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN mode DEFAULT group default
link/ether 02:42:e4:7d:b1:55 brd ff:ff:ff:ff:ff:ff
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN mode DEFAULT group default
link/ether e2:a9:86:33:e7:49 brd ff:ff:ff:ff:ff:ff
[root@k8s03 kubernetes]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
2e150569eb8b 4e9f801d2217 "/opt/bin/flanneld --…" 3 minutes ago Up 3 minutes k8s_kube-flannel_kube-flannel-ds-amd64-hlcvv_kube-system_a1982f17-b2de-4afe-b447-057aecc54456_0
d39f4512d830 cargo.caicloud.io/caicloud/pause-amd64:3.1 "/pause" 4 minutes ago Up 4 minutes k8s_POD_kube-flannel-ds-amd64-hlcvv_kube-system_a1982f17-b2de-4afe-b447-057aecc54456_0
4 创建mysql pod
[root@k8s01 home]# cat>mysql-rc.yaml

# ReplicationController keeping exactly one MySQL pod running.
# NOTE(review): ReplicationController is a legacy resource; a Deployment is
# the modern equivalent — kept as-is to match the original tutorial.
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql
spec:
  replicas: 1
  selector:
    app: mysql          # must match the pod template labels below
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - name: mysql
        image: mysql    # untagged -> pulls :latest; pin a tag for reproducible deploys
        ports:
        - containerPort: 3306
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "123456"

[root@k8s01 home]# kubectl create -f mysql-rc.yaml
replicationcontroller/mysql created
[root@k8s01 home]# kubectl get pods
NAME READY STATUS RESTARTS AGE
mysql-5kkjh 0/1 ContainerCreating 0 11s
[root@k8s01 home]# kubectl describe pod mysql-5kkjh

Events:
Type Reason Age From Message


Normal Scheduled default-scheduler Successfully assigned default/mysql-5kkjh to k8s02
Normal Pulling 106s kubelet, k8s02 Pulling image “mysql”
[root@k8s01 home]# kubectl get pods
NAME READY STATUS RESTARTS AGE
mysql-5kkjh 1/1 Running 0 2m53s

[root@k8s01 home]# kubectl describe pod mysql-5kkjh #注意此mysql pod跑在k8s02上
Name: mysql-5kkjh
Namespace: default
Priority: 0
Node: k8s02/192.168.100.102
Start Time: Thu, 20 Aug 2020 23:26:34 +0800
Labels: app=mysql
Annotations:
Status: Running
5 配置service允许外部访问
[root@k8s01 home]# cat>mysql-srv.yaml

# NodePort Service exposing the MySQL pod outside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  type: NodePort          # reachable on every node's IP at nodePort
  ports:
    - port: 3306          # cluster-internal service port
      nodePort: 30306     # external port (default NodePort range 30000-32767)
  selector:
    app: mysql            # routes to pods created by the mysql RC

[root@k8s01 home]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 196.169.0.1 443/TCP 9h
[root@k8s01 home]# kubectl apply -f mysql-srv.yaml
service/mysql created
[root@k8s01 home]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 196.169.0.1 443/TCP 9h
mysql NodePort 196.169.254.105 3306:30306/TCP 6s
6 外部访问测试
mysql -uroot -p123456 -P30306 -h192.168.100.102
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 8
Server version: 8.0.21 MySQL Community Server - GPL

Copyright © 2000, 2020, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type ‘help;’ or ‘\h’ for help. Type ‘\c’ to clear the current input statement.

mysql> show databases;
±-------------------+
| Database |
±-------------------+
| information_schema |
| mysql |
| performance_schema |
| sys |
±-------------------+
4 rows in set (0.03 sec)

mysql> create database hr;
Query OK, 1 row affected (0.01 sec)

mysql> use hr
Database changed
mysql> create table emp (id int,name varchar(40),age int);
Query OK, 0 rows affected (0.01 sec)

mysql> insert into emp values(1,'test',18);
Query OK, 1 row affected (0.01 sec)

mysql> commit;
Query OK, 0 rows affected (0.00 sec)

mysql> select * from emp;
±-----±-----±-----+
| id | name | age |
±-----±-----±-----+
| 1 | test | 18 |
±-----±-----±-----+
1 row in set (0.00 sec)

mysql>

五. 切换

1 check etcd #master primary(k8s01)
[root@k8s01 ~]# source /etc/profile
[root@k8s01 home]# etcdctl --write-out=table --endpoints=$ENDPOINTS --cacert=/etc/kubernetes/pki/ca.pem --cert=/etc/kubernetes/pki/kubernetes.pem --key=/etc/kubernetes/pki/kubernetes-key.pem endpoint status
±---------------------±-----------------±--------±--------±----------±-----------±----------±-----------±-------------------±-------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
±---------------------±-----------------±--------±--------±----------±-----------±----------±-----------±-------------------±-------+
| 192.168.100.101:2379 | d94ba21c17c75ffb | 3.4.8 | 1.8 MB | true | false | 7 | 100365 | 100365 | |
| 192.168.100.102:2379 | dc51f874259f7894 | 3.4.8 | 1.8 MB | false | false | 7 | 100365 | 100365 | |
| 192.168.100.103:2379 | 570689c9b7ce17ab | 3.4.8 | 1.8 MB | false | false | 7 | 100365 | 100365 | |
±---------------------±-----------------±--------±--------±----------±-----------±----------±-----------±-------------------±-------+
2 check vip #master primary(k8s01)
[root@k8s01 home]# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:ed:a9:a7 brd ff:ff:ff:ff:ff:ff
inet 192.168.100.101/24 brd 192.168.100.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.100.200/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::657a:3aa8:cbdd:14a5/64 scope link noprefixroute
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:30:94:03:21 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
3 关机 #master primary(k8s01)
[root@k8s01 home]# init 0

4 check etcd #master standby(k8s02)
[root@k8s02 ~]# source /etc/profile
[root@k8s02 ~]# etcdctl --write-out=table --endpoints=$ENDPOINTS --cacert=/etc/kubernetes/pki/ca.pem --cert=/etc/kubernetes/pki/kubernetes.pem --key=/etc/kubernetes/pki/kubernetes-key.pem endpoint status
{"level":"warn","ts":"2020-08-21T08:57:59.830+0800","caller":"clientv3/retry_interceptor.go:62","msg":"retrying of unary invoker failed","target":"passthrough:///192.168.100.101:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = latest balancer error: connection error: desc = \"transport: Error while dialing dial tcp 192.168.100.101:2379: connect: no route to host\""}
Failed to get the status of endpoint 192.168.100.101:2379 (context deadline exceeded)
±---------------------±-----------------±--------±--------±----------±-----------±----------±-----------±-------------------±-------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
±---------------------±-----------------±--------±--------±----------±-----------±----------±-----------±-------------------±-------+
| 192.168.100.102:2379 | dc51f874259f7894 | 3.4.8 | 1.8 MB | true | false | 8 | 100702 | 100702 | |
| 192.168.100.103:2379 | 570689c9b7ce17ab | 3.4.8 | 1.8 MB | false | false | 8 | 100702 | 100702 | |
±---------------------±-----------------±--------±--------±----------±-----------±----------±-----------±-------------------±-------+
5 check vip #master standby(k8s02)
[root@k8s02 ~]# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:21:e8:0e brd ff:ff:ff:ff:ff:ff
inet 192.168.100.102/24 brd 192.168.100.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.100.200/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::140f:f359:181f:7144/64 scope link noprefixroute
valid_lft forever preferred_lft forever

6 其它操作测试 #master standby(k8s02)
[root@k8s02 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s02 Ready 9h v1.18.6
k8s03 Ready 9h v1.18.6
[root@k8s02 ~]# kubectl run busybox --image=busybox:1.28.3 --command -- sleep 3600
pod/busybox created
[root@k8s02 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 65s
mysql-5kkjh 1/1 Running 0 9h

六. 附录

k8s_config.sh脚本

#!/bin/bash
#****************************************************
#created by xlzhu@ips.com at PuDong ShangHai 202008 
#for k8s(1.18.6) three nodes cluster
#----------------------------------------------------
#NODE1:ETCD Node1,Master Primary
#NODE2:ETCD Node2,Master Standby,Worker1/Node1
#NODE3:ETCD Node3,               Worker2/Node2
#****************************************************

#!!!!!!!!variables must be checked&changed before running!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#########################################################################################
# Cluster topology.  The hostnames/IPs below must match /etc/hosts on every
# node (step 1.4), and the script relies on passwordless ssh/scp from the
# primary master (k8s01) to every other node (step 5).
PATH_SCRIPT='/home/'                                    #path of this script
IMAGE_PULL='cargo.caicloud.io/caicloud/pause-amd64:3.1' #image refer to flannel problem
HNAME_E1='k8s01'                                        #hostname of etcd1
HNAME_E2='k8s02'                                        #hostname of etcd2
HNAME_E3='k8s03'                                        #hostname of etcd3
HIP_E1='192.168.100.101'                                #IP of etcd1
HIP_E2='192.168.100.102'                                #IP of etcd2
HIP_E3='192.168.100.103'                                #IP of etcd3

# Masters: k8s01 primary, k8s02 standby; HVIP_MP is the keepalived VIP that
# fronts both kube-apiservers.
HNAME_MP='k8s01'                                        #hostname of master primary
HIP_MP='192.168.100.101'                                #IP of master primary
HVIP_MP='192.168.100.200'                               #vip of master primary
HNAME_MS='k8s02'                                        #hostname of master standby
HIP_MS='192.168.100.102'                                #IP of master standby

# Workers: k8s02 doubles as the first worker, k8s03 is the second.
HNAME_NF='k8s02'                                        #hostname of first Worker/Node
HIP_NF='192.168.100.102'                                #IP of first Worker/Node
HNAME_NS='k8s03'                                        #hostname of second Worker/Node
HIP_NS='192.168.100.103'                                #IP of second Worker/Node
#########################################################################################

# Require a mode argument (etcd|master|worker) AND require that the script
# was invoked via its path under $PATH_SCRIPT — the remote ssh re-invocations
# reuse $0 verbatim, so a bare "sh k8s_config.sh" would break them.
# FIX: quote $0, replace the `echo|grep|wc -l` backtick pipeline with a
# direct grep -q test, and exit non-zero on a usage error (the original
# plain `exit` returned 0).
if [ $# -eq 0 ] || ! echo "$0" | grep -q -- "$PATH_SCRIPT"; then
  echo '#################################################'
  echo "Usage:$0(include path) etcd/master/worker"
  echo '#################################################'
  exit 1
fi

echo "#--start..."
#--1.1.set etcd env variable& mkdir
# Make ENDPOINTS (comma-separated etcd client endpoints) available to every
# login shell: remove any previous copy of the export line, then re-insert
# it just before the "unset i" line of /etc/profile (idempotent re-runs).
# NOTE(review): the IPs are interpolated verbatim into the sed patterns, so
# their dots match "any character" — harmless with these addresses, but a
# latent gotcha.
sed -i "/export ENDPOINTS=$HIP_E1:2379,$HIP_E2:2379,$HIP_E3:2379/d" /etc/profile
sed -i "/unset i/i export ENDPOINTS=$HIP_E1:2379,$HIP_E2:2379,$HIP_E3:2379" /etc/profile
mkdir -p /etc/kubernetes/pki #ca files dir
mkdir -p /var/lib/etcd
mkdir ~/.kube
mkdir -p /opt/cni/bin  #need upload cni files
mkdir -p /etc/cni/net.d
mkdir -p /var/log/kubernetes
echo '#--1.2.1 etcd env&mkdir ok'

#--2.ca files,only run at master primary node
# Generate every certificate/key pair the cluster needs (run on the primary
# master only).  Interactively confirms docker and cfssl are available,
# writes CSR json files into /tmp, signs them with cfssl/cfssljson, then
# moves all *.pem into /etc/kubernetes/pki/ and drops the kubelet bootstrap
# token.csv there.  Produces: CA, "kubernetes" (apiserver/etcd), per-node
# kubelet certs, admin, kube-controller-manager, kube-proxy, kube-scheduler
# and service-account keypairs.
function config_ca(){
  # docker must already be installed on ALL nodes (step 2) — operator confirms
  if [ `which docker|grep '/docker'|wc -l` -eq 0 ]; then
    while true;do
	  echo "all nodes docker install ok(yes or quit)?"
	  read Arg
	  case $Arg in
	    Y|y|YES|yes)
	      break;;
	    Q|q|QUIT|quit)
	      exit;;
	  esac
	done
  else 
    echo '#--2.13.1 docker check ok'
  fi
  # cfssl/cfssljson binaries must be on PATH on this node (step 4)
  if [ `which cfssl|grep '/cfssl'|wc -l` -eq 0 ]; then
    while true;do
	  echo "this node cfssl config ok(yes or quit)?"
	  read Arg
	  case $Arg in
	    Y|y|YES|yes)
	      break;;
	    Q|q|QUIT|quit)
	      exit;;
	  esac
	done
  else 
    echo '#--2.13.2 cfssl check ok'
  fi
	
echo '#--2.13.3 start config ca json in /tmp'
cd /tmp
# CA signing policy: single "kubernetes" profile, 10-year (87600h) expiry,
# certificates usable for both server and client auth.
cat > ca-config.json <<EOF
{
  "signing": {
	"default": {
	  "expiry": "87600h"
	},
	"profiles": {
	  "kubernetes": {
		"usages": [
			"signing",
			"key encipherment",
			"server auth",
			"client auth"
		],
		"expiry": "87600h"
	  }
	}
  }
}
EOF
# Self-signed cluster CA.  196.169.0.1 is the first IP of the service CIDR
# (the in-cluster "kubernetes" service address used elsewhere in this script).
cat>ca-csr.json << EOF
{
  "CN": "kubernetes",
  "hosts": [
	  "127.0.0.1",
	  "196.169.0.1",
	  "$HIP_E1",
	  "$HIP_E2",
	  "$HIP_E3",
	  "$HIP_MP",
	  "$HIP_MS",
	  "$HVIP_MP",
	  "$HIP_NF",
	  "$HIP_NS"
  ],
  "key": {
	"algo": "rsa",
	"size": 2048
  },
  "names": [
	{
	  "C": "CN",
	  "ST": "ShangHai",
	  "L": "PuDong",
	  "O": "k8s",
	  "OU": "System"
	}
  ],
	"ca": {
	   "expiry": "87600h"
	}
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
echo '#--2.13.4 cfssl gencert ca ok'
# Shared apiserver/etcd certificate: SANs cover every node IP, the VIP and
# the in-cluster DNS names of the "kubernetes" service.
cat>kubernetes-csr.json <<EOF
{
	"CN": "kubernetes",
	"hosts": [
	  "127.0.0.1",
	  "196.169.0.1",
	  "$HIP_E1",
	  "$HIP_E2",
	  "$HIP_E3",
	  "$HIP_MP",
	  "$HIP_MS",
	  "$HVIP_MP",
	  "$HIP_NF",
	  "$HIP_NS",
	  "kubernetes",
	  "kubernetes.default",
	  "kubernetes.default.svc",
	  "kubernetes.default.svc.cluster",
	  "kubernetes.default.svc.cluster.local"
	],
	"key": {
		"algo": "rsa",
		"size": 2048
	},
	"names": [
		{
			"C": "CN",
			"ST": "ShangHai",
			"L": "PuDong",
			"O": "k8s",
			"OU": "System"
		}
	]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
echo '#--2.13.5 cfssl gencert kubernetes ok'
# Per-node kubelet certs: CN "system:node:<name>", O "system:nodes" — the
# identity format required by the Node authorizer.
# NOTE(review): "C": "China" below is not a 2-letter ISO country code (the
# other CSRs use "CN"); cfssl accepts it, but it is inconsistent — confirm.
cat > $HNAME_NF.json <<EOF 
{
  "CN": "system:node:$HNAME_NF",
  "key": {
	"algo": "rsa",
	"size": 2048
  },
  "hosts": [
	 "$HNAME_NF",
	 "$HIP_NF",
	 "$HIP_MP",
	 "$HVIP_MP",
	 "$HNAME_MP",
	 "$HIP_MS",
	 "$HNAME_MS"
  ],
  "names": [
	{
	  "C": "China",
	  "L": "PuDong",
	  "O": "system:nodes",
	  "OU": "Kubernetes",
	  "ST": "ShangHai"
	}
  ]
}
EOF
cat > $HNAME_NS.json <<EOF 
{
  "CN": "system:node:$HNAME_NS",
  "key": {
	"algo": "rsa",
	"size": 2048
  },
  "hosts": [
	 "$HNAME_NS",
	 "$HIP_NS",
	 "$HIP_MP",
	 "$HVIP_MP",
	 "$HNAME_MP",
	 "$HIP_MS",
	 "$HNAME_MS"
  ],
  "names": [
	{
	  "C": "China",
	  "L": "PuDong",
	  "O": "system:nodes",
	  "OU": "Kubernetes",
	  "ST": "ShangHai"
	}
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes $HNAME_NF.json | cfssljson -bare $HNAME_NF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes $HNAME_NS.json | cfssljson -bare $HNAME_NS
echo '#--2.13.6 cfssl gencert nodeN ok'
# Cluster-admin client cert: O "system:masters" grants full RBAC rights.
cat>admin-csr.json<<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
	"algo": "rsa",
	"size": 2048
  },
  "names": [
	{
	  "C": "CN",
	  "ST": "ShangHai",
	  "L": "PuDong",
	  "O": "system:masters",
	  "OU": "System"
	}
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
echo '#--2.13.7 cfssl gencert admin ok'
# controller-manager client cert (CN "system:kube-controller-manager")
cat>kube-controller-manager-csr.json<<EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
	"algo": "rsa",
	"size": 2048
  },
  "names": [
	{
	  "C": "CN",
	  "L": "PuDong",
	  "O": "system:kube-controller-manager",
	  "OU": "Kubernetes",
	  "ST": "ShangHai"
	}
  ]
}
EOF
cfssl gencert -ca=ca.pem  -ca-key=ca-key.pem -config=ca-config.json  -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
echo '#--2.13.8 cfssl gencert kube-controller-manager ok'
# kube-proxy client cert (CN "system:kube-proxy")
cat >kube-proxy-csr.json<<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
	"algo": "rsa",
	"size": 2048
  },
  "names": [
	{
	  "C": "CN",
	  "ST": "ShangHai",
	  "L": "PuDong",
	  "O": "k8s",
	  "OU": "System"
	}
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy
echo '#--2.13.9 cfssl gencert kube-proxy ok'
# kube-scheduler client cert (CN "system:kube-scheduler")
cat >kube-scheduler-csr.json<<EOF
{
  "CN": "system:kube-scheduler",
  "key": {
	"algo": "rsa",
	"size": 2048
  },
  "names": [
	{
	  "C": "CN",
	  "L": "PuDong",
	  "O": "system:kube-scheduler",
	  "OU": "Kubernetes",
	  "ST": "ShangHai"
	}
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
echo '#--2.13.10 cfssl gencert kube-scheduler ok'
# service-account token signing keypair (referenced by kube-apiserver
# --service-account-key-file and controller-manager
# --service-account-private-key-file)
cat >service-account-csr.json<<EOF
{
  "CN": "service-accounts",
  "key": {
	"algo": "rsa",
	"size": 2048
  },
  "names": [
	{
	  "C": "CN",
	  "L": "PuDong",
	  "O": "Kubernetes",
	  "OU": "Kubernetes",
	  "ST": "ShangHai"
	}
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes service-account-csr.json | cfssljson -bare service-account
echo '#--2.13.11 cfssl gencert service-account ok'
mv /tmp/*.pem /etc/kubernetes/pki/
echo '#--2.13.12 mv *.pem to /etc/kubernetes/pki/'
cd /etc/kubernetes/pki/
# static bootstrap token: token,user,uid,"groups"
cat > token.csv <<EOF
dd317d4ca9436b8d80d2e475c6c42639,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
echo '#--2.13.13 token.csv ok'

}

# Build ~/.kube/config for the cluster admin, pointing kubectl at the
# keepalived VIP.  Runs the same four `kubectl config` commands as before
# (set-cluster, set-credentials, set-context, use-context) with identical
# arguments, so the resulting kubeconfig is unchanged.
function config_userenv(){
  local pki=/etc/kubernetes/pki
  cd ~/.kube/
  kubectl config set-cluster k8s-ips \
    --certificate-authority=$pki/ca.pem \
    --embed-certs=true \
    --server=https://$HVIP_MP:6443 \
    --kubeconfig=config
  kubectl config set-credentials admin \
    --client-certificate=$pki/admin.pem \
    --client-key=$pki/admin-key.pem \
    --embed-certs=true \
    --kubeconfig=config
  kubectl config set-context default \
    --cluster=k8s-ips --user=admin --kubeconfig=config
  kubectl config use-context default --kubeconfig=config
  echo '#--3.1.1 kube/config ok'
}
#--3.config service
# Write /usr/lib/systemd/system/etcd.service for ONE etcd member.
#   $1 - this member's IP   (listen/advertise peer & client URLs)
#   $2 - this member's hostname (member name becomes "etcd$2", which must
#        match the etcd${HNAME_Ex} entries in --initial-cluster)
# The here-doc delimiter is UNQUOTED, so $1/$2/$HIP_Ex expand at generation
# time and each backslash-newline is folded away, yielding one long
# ExecStart line in the unit file.  TLS reuses the shared "kubernetes"
# cert for both client and peer connections.
function config_etcd(){
cat>/usr/lib/systemd/system/etcd.service<<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
Restart=always
RestartSec=5s
LimitNOFILE=40000
TimeoutStartSec=0
ExecStart=/usr/bin/etcd --name etcd$2 --data-dir /var/lib/etcd \
  --initial-advertise-peer-urls https://$1:2380 --listen-peer-urls https://$1:2380 \
  --listen-client-urls https://$1:2379 --advertise-client-urls https://$1:2379 \
  --initial-cluster-token etcd-cluster --initial-cluster etcd${HNAME_E1}=https://$HIP_E1:2380,etcd${HNAME_E2}=https://$HIP_E2:2380,etcd${HNAME_E3}=https://$HIP_E3:2380 \
  --initial-cluster-state new --client-cert-auth --trusted-ca-file=/etc/kubernetes/pki/ca.pem \
  --cert-file=/etc/kubernetes/pki/kubernetes.pem --key-file=/etc/kubernetes/pki/kubernetes-key.pem \
  --peer-client-cert-auth --peer-trusted-ca-file=/etc/kubernetes/pki/ca.pem \
  --peer-cert-file=/etc/kubernetes/pki/kubernetes.pem  --peer-key-file=/etc/kubernetes/pki/kubernetes-key.pem

[Install]
WantedBy=multi-user.target
EOF
echo '#--4.2.1 config etcd service ok!start...'
systemctl daemon-reload
# service start is deliberately deferred to the operator (step 9)
#--tempremark systemctl start etcd
echo '#--4.2.2 start etcd(skip)'
}


# Configure the control-plane services on this node:
#   kube-apiserver.service, kube-controller-manager.service (+ kubeconfig),
#   kube-scheduler kubeconfig/yaml/service.
# Arguments:
#   $1 - this master's hostname (NOTE(review): currently unused in the body)
#   $2 - a worker node name: /etc/kubernetes/pki/$2[-key].pem is reused as
#        the apiserver->kubelet client certificate
# All here-docs use UNQUOTED delimiters, so $HIP_Ex/$2/... expand now and
# backslash-newlines fold into single ExecStart lines.  Service start is
# deferred to the operator (step 9).
function config_master(){
cat>/usr/lib/systemd/system/kube-apiserver.service<<EOF
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
[Service]
ExecStart=/usr/bin/kube-apiserver \
  --allow-privileged=true --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 \
  --audit-log-path=/var/log/audit.log --authorization-mode=Node,RBAC --bind-address=0.0.0.0 \
  --client-ca-file=/etc/kubernetes/pki/ca.pem \
  --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
  --enable-swagger-ui=true \
  --etcd-cafile=/etc/kubernetes/pki/ca.pem \
  --etcd-certfile=/etc/kubernetes/pki/kubernetes.pem \
  --etcd-keyfile=/etc/kubernetes/pki/kubernetes-key.pem \
  --etcd-servers=https://$HIP_E1:2379,https://$HIP_E2:2379,https://$HIP_E3:2379 \
  --event-ttl=1h \
  --insecure-bind-address=127.0.0.1 \
  --kubelet-certificate-authority=/etc/kubernetes/pki/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/pki/$2.pem \
  --kubelet-client-key=/etc/kubernetes/pki/$2-key.pem \
  --kubelet-https=true \
  --service-account-key-file=/etc/kubernetes/pki/service-account.pem \
  --service-cluster-ip-range=196.169.0.0/16 \
  --tls-cert-file=/etc/kubernetes/pki/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/pki/kubernetes-key.pem \
  --logtostderr=false --log-dir=/var/log/kubernetes \
  --v=2
Restart=always
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
echo '#--5.9.1 config apiserver service ok'
systemctl daemon-reload
#--tempremark systemctl start kube-apiserver
echo '#--5.9.2 start kube-apiserver(skip)'
# NOTE(review): pod cluster-cidr 196.159.0.0/16 differs from the service
# CIDR 196.169.0.0/16 (they must not overlap); it must also match
# kube-proxy's clusterCIDR and the flannel network — verify all three agree.
cat>/usr/lib/systemd/system/kube-controller-manager.service<<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/bin/kube-controller-manager \
  --address=0.0.0.0 \
  --allocate-node-cidrs=true \
  --cluster-cidr=196.159.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.config\
  --leader-elect=true \
  --root-ca-file=/etc/kubernetes/pki/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/pki/service-account-key.pem \
  --service-cluster-ip-range=196.169.0.0/16 \
  --use-service-account-credentials=true \
  --logtostderr=false --log-dir=/var/log/kubernetes \
  --v=2
Restart=always
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
echo '#--5.9.3 config controller-manager service ok'
cd /etc/kubernetes/
# build kube-controller-manager.config (kubeconfig) pointing at the VIP;
# the relative --kubeconfig paths below resolve against this CWD
kubectl config set-cluster k8s-ips \
  --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true \
  --server=https://$HVIP_MP:6443 --kubeconfig=kube-controller-manager.config
kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=/etc/kubernetes/pki/kube-controller-manager.pem \
  --client-key=/etc/kubernetes/pki/kube-controller-manager-key.pem \
  --embed-certs=true --kubeconfig=kube-controller-manager.config
kubectl config set-context default \
  --cluster=k8s-ips --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.config
kubectl config use-context default --kubeconfig=kube-controller-manager.config
echo '#--5.9.4 controller-manager.config ok'
systemctl daemon-reload
#--tempremark systemctl start kube-controller-manager
echo '#--5.9.5 start kube-controller-manager(skip)'
# build kube-scheduler.config (kubeconfig)
kubectl config set-cluster k8s-ips \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true --server=https://$HVIP_MP:6443 --kubeconfig=kube-scheduler.config
kubectl config set-credentials system:kube-scheduler \
  --client-certificate=/etc/kubernetes/pki/kube-scheduler.pem \
  --client-key=/etc/kubernetes/pki/kube-scheduler-key.pem \
  --embed-certs=true  --kubeconfig=kube-scheduler.config
kubectl config set-context default \
  --cluster=k8s-ips --user=system:kube-scheduler --kubeconfig=kube-scheduler.config
kubectl config use-context default --kubeconfig=kube-scheduler.config
echo '#--5.9.6 scheduler.config ok'
# scheduler component config, written into the current dir /etc/kubernetes
cat >kube-scheduler.yaml<<EOF
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "/etc/kubernetes/kube-scheduler.config"
leaderElection:
  leaderElect: true
EOF
echo '#--5.9.7 scheduler.yaml ok'
cat>/usr/lib/systemd/system/kube-scheduler.service<<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/bin/kube-scheduler \
  --config=/etc/kubernetes/kube-scheduler.yaml \
  --logtostderr=false --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
echo '#--5.9.8 config scheduler.service ok'
systemctl daemon-reload
#--tempremark systemctl start kube-scheduler
echo '#--5.9.9 start kube-scheduler(skip)'
}

# Configure this node as a worker.
#   $1 - this worker's hostname; /etc/kubernetes/pki/$1[-key].pem
#        (CN system:node:$1, generated by config_ca) is used BOTH as the
#        kubelet client cert and as its serving cert (tlsCertFile below).
# Writes kubelet.config/.yaml/.service plus kube-proxy.config/
# kube-proxy-config.yaml/.service; service start is deferred (step 9).
function config_kubelet(){
echo "#--6.8.1 docker pull images refer to flannel"
# pre-pull the pause image from a reachable mirror (see note at step 8)
docker pull $IMAGE_PULL
kubectl config set-cluster k8s-ips \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true --server=https://$HVIP_MP:6443 --kubeconfig=/etc/kubernetes/kubelet.config
kubectl config set-credentials system:node:$1 \
  --client-certificate=/etc/kubernetes/pki/$1.pem \
  --client-key=/etc/kubernetes/pki/$1-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/kubelet.config
kubectl config set-context default \
  --cluster=k8s-ips --user=system:node:$1 --kubeconfig=/etc/kubernetes/kubelet.config
kubectl config use-context default --kubeconfig=/etc/kubernetes/kubelet.config

echo '#--6.8.2 kubelet.config ok'
# kubelet config: anonymous auth off, webhook authn/authz against the
# apiserver, cluster DNS at 196.169.0.10 (inside the service CIDR)
cat >/etc/kubernetes/kubelet.yaml <<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/etc/kubernetes/pki/ca.pem"
authorization:
  mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
  - "196.169.0.10"
runtimeRequestTimeout: "15m"
tlsCertFile: "/etc/kubernetes/pki/${1}.pem"
tlsPrivateKeyFile: "/etc/kubernetes/pki/${1}-key.pem"
EOF
echo '#--6.8.3 kubelet.yaml ok'
cat>/usr/lib/systemd/system/kubelet.service<<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
[Service]
ExecStart=/usr/bin/kubelet \
  --config=/etc/kubernetes/kubelet.yaml \
  --image-pull-progress-deadline=2m \
  --kubeconfig=/etc/kubernetes/kubelet.config \
  --pod-infra-container-image=cargo.caicloud.io/caicloud/pause-amd64:3.1 \
  --network-plugin=cni \
  --register-node=true \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/cni/bin \
  --logtostderr=false --log-dir=/var/log/kubernetes \
  --v=2
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
echo '#--6.8.4 config kubelet.service ok'
kubectl config set-cluster k8s-ips \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true --server=https://$HVIP_MP:6443 --kubeconfig=/etc/kubernetes/kube-proxy.config
kubectl config set-credentials system:kube-proxy \
  --client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
  --client-key=/etc/kubernetes/pki/kube-proxy-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/kube-proxy.config
kubectl config set-context default --cluster=k8s-ips --user=system:kube-proxy --kubeconfig=/etc/kubernetes/kube-proxy.config
kubectl config use-context default --kubeconfig=/etc/kubernetes/kube-proxy.config
echo '#--6.8.5 kube-proxy.config ok'
# NOTE(review): mode is "iptables" even though ipvs modules were loaded in
# step 3; clusterCIDR must match controller-manager --cluster-cidr — verify.
cat >/etc/kubernetes/kube-proxy-config.yaml<<EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  kubeconfig: "/etc/kubernetes/kube-proxy.config"
mode: "iptables"
clusterCIDR: "196.159.0.0/16"
EOF
echo '#--6.8.6 kube-proxy-config.yaml ok'
cat>/usr/lib/systemd/system/kube-proxy.service<<EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/bin/kube-proxy --logtostderr=false --log-dir=/var/log/kubernetes \
  --config=/etc/kubernetes/kube-proxy-config.yaml
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
echo "#--6.8.7 config kube-proxy.service ok"
systemctl daemon-reload
#--tempremark systemctl start kubelet kube-proxy
echo "#--6.8.8 start kubelet&kube-proxy(skip)"
echo "#--end--"
}

###########################################################
# bash entry
###########################################################
# Dispatch on the mode argument:
#   etcd   - write this node's etcd.service (node matched by hostname)
#   master - on the primary: generate all certs, build kubeconfigs, copy
#            certs to every node, then fan this script out over scp/ssh
#            (etcd on all three, master on the standby, worker on both
#            workers) and exit; on the standby: configure masters only
#   worker - configure kubelet/kube-proxy for this node
# NOTE(review): `[ x == y ]` is a bashism — fine because CentOS /bin/sh is
# bash, but not portable to dash.
echo "#--1.2.2 bash parameter:$1"
case $1 in
  'etcd') 
    if [ `hostname` == $HNAME_E1 ]; then 
      config_etcd $HIP_E1 `hostname`
	fi
    if [ `hostname` == $HNAME_E2 ]; then 
      config_etcd $HIP_E2 `hostname`
	fi
	if [ `hostname` == $HNAME_E3 ]; then 
      config_etcd $HIP_E3 `hostname`
	fi
  ;;
  'master')
    if [ `hostname` == $HNAME_MP ]; then
      config_ca
      config_userenv
      # distribute the freshly generated certs to every other node
      ssh $HIP_MS 'mkdir -p /etc/kubernetes/pki/'
      scp /etc/kubernetes/pki/* $HIP_MS:/etc/kubernetes/pki/
      ssh $HIP_NF 'mkdir -p /etc/kubernetes/pki/'
      scp /etc/kubernetes/pki/* $HIP_NF:/etc/kubernetes/pki/
      ssh $HIP_NS 'mkdir -p /etc/kubernetes/pki/'
      scp /etc/kubernetes/pki/* $HIP_NS:/etc/kubernetes/pki/
	  config_master $HNAME_MP $HNAME_NF
	  
	  # re-invoke this script remotely; $0 is the same absolute path on
	  # every node because the script is scp'ed into $PATH_SCRIPT first
	  if [ $HIP_E1 == $HIP_MP ]; then
	     sh $0 'etcd'
	  else
	    scp $0 $HIP_E1:$PATH_SCRIPT
	    ssh $HIP_E1 "sh $0 'etcd'"
	  fi
	  scp $0 $HIP_E2:$PATH_SCRIPT
	  ssh $HIP_E2 "sh $0 'etcd'"
	  scp $0 $HIP_E3:$PATH_SCRIPT
	  ssh $HIP_E3 "sh $0 'etcd'"
	  scp $0 $HIP_MS:$PATH_SCRIPT
	  ssh $HIP_MS "sh $0 'master'"
	  scp $0 $HIP_NF:$PATH_SCRIPT
	  ssh $HIP_NF "sh $0 'worker'"
	  scp $0 $HIP_NS:$PATH_SCRIPT
	  ssh $HIP_NS "sh $0 'worker'"
	  exit
	fi
	if [ `hostname` == $HNAME_MS ]; then 
      config_userenv
	  config_master $HNAME_MS $HNAME_NS
	fi
  ;;
  'worker') 
    config_kubelet `hostname`
  ;;
esac
###########################################################
Logo

华为开发者空间,是为全球开发者打造的专属开发空间,汇聚了华为优质开发资源及工具,致力于让每一位开发者拥有一台云主机,基于华为根生态开发、创新。

更多推荐