K8S-day02

2. Install and Deploy the Harbor Private Registry

#Harbor download page
https://github.com/goharbor/harbor
#Alternatively, upload the offline tarball directly (e.g. via rz)
rz harbor-offline-installer-v1.8.3.tgz

#Install the private registry on host 200 (the ops host)
[root@gcc-200 ~]$  wget https://storage.googleapis.com/harbor-releases/release-1.8.0/harbor-offline-installer-v1.8.3.tgz

[root@gcc-200 ~]$  ll
total 566432
-rw-------. 1 root root      1258 Jun 19  2019 anaconda-ks.cfg
-rw-r--r--  1 root root 580021898 Sep 18  2019 harbor-offline-installer-v1.8.3.tgz

[root@gcc-200 ~]$   tar xf harbor-offline-installer-v1.8.3.tgz   -C /opt
[root@gcc-200 ~]$   mv /opt/harbor  /opt/harbor-v1.8.3
[root@gcc-200 ~]$   ln -s /opt/harbor-v1.8.3 /opt/harbor       

#Edit harbor.yml (from inside the harbor directory); note that port and location are nested keys (port under http, location under log)
[root@gcc-200 /opt/harbor]$  vim harbor.yml
hostname: harbor.od.com
http:
  port: 180
harbor_admin_password: 12345
data_volume: /data/harbor
log:
  location: /data/harbor/logs

#Create the directories referenced by the config above
[root@gcc-200 /opt/harbor]$  mkdir -p  /data/harbor/logs

#Install docker-compose (Harbor's installer uses it to manage the containers)
[root@gcc-200 /opt/harbor]$  yum install -y docker-compose
[root@gcc-200 /opt/harbor]$ ./install.sh

[root@gcc-200 /opt/harbor]$  docker-compose ps
      Name                     Command               State             Ports          
--------------------------------------------------------------------------------------
harbor-core         /harbor/start.sh                 Up                               
harbor-db           /entrypoint.sh postgres          Up      5432/tcp                 
harbor-jobservice   /harbor/start.sh                 Up                               
............................. 

[root@gcc-200 /opt/harbor]$  docker ps -a
CONTAINER ID        IMAGE                                               COMMAND                  CREATED             STATUS                        PORTS                       NAMES
391b78368217        goharbor/nginx-photon:v1.8.3                        "nginx -g 'daemon of…"   2 minutes ago       Up About a minute (healthy)   0.0.0.0:180->80/tcp         nginx
203bfb8f592d        goharbor/harbor-portal:v1.8.3                       "nginx -g 'daemon of…"   2 minutes ago       Up 2 minutes (healthy)        80/tcp                      harbor-portal
f590fd43bc77        goharbor/harbor-jobservice:v1.8.3                   "/harbor/start.sh"       2 ...............................



#Install nginx via yum as a front proxy for the Harbor backend on port 180, which keeps access management simple
[root@gcc-200 /opt/harbor]$  yum install -y nginx

[root@gcc-200 /opt/harbor]$  cat /etc/nginx/conf.d/harbor.od.com.conf
server {
    listen 80;
    server_name harbor.od.com;
    client_max_body_size 1000m;
    location / {
        proxy_pass http://127.0.0.1:180;
    }
}

[root@gcc-200 /opt/harbor]$  nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@gcc-200 /opt/harbor]$  systemctl  restart nginx
[root@gcc-200 /opt/harbor]$  systemctl  enable  nginx
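
#Optional sanity check before DNS exists (a hedged sketch; the exact status code may vary): fake the Host header so nginx routes the request to the Harbor portal
[root@gcc-200 /opt/harbor]$ curl -s -o /dev/null -w "%{http_code}\n" -H "Host: harbor.od.com" http://127.0.0.1/
200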

#Configure DNS resolution (append the last line on the host running the private DNS)
[root@gcc-11 ~]$  cat /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600    ; 10 minutes
@           IN SOA    dns.od.com. dnsadmin.od.com. (
                2019060402 ; serial
                10800      ; refresh (3 hours)
                900        ; retry (15 minutes)
                604800     ; expire (1 week)
                86400      ; minimum (1 day)
                )
                NS   dns.od.com.
$TTL 60    ; 1 minute
dns                A    10.0.0.11
harbor             A    10.0.0.200

#Remember to restart the named service
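
#A minimal verification sketch, assuming named answers on 10.0.0.11:
[root@gcc-11 ~]$ systemctl restart named
[root@gcc-11 ~]$ dig -t A harbor.od.com @10.0.0.11 +short
10.0.0.200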

#Access from a browser
harbor.od.com
admin    12345

3. Push and Pull Images with the Private Registry

#Pull an nginx image from the official registry
[root@gcc-200 /opt/harbor]$  docker pull nginx:1.18.0

#Tag the downloaded image in the private-registry format (harbor.od.com/public/xxxxx)
[root@gcc-200 ~]$  docker  tag  741d47c34fe0  harbor.od.com/public/nginx:v1.18.0

#Push the tagged image to our private registry
[root@gcc-200 ~]$  docker push harbor.od.com/public/nginx:v1.18.0
The push refers to repository [harbor.od.com/public/nginx]
3c445cf708a5: Preparing 
3e1e3bb78a57: Preparing 
91776dace4ca: Preparing 
ffc9b21953f4: Preparing 
#The push is denied because we are not logged in
denied: requested access to the resource is denied

#Log in
[root@gcc-200 ~]$  docker login harbor.od.com
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded

#Push again; this time it succeeds
[root@gcc-200 ~]$  docker push harbor.od.com/public/nginx:v1.18.0
The push refers to repository [harbor.od.com/public/nginx]
3c445cf708a5: Pushed 
3e1e3bb78a57: Pushed 
91776dace4ca: Pushed 
ffc9b21953f4: Pushed 
v1.18.0: digest: sha256:637488545a21a1ff771549ef65f5e3c1a8dbd92c98d360ac489d76b857021a55 size: 1155

#Pull the customized image we built earlier, to keep on hand (the repo name matches the transcript output below)
[root@gcc-200 ~]$  docker pull qls123/nginx:1.18.0-curl
1.18.0-curl: Pulling from qls123/nginx
afb6ec6fdc1c: Already exists 
2e231683bfde: Already exists 
511e2efefada: Already exists 
e8fd0ec105c9: Already exists 
ca64879d5edd: Pull complete 
Digest: sha256:3ce416551486669167f4e9e02e9c297f209c94c4c3c1cc952ee0cc89f8a12f80
Status: Downloaded newer image for qls123/nginx:1.18.0-curl
docker.io/qls123/nginx:1.18.0-curl

#Tag the image we just pulled in the private-registry format, then push it
[root@gcc-200 ~]$  docker images | grep curl
qls123/nginx                    1.18.0-curl                eb440e397100        2 days ago          153MB
[root@gcc-200 ~]$  docker tag eb440e397100  harbor.od.com/public/nginx:v1.18.0-curl
[root@gcc-200 ~]$  docker push harbor.od.com/public/nginx:v1.18.0-curl
The push refers to repository [harbor.od.com/public/nginx]
17cf1ea49bfe: Pushed 
3c445cf708a5: Layer already exists 
3e1e3bb78a57: Layer already exists 
91776dace4ca: Layer already exists 
ffc9b21953f4: Layer already exists 
v1.18.0-curl: digest: sha256:3ce416551486669167f4e9e02e9c297f209c94c4c3c1cc952ee0cc89f8a12f80 size: 1367

#Test pulling the image from another host (do not delete it afterwards)
[root@gcc-21 ~]$  docker pull harbor.od.com/public/nginx:v1.18.0
v1.18.0: Pulling from public/nginx
afb6ec6fdc1c: Pull complete 
2e231683bfde: Pull complete 
511e2efefada: Pull complete 
e8fd0ec105c9: Pull complete 
Digest: sha256:637488545a21a1ff771549ef65f5e3c1a8dbd92c98d360ac489d76b857021a55
Status: Downloaded newer image for harbor.od.com/public/nginx:v1.18.0
harbor.od.com/public/nginx:v1.18.0

[root@gcc-21 ~]$  docker images
REPOSITORY                   TAG                 IMAGE ID            CREATED             SIZE
harbor.od.com/public/nginx   v1.18.0             741d47c34fe0        2 weeks ago         132MB

4. Install and Deploy the Control-Plane Etcd

gcc-12         leader
gcc-21          follower
gcc-22          follower


#Configure certificates on host 200 (the ops host)
[root@gcc-200 /opt/certs]$  vi  ca-config.json
{
    "signing": {
        "default": {
            "expiry": "175200h"
        },
        "profiles": {
            "server": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth"
                ]
            },
            "client": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "175200h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}


#Create the JSON config for the certificate signing request (CSR)
[root@gcc-200 /opt/certs]$   cat etcd-peer-csr.json
{
    "CN": "k8s-etcd",
    "hosts": [
        "10.0.0.11",
        "10.0.0.12",
        "10.0.0.21",
        "10.0.0.22"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shanghai",
            "L": "shanghai",
            "O": "od",
            "OU": "ops"
        }
    ]
}


#Generate the etcd certificate and private key
[root@gcc-200 /opt/certs]$  cfssl  gencert  -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer  etcd-peer-csr.json | cfssl-json  -bare  etcd-peer
2020/06/05 10:42:09 [INFO] generate received request
2020/06/05 10:42:09 [INFO] received CSR
2020/06/05 10:42:09 [INFO] generating key: rsa-2048
2020/06/05 10:42:09 [INFO] encoded CSR
2020/06/05 10:42:09 [INFO] signed certificate with serial number 40664855453404084274231429057985768039050703509
2020/06/05 10:42:09 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

[root@gcc-200 /opt/certs]$  ll
total 36
-rw-r--r-- 1 root root  837 Jun  5 10:36 ca-config.json
-rw-r--r-- 1 root root  330 Jun  4 20:23 ca-csr.json
-rw------- 1 root root 1679 Jun  4 20:24 ca-key.pem
-rw-r--r-- 1 root root  997 Jun  4 20:24 ca.csr
-rw-r--r-- 1 root root 1350 Jun  4 20:24 ca.pem
-rw-r--r-- 1 root root  365 Jun  5 10:38 etcd-peer-csr.json
-rw------- 1 root root 1679 Jun  5 10:42 etcd-peer-key.pem
-rw-r--r-- 1 root root 1066 Jun  5 10:42 etcd-peer.csr
-rw-r--r-- 1 root root 1432 Jun  5 10:42 etcd-peer.pem
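
#Optionally confirm the SANs baked into the peer certificate (a hedged check with plain openssl):
[root@gcc-200 /opt/certs]$ openssl x509 -in etcd-peer.pem -noout -text | grep -A1 'Subject Alternative Name'
#Expect the four IPs listed in etcd-peer-csr.json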



#Run on every etcd host in the architecture
[root@gcc-12 ~]$ useradd -s /sbin/nologin -M etcd 
[root@gcc-12 ~]$ id etcd
uid=1000(etcd) gid=1000(etcd) groups=1000(etcd)

#etcd download page (on every etcd host)
https://github.com/etcd-io/etcd/
[root@gcc-12 ~]$ wget https://github.com/etcd-io/etcd/releases/download/v3.1.20/etcd-v3.1.20-linux-amd64.tar.gz

#Extract, install, and create the symlink (on every etcd host)
[root@gcc-12 ~]$ tar xf etcd-v3.1.20-linux-amd64.tar.gz  -C /opt/
[root@gcc-12 ~]$ ll /opt/
total 0
drwxr-xr-x 3 478493 89939 123 Oct 11  2018 etcd-v3.1.20-linux-amd64
[root@gcc-12 ~]$ ln -s /opt/etcd-v3.1.20-linux-amd64/  /opt/etcd
[root@gcc-12 ~]$ ll /opt/
total 0
lrwxrwxrwx 1 root   root   30 Jun  5 10:49 etcd -> /opt/etcd-v3.1.20-linux-amd64/
drwxr-xr-x 3 478493 89939 123 Oct 11  2018 etcd-v3.1.20-linux-amd64

#(on every etcd host)
[root@gcc-12 ~]$ cd /opt/etcd
[root@gcc-12 /opt/etcd]$ mkdir certs
[root@gcc-12 /opt/etcd]$ cd certs
[root@gcc-12 /opt/etcd/certs]$ scp -rp gcc-200:/opt/certs/etcd-peer-key.pem  ./
[root@gcc-12 /opt/etcd/certs]$ scp -rp gcc-200:/opt/certs/etcd-peer.pem  ./
[root@gcc-12 /opt/etcd/certs]$ scp -rp gcc-200:/opt/certs/ca.pem  ./
[root@gcc-12 /opt/etcd/certs]$ ll
total 12
-rw-r--r-- 1 root root 1350 Jun  4 20:24 ca.pem
-rw------- 1 root root 1679 Jun  5 10:42 etcd-peer-key.pem
-rw-r--r-- 1 root root 1432 Jun  5 10:42 etcd-peer.pem

#(on every etcd host)
[root@gcc-12 /opt/etcd/certs]$  mkdir -p  /data/etcd  /data/logs/etcd-server  && chown -R  etcd.etcd  /opt/etcd/certs/  /data/etcd  /data/logs/etcd-server



#Write the etcd service startup script (on every etcd host)
#Naming: in etcd-server-X-Y, X-Y conventionally encodes the host IP; these notes keep the
#7-Y suffix from the original 10.4.7.0/24 lab network even though this cluster uses 10.0.0.0/24.
#On each host, replace every 10.0.0.12 below with that host's own IP (--listen-peer-urls, the
#first address of --listen-client-urls, --initial-advertise-peer-urls, --advertise-client-urls).
#--initial-cluster maps each member name to its peer IP and stays identical on all hosts.
#Do not put '#' comment lines between the continued lines: a comment inside a backslash
#continuation breaks the command.
[root@gcc-12 /opt/etcd/certs]$ cat /opt/etcd/etcd-server-startup.sh

#!/bin/sh
./etcd --name etcd-server-7-12 \
       --data-dir /data/etcd/etcd-server \
       --listen-peer-urls https://10.0.0.12:2380 \
       --listen-client-urls https://10.0.0.12:2379,http://127.0.0.1:2379 \
       --quota-backend-bytes 8000000000 \
       --initial-advertise-peer-urls https://10.0.0.12:2380 \
       --advertise-client-urls https://10.0.0.12:2379,http://127.0.0.1:2379 \
       --initial-cluster  etcd-server-7-12=https://10.0.0.12:2380,etcd-server-7-21=https://10.0.0.21:2380,etcd-server-7-22=https://10.0.0.22:2380 \
       --ca-file ./certs/ca.pem \
       --cert-file ./certs/etcd-peer.pem \
       --key-file ./certs/etcd-peer-key.pem \
       --client-cert-auth  \
       --trusted-ca-file ./certs/ca.pem \
       --peer-ca-file ./certs/ca.pem \
       --peer-cert-file ./certs/etcd-peer.pem \
       --peer-key-file ./certs/etcd-peer-key.pem \
       --peer-client-cert-auth \
       --peer-trusted-ca-file ./certs/ca.pem \
       --log-output stdout

[root@gcc-12 /opt/etcd]$  chmod +x etcd-server-startup.sh


#Install supervisor (on every etcd host)
[root@gcc-12 /opt/etcd]$  yum install -y supervisor
[root@gcc-12 /opt/etcd]$  systemctl  start supervisord.service && systemctl  enable  supervisord.service 

#Create the etcd supervisor config; only the program name on the first line changes per host (on every etcd host)
[root@gcc-12 /opt/etcd/certs]$  vim /etc/supervisord.d/etcd-server.ini
#The program name follows the same 7-Y convention as the startup script
[program:etcd-server-7-12]
command=/opt/etcd/etcd-server-startup.sh                        ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/etcd                                             ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; retstart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log           ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)


#Start the service via supervisor and check it (on every etcd host)
[root@gcc-12 /opt/etcd]$ supervisorctl  update
etcd-server-7-12: added process group
[root@gcc-12 /opt/etcd]$ supervisorctl  status
etcd-server-7-12                 RUNNING   pid 9724, uptime 0:00:31

#Can be tested on any etcd host
[root@gcc-12 /opt/etcd]$ ./etcdctl   cluster-health
member 988139385f78284 is healthy: got healthy result from http://127.0.0.1:2379
member 5a0ef2a004fc4349 is healthy: got healthy result from http://127.0.0.1:2379
member f4a0cb0a765574a8 is healthy: got healthy result from http://127.0.0.1:2379
cluster is healthy

#Can be tested on any etcd host; the member with isLeader=true is the leader
[root@gcc-12 /opt/etcd]$ ./etcdctl  member  list   
988139385f78284: name=etcd-server-7-22 peerURLs=https://10.0.0.22:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.22:2379 isLeader=false
5a0ef2a004fc4349: name=etcd-server-7-21 peerURLs=https://10.0.0.21:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.21:2379 isLeader=false
f4a0cb0a765574a8: name=etcd-server-7-12 peerURLs=https://10.0.0.12:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.12:2379 isLeader=true

#Can be tested on any etcd host
[root@gcc-12 /opt/etcd]$ netstat  -lntp 
tcp        0      0 10.0.0.12:2379          0.0.0.0:*               LISTEN      9725/./etcd         
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      9725/./etcd         
tcp        0      0 10.0.0.12:2380          0.0.0.0:*               LISTEN      9725/./etcd
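
#A hedged extra check over the TLS client endpoint, using the v2 etcdctl flags shipped with etcd v3.1.20:
[root@gcc-12 /opt/etcd]$ ./etcdctl --ca-file ./certs/ca.pem --cert-file ./certs/etcd-peer.pem --key-file ./certs/etcd-peer-key.pem --endpoints https://10.0.0.12:2379 cluster-health
#Should again report: cluster is healthy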

5. Install and Deploy the Control-Plane Apiserver

#The plan here is to install on hosts 21 and 22. With enough machines the server (control-plane) components could run on dedicated hosts; here server and node share the same machines. Hosts 11 and 12 (the proxies) will later be configured as the layer-4 load balancer.


#Download and install the apiserver (every planned control-plane host)
[root@gcc-22 /opt/etcd/certs]$ wget https://dl.k8s.io/v1.15.12/kubernetes-server-linux-amd64.tar.gz

#Extract and create the symlink (every planned control-plane host)
[root@gcc-21 ~]$ tar xf kubernetes-server-linux-amd64.tar.gz  -C  /opt && mv /opt/kubernetes  /opt/kubernetes-v1.15.12   && ln -s /opt/kubernetes-v1.15.12 /opt/kubernetes 

#Optional cleanup: remove files we don't need to keep (every planned control-plane host)
[root@gcc-21 ~]$ cd /opt/kubernetes  && rm -f kubernetes-src.tar.gz  &&  cd server/bin/ && rm -f *_tag   && rm -f *.tar

#Sign the certificates
#on host 200 (the ops host)
[root@gcc-200 /opt/certs]$ vi client-csr.json
{
    "CN": "k8s-node",
    "hosts": [
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shanghai",
            "L": "shangahi",
            "O": "od",
            "OU": "ops"
        }
    ]
}

#Generate the certificate and private-key files
[root@gcc-200 /opt/certs]$ cfssl gencert  -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json | cfssl-json  -bare client
2020/06/05 12:08:47 [INFO] generate received request
2020/06/05 12:08:47 [INFO] received CSR
2020/06/05 12:08:47 [INFO] generating key: rsa-2048
2020/06/05 12:08:49 [INFO] encoded CSR
2020/06/05 12:08:49 [INFO] signed certificate with serial number 708081291377090734211766588824251935698712311788
2020/06/05 12:08:49 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@gcc-200 /opt/certs]$ ll | grep client
-rw-r--r-- 1 root root  282 Jun  5 12:06 client-csr.json
-rw------- 1 root root 1679 Jun  5 12:08 client-key.pem
-rw-r--r-- 1 root root  997 Jun  5 12:08 client.csr
-rw-r--r-- 1 root root 1371 Jun  5 12:08 client.pem
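
#Optionally confirm the client certificate chains to our CA (a hedged check with plain openssl):
[root@gcc-200 /opt/certs]$ openssl verify -CAfile ca.pem client.pem
client.pem: OK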


#Sign the kube-apiserver certificate
#In the hosts list: 10.0.0.10 is the VIP on the proxy hosts; 10.0.0.21 and 10.0.0.22 are the
#k8s server IPs currently in use; 10.0.0.23 is reserved in case a server node is added later.
#JSON does not allow comments, so keep the file itself comment-free.
[root@gcc-200 /opt/certs]$  vim apiserver-csr.json
{
    "CN": "k8s-apiserver",
    "hosts": [
        "127.0.0.1",
        "192.168.0.1",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local",
        "10.0.0.10",
        "10.0.0.21",
        "10.0.0.22",
        "10.0.0.23"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shanghai",
            "L": "shanghai",
            "O": "od",
            "OU": "ops"
        }
    ]
}


#Generate the certificate and private key
[root@gcc-200 /opt/certs]$ cfssl gencert -ca=ca.pem  -ca-key=ca-key.pem  -config=ca-config.json  -profile=server  apiserver-csr.json| cfssl-json -bare apiserver
2020/06/05 12:16:11 [INFO] generate received request
2020/06/05 12:16:11 [INFO] received CSR
2020/06/05 12:16:11 [INFO] generating key: rsa-2048
2020/06/05 12:16:12 [INFO] encoded CSR
2020/06/05 12:16:12 [INFO] signed certificate with serial number 277481596778287505893241360711745472937951359932
2020/06/05 12:16:12 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@gcc-200 /opt/certs]$ ll | grep apiserver
-rw-r--r-- 1 root root  568 Jun  5 12:12 apiserver-csr.json
-rw------- 1 root root 1679 Jun  5 12:16 apiserver-key.pem
-rw-r--r-- 1 root root 1253 Jun  5 12:16 apiserver.csr
-rw-r--r-- 1 root root 1602 Jun  5 12:16 apiserver.pem


#Operate on hosts 21 and 22 (every planned control-plane host)

#cd to /opt/kubernetes/server/bin and create the certs and conf directories (every planned control-plane host)
[root@gcc-21 /opt/kubernetes/server/bin]$ mkdir -p  certs   conf

#cd into certs/ and scp over the certificates just signed on the ops host (every planned control-plane host)
[root@gcc-21 /opt/kubernetes/server/bin]$ cd certs/
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/apiserver-key.pem  ./  
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/apiserver.pem  ./    
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/ca.pem  ./    
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/ca-key.pem  ./  
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/client-key.pem  ./  
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/client.pem  ./
The directory then contains:
-rw------- 1 root root 1679 Jun  5 12:16 apiserver-key.pem
-rw-r--r-- 1 root root 1602 Jun  5 12:16 apiserver.pem
-rw------- 1 root root 1679 Jun  4 20:24 ca-key.pem
-rw-r--r-- 1 root root 1350 Jun  4 20:24 ca.pem
-rw------- 1 root root 1679 Jun  5 12:08 client-key.pem
-rw-r--r-- 1 root root 1371 Jun  5 12:08 client.pem


#cd into the conf directory just created and write the audit policy (every planned control-plane host)
[root@gcc-21 /opt/kubernetes/server/bin/conf]$  vi audit.yaml
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]

  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]

  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]

  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*" # Wildcard matching.
    - "/version"

  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]

  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]

  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.

  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"


#Create the startup script (every planned control-plane host)
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ cd ..
#Adjust the --etcd-servers IPs to your etcd members (keep port 2379). The
#--service-cluster-ip-range is the Services network and needs no change. As before,
#don't place '#' lines inside the backslash continuation.
[root@gcc-21 /opt/kubernetes/server/bin]$  vim kube-apiserver.sh
#!/bin/bash
./kube-apiserver \
  --apiserver-count 2 \
  --audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
  --audit-policy-file ./conf/audit.yaml \
  --authorization-mode RBAC \
  --client-ca-file ./certs/ca.pem \
  --requestheader-client-ca-file ./certs/ca.pem \
  --enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
  --etcd-cafile ./certs/ca.pem \
  --etcd-certfile ./certs/client.pem \
  --etcd-keyfile ./certs/client-key.pem \
  --etcd-servers https://10.0.0.12:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 \
  --service-account-key-file ./certs/ca-key.pem \
  --service-cluster-ip-range 192.168.0.0/16 \
  --service-node-port-range 3000-29999 \
  --target-ram-mb=1024 \
  --kubelet-client-certificate ./certs/client.pem \
  --kubelet-client-key ./certs/client-key.pem \
  --log-dir  /data/logs/kubernetes/kube-apiserver \
  --tls-cert-file ./certs/apiserver.pem \
  --tls-private-key-file ./certs/apiserver-key.pem \
  --v 2

[root@gcc-21 /opt/kubernetes/server/bin]$ chmod +x kube-apiserver.sh



#Create the supervisor config (every planned control-plane host)
[root@gcc-21 /opt/kubernetes/server/bin]$ mkdir -p /data/logs/kubernetes/kube-apiserver
[root@gcc-21 /opt/kubernetes/server/bin]$ cat /etc/supervisord.d/kube-apiserver.ini
#Note the program name below (same 7-Y convention)
[program:kube-apiserver-7-21]
command=/opt/kubernetes/server/bin/kube-apiserver.sh            ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                            ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; retstart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log        ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                     ; emit events on stdout writes (default false)

#(every planned control-plane host)
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl  update
kube-apiserver-7-21: added process group
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl  status
etcd-server-7-21                 RUNNING   pid 12144, uptime 0:53:24
kube-apiserver-7-21              STARTING  
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl  status
etcd-server-7-21                 RUNNING   pid 12144, uptime 0:53:26
kube-apiserver-7-21              RUNNING   pid 12340, uptime 0:00:30
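
#A hedged sanity check: v1.15 still serves an insecure local port (8080) by default, so the apiserver can be probed without certificates; 6443 is the TLS port the proxies will target
[root@gcc-21 /opt/kubernetes/server/bin]$ curl -s http://127.0.0.1:8080/healthz
ok
[root@gcc-21 /opt/kubernetes/server/bin]$ ss -lnt | grep -E '6443|8080'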

6. Install and Deploy the Layer-4 Reverse Proxy and High Availability

#Do the following on every proxy host; the IP addresses may need adjusting
[root@gcc-11 ~]$ yum install -y nginx
[root@gcc-11 ~]$ vim /etc/nginx/nginx.conf

#Append at the very end of nginx.conf
stream {
    upstream kube-apiserver {
        server 10.0.0.21:6443  max_fails=3 fail_timeout=30s;
        server 10.0.0.22:6443  max_fails=3 fail_timeout=30s;
    }
    server {
        listen       7443;
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}

[root@gcc-11 ~]$ nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@gcc-11 ~]$ systemctl  start nginx
[root@gcc-11 ~]$ systemctl  enable nginx
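
#A hedged check that the stream proxy is up (the backend response itself depends on apiserver auth, so just confirm the listener here):
[root@gcc-11 ~]$ ss -lnt | grep 7443
#Should show nginx listening on port 7443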


#Install and deploy high availability
[root@gcc-11 ~]$ yum install -y keepalived

[root@gcc-11 ~]$ cat /etc/keepalived/check_port.sh
#!/bin/bash
#keepalived port-monitoring script (the shebang must stay on the first line)
#Usage, inside keepalived.conf:
#vrrp_script check_port {                          #define a vrrp_script check
#    script "/etc/keepalived/check_port.sh 6379"   #the port to monitor
#    interval 2                                    #check interval in seconds
#}
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
        PORT_PROCESS=`ss -lnt|grep $CHK_PORT|wc -l`
        if [ $PORT_PROCESS -eq 0 ];then
                echo "Port $CHK_PORT Is Not Used,End."
                exit 1
        fi
else
        echo "Check Port Cannot Be Empty!"
        exit 1
fi

[root@gcc-11 ~]$ chmod +x /etc/keepalived/check_port.sh


#Configure the keepalived configuration files
#Configure master and backup
#keepalived master:
[root@gcc-11 ~]$ vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   router_id 10.0.0.11

}

vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 251
    priority 100
    advert_int 1
    mcast_src_ip 10.0.0.11
    nopreempt

    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
         chk_nginx
    }
    virtual_ipaddress {
        10.0.0.10
    }
}

#keepalived backup:
[root@gcc-12 ~]$ cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id 10.0.0.12
}
vrrp_script chk_nginx {
    script "/etc/keepalived/check_port.sh 7443"
    interval 2
    weight -20
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 251
    mcast_src_ip 10.0.0.12
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 11111111
    }
    track_script {
        chk_nginx
    }
    virtual_ipaddress {
        10.0.0.10
    }
}

#Start keepalived (on both proxy hosts)
[root@gcc-11 ~]$ systemctl start keepalived.service  
[root@gcc-11 ~]$ systemctl enable keepalived.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@gcc-11 ~]$ ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:15:5d:32:c5:01 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.11/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.0.0.10/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::215:5dff:fe32:c501/64 scope link 
       valid_lft forever preferred_lft forever
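
#A hedged failover sketch: stopping nginx on the master makes check_port.sh fail, weight -20 drops the priority below the backup's 90, and the VIP moves; because the master carries nopreempt, the VIP will not automatically move back once nginx recovers
[root@gcc-11 ~]$ systemctl stop nginx
[root@gcc-12 ~]$ ip a show eth0 | grep 10.0.0.10
#The VIP should now appear on gcc-12
[root@gcc-11 ~]$ systemctl start nginx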

7. Install and Deploy the Controller-manager

#Install on hosts 21 and 22 (every planned control-plane host)
#Create the startup script
[root@gcc-21 ~]$  vim /opt/kubernetes/server/bin/kube-controller-manager.sh
#!/bin/sh
./kube-controller-manager \
  --cluster-cidr 172.7.0.0/16 \
  --leader-elect true \
  --log-dir /data/logs/kubernetes/kube-controller-manager \
  --master http://127.0.0.1:8080 \
  --service-account-private-key-file ./certs/ca-key.pem \
  --service-cluster-ip-range 192.168.0.0/16 \
  --root-ca-file ./certs/ca.pem \
  --v 2


[root@gcc-21 ~]$ chmod +x /opt/kubernetes/server/bin/kube-controller-manager.sh


#Create the supervisor config (every planned control-plane host)
[root@gcc-21 ~]$  mkdir -p /data/logs/kubernetes/kube-controller-manager
[root@gcc-21 ~]$  vim  /etc/supervisord.d/kube-controller-manager.ini
#Note the program name below (same 7-Y convention)
[program:kube-controller-manager-7-21]
command=/opt/kubernetes/server/bin/kube-controller-manager.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                                        ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                              ; directory to cwd to before exec (def no cwd)
autostart=true                                                                    ; start at supervisord start (default: true)
autorestart=true                                                                  ; retstart at unexpected quit (default: true)
startsecs=22                                                                      ; number of secs prog must stay running (def. 1)
startretries=3                                                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                         ; setuid to this UNIX account to run the program
redirect_stderr=false                                                             ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controll.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                                       ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-controller-manager/controll.stderr.log  ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                                          ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false                                                       ; emit events on stderr writes (default false)

#(every planned control-plane host)
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl  update
kube-controller-manager-7-21: added process group
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl  status
etcd-server-7-21                 RUNNING   pid 12144, uptime 3:44:38
kube-apiserver-7-21              RUNNING   pid 12340, uptime 2:51:42
kube-controller-manager-7-21     RUNNING   pid 12685, uptime 0:00:45
[root@gcc-22 /opt/kubernetes/server/bin]$ supervisorctl  update
kube-controller-manager-7-22: added process group
[root@gcc-22 /opt/kubernetes/server/bin]$ supervisorctl  status
etcd-server-7-22                 RUNNING   pid 11812, uptime 3:38:10
kube-apiserver-7-22              RUNNING   pid 11954, uptime 2:46:47
kube-controller-manager-7-22     RUNNING   pid 12298, uptime 0:00:34
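
#A hedged peek at leader election (in v1.15 the lock is an annotation on an Endpoints object):
[root@gcc-21 /opt/kubernetes/server/bin]$ ./kubectl -n kube-system get endpoints kube-controller-manager -o yaml | grep holderIdentity
#The holderIdentity field names the instance currently holding the lock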

8. Install and Deploy the Kube-scheduler

#Configure on hosts 21 and 22 (every planned control-plane host)
#Create the startup script (note: nothing, not even a space, may follow the trailing \ on a line)
[root@gcc-21 ~]$ cat /opt/kubernetes/server/bin/kube-scheduler.sh
#!/bin/sh
./kube-scheduler \
  --leader-elect  \
  --log-dir /data/logs/kubernetes/kube-scheduler \
  --master http://127.0.0.1:8080 \
  --v 2

[root@gcc-21 ~]$ chmod +x /opt/kubernetes/server/bin/kube-scheduler.sh

#Create the supervisor config (every planned control-plane host)
[root@gcc-21 ~]$  mkdir -p /data/logs/kubernetes/kube-scheduler
[root@gcc-21 ~]$  vim /etc/supervisord.d/kube-scheduler.ini
#Note the 7-21 program name below
[program:kube-scheduler-7-21]
command=/opt/kubernetes/server/bin/kube-scheduler.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                               ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                     ; directory to cwd to before exec (def no cwd)
autostart=true                                                           ; start at supervisord start (default: true)
autorestart=true                                                         ; retstart at unexpected quit (default: true)
startsecs=22                                                             ; number of secs prog must stay running (def. 1)
startretries=3                                                           ; max # of serial start failures (default 3)
exitcodes=0,2                                                            ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                          ; signal used to kill process (default TERM)
stopwaitsecs=10                                                          ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                ; setuid to this UNIX account to run the program
redirect_stderr=false                                                    ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                             ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                              ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                              ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                             ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                                 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                              ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false                                              ; emit events on stderr writes (default false)

#(every planned control-plane host)
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl  update
kube-scheduler-7-21: added process group
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl  status
etcd-server-7-21                 RUNNING   pid 11812, uptime 4:17:43
kube-apiserver-7-21             RUNNING   pid 11954, uptime 3:26:20
kube-controller-manager-7-21     RUNNING   pid 12298, uptime 0:40:07
kube-scheduler-7-21              RUNNING   pid 12393, uptime 0:01:23


#(every planned control-plane host) Symlink kubectl so it can be used directly
[root@gcc-21 /opt/kubernetes/server/bin]$ ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl
[root@gcc-21 ~]$ kubectl get  cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                   
controller-manager   Healthy   ok                   
etcd-1               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}   
etcd-0               Healthy   {"health": "true"}

9. Deploy the Node Component Kubelet

#This deploys kubelet on hosts 21 and 22 (every planned node in the architecture)

#First create the certificate: sign the kubelet certificate on host 200.
#In the hosts list below, keep 127.0.0.1 as-is; the remaining entries are the node IPs in use
#plus reserved future node IPs (JSON does not allow comments, so the notes live here).
[root@gcc-200 ~]$ cd /opt/certs/
[root@gcc-200 /opt/certs]$ vi kubelet-csr.json

{
    "CN": "kubelet-node",
    "hosts": [
        "127.0.0.1",
        "10.0.0.10",
        "10.0.0.21",
        "10.0.0.22",
        "10.0.0.23",
        "10.0.0.24",
        "10.0.0.25",
        "10.0.0.26",
        "10.0.0.27",
        "10.0.0.28"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shanghai",
            "L": "shanghai",
            "O": "od",
            "OU": "ops"
        }
    ]
}


#Generate the certificate and private key
[root@gcc-200 /opt/certs]$ cfssl gencert  -ca=ca.pem  -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json|cfssl-json -bare kubelet
2020/06/05 16:21:29 [INFO] generate received request
2020/06/05 16:21:29 [INFO] received CSR
2020/06/05 16:21:29 [INFO] generating key: rsa-2048
2020/06/05 16:21:30 [INFO] encoded CSR
2020/06/05 16:21:30 [INFO] signed certificate with serial number 623121733493774593674761612818452339217510672422
2020/06/05 16:21:30 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@gcc-200 /opt/certs]$ ll | grep kubelet
-rw-r--r-- 1 root root  455 Jun  5 16:19 kubelet-csr.json
-rw------- 1 root root 1675 Jun  5 16:21 kubelet-key.pem
-rw-r--r-- 1 root root 1119 Jun  5 16:21 kubelet.csr
-rw-r--r-- 1 root root 1472 Jun  5 16:21 kubelet.pem


#Operate on hosts 21 and 22 (every planned node ---- the target directory must be kubernetes/server/bin/certs)
[root@gcc-21 ~]$ cd /opt/kubernetes/server/bin/certs/
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/kubelet-key.pem  ./ 
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/kubelet.pem  ./


#Build and distribute the kubeconfig
#Configure set-cluster (only one of the planned nodes needs to run this ---- the directory must be kubernetes/server/bin/conf)
[root@gcc-21 ~]$ cd /opt/kubernetes/server/bin/conf
[root@gcc-21 /opt/kubernetes/server/bin/conf]$  kubectl config set-cluster myk8s \
 --certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
 --embed-certs=true \
 --server=https://10.0.0.10:7443 \
 --kubeconfig=kubelet.kubeconfig

Cluster "myk8s" set.

#Configure set-credentials (only one of the planned nodes ---- the directory must be kubernetes/server/bin/conf)
[root@gcc-21 /opt/kubernetes/server/bin/conf]$  kubectl config set-credentials k8s-node  --client-certificate=/opt/kubernetes/server/bin/certs/client.pem  \
--client-key=/opt/kubernetes/server/bin/certs/client-key.pem   \
--embed-certs=true  \
--kubeconfig=kubelet.kubeconfig 


User "k8s-node" set.


#Configure set-context (only one of the planned nodes ---- the directory must be kubernetes/server/bin/conf)
[root@gcc-21 /opt/kubernetes/server/bin/conf]$   kubectl config set-context myk8s-context  --cluster=myk8s \
--user=k8s-node \
--kubeconfig=kubelet.kubeconfig



Context "myk8s-context" created.


#Configure use-context (only one of the planned nodes ---- the directory must be kubernetes/server/bin/conf)
[root@gcc-21 /opt/kubernetes/server/bin/conf]$  kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig

Switched to context "myk8s-context".
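
#Optional hedged check of the assembled kubeconfig (certificate data is redacted in the output):
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl config view --kubeconfig=kubelet.kubeconfig
#Expect cluster myk8s pointing at https://10.0.0.10:7443, user k8s-node, and current-context myk8s-context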

#Create the resource manifest k8s-node.yaml for the cluster-role binding (only one of the planned nodes ---- the directory must be kubernetes/server/bin/conf)
[root@gcc-21 /opt/kubernetes/server/bin/conf]$  vim k8s-node.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node

#Apply the resource manifest (only one of the planned nodes ---- the directory must be kubernetes/server/bin/conf)
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl  create -f k8s-node.yaml


clusterrolebinding.rbac.authorization.k8s.io/k8s-node created

#Check the result
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl  get  clusterrolebinding  k8s-node 
NAME       AGE
k8s-node   2m8s

#View the YAML of the node's cluster-role binding
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl  get  clusterrolebinding  k8s-node  -o yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: "2020-06-05T08:36:40Z"
  name: k8s-node
  resourceVersion: "6210"
  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/k8s-node
  uid: 77822258-faf5-4733-ad58-15c0efbdfb39
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node

#On every other planned node (all except the one that just ran the steps above), simply copy the two generated files over instead of recreating them ---- the directory must be kubernetes/server/bin/conf
[root@gcc-22 /opt/kubernetes/server/bin/conf]$ scp -rp gcc-21:/opt/kubernetes/server/bin/conf/kubelet.kubeconfig  ./
[root@gcc-22 /opt/kubernetes/server/bin/conf]$ scp -rp gcc-21:/opt/kubernetes/server/bin/conf/k8s-node.yaml  ./
[root@gcc-22 /opt/kubernetes/server/bin/conf]$ ll
total 16
-rw-r--r-- 1 root root 2223 Jun  5 12:34 audit.yaml
-rw-r--r-- 1 root root  258 Jun  5 16:35 k8s-node.yaml
-rw------- 1 root root 6211 Jun  5 16:33 kubelet.kubeconfig


#On host 200 (the ops host), prepare the pause base image
[root@gcc-200 ~]$ docker pull kubernetes/pause
[root@gcc-200 ~]$ docker images | grep pause
kubernetes/pause                latest                     f9d5de079539        5 years ago         240kB
[root@gcc-200 ~]$ docker tag f9d5de079539  harbor.od.com/public/pause:latest
[root@gcc-200 ~]$ docker push harbor.od.com/public/pause:latest
The push refers to repository [harbor.od.com/public/pause]
5f70bf18a086: Pushed 
e16a89738269: Pushed 
latest: digest: sha256:b31bfb4d0213f254d361e0079deaaebefa4f82ba7aa76ef82e90b4935ad5b105 size: 938


#Write the kubelet startup script (every planned node)
#Change --hostname-override to the hostname of the node being configured; as before,
#don't place '#' lines inside the backslash continuation.
[root@gcc-22 ~]$  vim /opt/kubernetes/server/bin/kubelet.sh
#!/bin/sh
./kubelet \
  --anonymous-auth=false \
  --cgroup-driver systemd \
  --cluster-dns 192.168.0.2 \
  --cluster-domain cluster.local \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice \
  --fail-swap-on="false" \
  --client-ca-file ./certs/ca.pem \
  --tls-cert-file ./certs/kubelet.pem \
  --tls-private-key-file ./certs/kubelet-key.pem \
  --hostname-override gcc-22.host.com \
  --image-gc-high-threshold 20 \
  --image-gc-low-threshold 10 \
  --kubeconfig ./conf/kubelet.kubeconfig \
  --log-dir /data/logs/kubernetes/kube-kubelet \
  --pod-infra-container-image harbor.od.com/public/pause:latest \
  --root-dir /data/kubelet

#Check the script and make it executable (every planned node)
[root@gcc-22 ~]$ chmod +x /opt/kubernetes/server/bin/kubelet.sh

#Create the supervisor config and the log/data directories (every planned node)
[root@gcc-22 ~]$ mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
[root@gcc-22 ~]$  vim /etc/supervisord.d/kube-kubelet.ini
[program:kube-kubelet-7-22]
command=/opt/kubernetes/server/bin/kubelet.sh     ; the program (relative uses PATH, can take args)
numprocs=1                                        ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin              ; directory to cwd to before exec (def no cwd)
autostart=true                                    ; start at supervisord start (default: true)
autorestart=true                                ; retstart at unexpected quit (default: true)
startsecs=30                                      ; number of secs prog must stay running (def. 1)
startretries=3                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                         ; setuid to this UNIX account to run the program
redirect_stderr=true                              ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log   ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                       ; emit events on stdout writes (default false)

#(every planned node)
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ supervisorctl  update
kube-kubelet-7-21: added process group
[root@gcc-22 /opt/kubernetes/server/bin/certs]$ supervisorctl  status
etcd-server-7-22                 RUNNING   pid 11812, uptime 5:12:24
kube-apiserver-7-22              RUNNING   pid 11954, uptime 4:21:01
kube-controller-manager-7-22     RUNNING   pid 12298, uptime 1:34:48
kube-kubelet-7-22                RUNNING   pid 13149, uptime 0:00:36
kube-scheduler-7-22              RUNNING   pid 12393, uptime 0:56:04


#Check the worker nodes (every planned node)
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl get node
NAME                STATUS   ROLES    AGE     VERSION
gcc-21.host.com   Ready    <none>   5m18s   v1.15.12
gcc-22.host.com   Ready    <none>   78s     v1.15.12


#Add role labels to the nodes; this only needs to be run on one node
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl label node gcc-21.host.com node-role.kubernetes.io/master=

[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl label node gcc-21.host.com node-role.kubernetes.io/node=

[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl label node gcc-22.host.com node-role.kubernetes.io/node=

[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl label node gcc-22.host.com node-role.kubernetes.io/master=


[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl get node
NAME                STATUS   ROLES         AGE   VERSION
#The nodes now carry both the master and node roles
gcc-21.host.com   Ready    master,node   16m   v1.15.12
gcc-22.host.com   Ready    master,node   12m   v1.15.12