K8S-day02
2. 安装部署私有仓库Harbor
https://github.com/goharbor/harbor
rz harbor-offline-installer-v1.8.3.tgz
[root@gcc-200 ~]$ wget https://storage.googleapis.com/harbor-releases/release-1.8.0/harbor-offline-installer-v1.8.3.tgz
[root@gcc-200 ~]$ ll
total 566432
-rw-------. 1 root root 1258 Jun 19 2019 anaconda-ks.cfg
-rw-r--r-- 1 root root 580021898 Sep 18 2019 harbor-offline-installer-v1.8.3.tgz
[root@gcc-200 ~]$ tar xf harbor-offline-installer-v1.8.3.tgz -C /opt
[root@gcc-200 ~]$ mv /opt/harbor /opt/harbor-v1.8.3
[root@gcc-200 ~]$ ln -s /opt/harbor-v1.8.3 /opt/harbor
[root@gcc-200 /opt/harbor]$ vim harbor.yml
hostname: harbor.od.com
port: 180
harbor_admin_password: 12345
data_volume: /data/harbor
log:
  location: /data/harbor/logs
[root@gcc-200 /opt/harbor]$ mkdir -p /data/harbor/logs
[root@gcc-200 /opt/harbor]$ yum install -y docker-compose
[root@gcc-200 /opt/harbor]$ ./install.sh
[root@gcc-200 /opt/harbor]$ docker-compose ps
Name Command State Ports
--------------------------------------------------------------------------------------
harbor-core /harbor/start.sh Up
harbor-db /entrypoint.sh postgres Up 5432/tcp
harbor-jobservice /harbor/start.sh Up
.............................
[root@gcc-200 /opt/harbor]$ docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
391b78368217 goharbor/nginx-photon:v1.8.3 "nginx -g 'daemon of…" 2 minutes ago Up About a minute (healthy) 0.0.0.0:180->80/tcp nginx
203bfb8f592d goharbor/harbor-portal:v1.8.3 "nginx -g 'daemon of…" 2 minutes ago Up 2 minutes (healthy) 80/tcp harbor-portal
f590fd43bc77 goharbor/harbor-jobservice:v1.8.3 "/harbor/start.sh" 2 ...............................
[root@gcc-200 /opt/harbor]$ yum install -y nginx
[root@gcc-200 /opt/harbor]$ cat /etc/nginx/conf.d/harbor.od.com.conf
server {
listen 80;
server_name harbor.od.com;
client_max_body_size 1000m;
location / {
proxy_pass http://127.0.0.1:180;
}
}
[root@gcc-200 /opt/harbor]$ nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@gcc-200 /opt/harbor]$ systemctl restart nginx
[root@gcc-200 /opt/harbor]$ systemctl enable nginx
[root@gcc-11 ~]$ cat /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2019060402 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.0.0.11
harbor A 10.0.0.200
浏览器访问 http://harbor.od.com 登录 Harbor（用户名 admin，密码 12345）
3. 从私有仓库上传和下载镜像
[root@gcc-200 /opt/harbor]$ docker pull nginx:1.18.0
[root@gcc-200 ~]$ docker tag 741d47c34fe0 harbor.od.com/public/nginx:v1.18.0
[root@gcc-200 ~]$ docker push harbor.od.com/public/nginx:v1.18.0
The push refers to repository [harbor.od.com/public/nginx]
3c445cf708a5: Preparing
3e1e3bb78a57: Preparing
91776dace4ca: Preparing
ffc9b21953f4: Preparing
denied: requested access to the resource is denied
[root@gcc-200 ~]$ docker login harbor.od.com
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/
Login Succeeded
[root@gcc-200 ~]$ docker push harbor.od.com/public/nginx:v1.18.0
The push refers to repository [harbor.od.com/public/nginx]
3c445cf708a5: Pushed
3e1e3bb78a57: Pushed
91776dace4ca: Pushed
ffc9b21953f4: Pushed
v1.18.0: digest: sha256:637488545a21a1ff771549ef65f5e3c1a8dbd92c98d360ac489d76b857021a55 size: 1155
[root@gcc-200 ~]$ docker pull qls123/nginx:1.18.0-curl
1.18.0-curl: Pulling from qls123/nginx
afb6ec6fdc1c: Already exists
2e231683bfde: Already exists
511e2efefada: Already exists
e8fd0ec105c9: Already exists
ca64879d5edd: Pull complete
Digest: sha256:3ce416551486669167f4e9e02e9c297f209c94c4c3c1cc952ee0cc89f8a12f80
Status: Downloaded newer image for qls123/nginx:1.18.0-curl
docker.io/qls123/nginx:1.18.0-curl
[root@gcc-200 ~]$ docker images | grep curl
qls123/nginx 1.18.0-curl eb440e397100 2 days ago 153MB
[root@gcc-200 ~]$ docker tag eb440e397100 harbor.od.com/public/nginx:v1.18.0-curl
[root@gcc-200 ~]$ docker push harbor.od.com/public/nginx:v1.18.0-curl
The push refers to repository [harbor.od.com/public/nginx]
17cf1ea49bfe: Pushed
3c445cf708a5: Layer already exists
3e1e3bb78a57: Layer already exists
91776dace4ca: Layer already exists
ffc9b21953f4: Layer already exists
v1.18.0-curl: digest: sha256:3ce416551486669167f4e9e02e9c297f209c94c4c3c1cc952ee0cc89f8a12f80 size: 1367
[root@gcc-21 ~]$ docker pull harbor.od.com/public/nginx:v1.18.0
v1.18.0: Pulling from public/nginx
afb6ec6fdc1c: Pull complete
2e231683bfde: Pull complete
511e2efefada: Pull complete
e8fd0ec105c9: Pull complete
Digest: sha256:637488545a21a1ff771549ef65f5e3c1a8dbd92c98d360ac489d76b857021a55
Status: Downloaded newer image for harbor.od.com/public/nginx:v1.18.0
harbor.od.com/public/nginx:v1.18.0
[root@gcc-21 ~]$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
harbor.od.com/public/nginx v1.18.0 741d47c34fe0 2 weeks ago 132MB
4. 安装部署主节点Etcd
gcc-12 lead
gcc-21 follow
gcc-22 follow
[root@gcc-200 /opt/certs]$ vi ca-config.json
{
"signing": {
"default": {
"expiry": "175200h"
},
"profiles": {
"server": {
"expiry": "175200h",
"usages": [
"signing",
"key encipherment",
"server auth"
]
},
"client": {
"expiry": "175200h",
"usages": [
"signing",
"key encipherment",
"client auth"
]
},
"peer": {
"expiry": "175200h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
[root@gcc-200 /opt/certs]$ cat etcd-peer-csr.json
{
"CN": "k8s-etcd",
"hosts": [
"10.0.0.11",
"10.0.0.12",
"10.0.0.21",
"10.0.0.22"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "shanghai",
"L": "shanghai",
"O": "od",
"OU": "ops"
}
]
}
[root@gcc-200 /opt/certs]$ cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd-peer-csr.json | cfssl-json -bare etcd-peer
2020/06/05 10:42:09 [INFO] generate received request
2020/06/05 10:42:09 [INFO] received CSR
2020/06/05 10:42:09 [INFO] generating key: rsa-2048
2020/06/05 10:42:09 [INFO] encoded CSR
2020/06/05 10:42:09 [INFO] signed certificate with serial number 40664855453404084274231429057985768039050703509
2020/06/05 10:42:09 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@gcc-200 /opt/certs]$ ll
total 36
-rw-r--r-- 1 root root 837 Jun 5 10:36 ca-config.json
-rw-r--r-- 1 root root 330 Jun 4 20:23 ca-csr.json
-rw------- 1 root root 1679 Jun 4 20:24 ca-key.pem
-rw-r--r-- 1 root root 997 Jun 4 20:24 ca.csr
-rw-r--r-- 1 root root 1350 Jun 4 20:24 ca.pem
-rw-r--r-- 1 root root 365 Jun 5 10:38 etcd-peer-csr.json
-rw------- 1 root root 1679 Jun 5 10:42 etcd-peer-key.pem
-rw-r--r-- 1 root root 1066 Jun 5 10:42 etcd-peer.csr
-rw-r--r-- 1 root root 1432 Jun 5 10:42 etcd-peer.pem
[root@gcc-12 ~]$ useradd -s /sbin/nologin -M etcd
[root@gcc-12 ~]$ id etcd
uid=1000(etcd) gid=1000(etcd) groups=1000(etcd)
https://github.com/etcd-io/etcd/
[root@gcc-12 ~]$ wget https://github.com/etcd-io/etcd/releases/download/v3.1.20/etcd-v3.1.20-linux-amd64.tar.gz
[root@gcc-12 ~]$ tar xf etcd-v3.1.20-linux-amd64.tar.gz -C /opt/
[root@gcc-12 ~]$ ll /opt/
total 0
drwxr-xr-x 3 478493 89939 123 Oct 11 2018 etcd-v3.1.20-linux-amd64
[root@gcc-12 ~]$ ln -s /opt/etcd-v3.1.20-linux-amd64/ /opt/etcd
[root@gcc-12 ~]$ ll /opt/
total 0
lrwxrwxrwx 1 root root 30 Jun 5 10:49 etcd -> /opt/etcd-v3.1.20-linux-amd64/
drwxr-xr-x 3 478493 89939 123 Oct 11 2018 etcd-v3.1.20-linux-amd64
[root@gcc-12 ~]$ cd /opt/etcd
[root@gcc-12 /opt/etcd]$ mkdir certs
[root@gcc-12 /opt/etcd]$ cd certs
[root@gcc-12 /opt/etcd/certs]$ scp -rp gcc-200:/opt/certs/etcd-peer-key.pem ./
[root@gcc-12 /opt/etcd/certs]$ scp -rp gcc-200:/opt/certs/etcd-peer.pem ./
[root@gcc-12 /opt/etcd/certs]$ scp -rp gcc-200:/opt/certs/ca.pem ./
[root@gcc-12 /opt/etcd/certs]$ ll
total 12
-rw-r--r-- 1 root root 1350 Jun 4 20:24 ca.pem
-rw------- 1 root root 1679 Jun 5 10:42 etcd-peer-key.pem
-rw-r--r-- 1 root root 1432 Jun 5 10:42 etcd-peer.pem
[root@gcc-12 /opt/etcd/certs]$ mkdir -p /data/etcd /data/logs/etcd-server && chown -R etcd.etcd /opt/etcd/certs/ /data/etcd /data/logs/etcd-server
[root@gcc-12 /opt/etcd/certs]$ cat /opt/etcd/etcd-server-startup.sh
#!/bin/sh
# Start etcd member "etcd-server-7-12" of the 3-node cluster
# (10.0.0.12 / 10.0.0.21 / 10.0.0.22 on peer port 2380, client port 2379).
# Must be run from /opt/etcd (relative ./etcd and ./certs paths) — supervisord
# sets directory=/opt/etcd and user=etcd for this.
# Peer and remote-client traffic require mutual TLS with the etcd-peer cert;
# a plaintext listener is kept on 127.0.0.1:2379 for local tooling (etcdctl).
# NOTE: flags cannot be commented inline because of the \ line continuations.
./etcd --name etcd-server-7-12 \
--data-dir /data/etcd/etcd-server \
--listen-peer-urls https://10.0.0.12:2380 \
--listen-client-urls https://10.0.0.12:2379,http://127.0.0.1:2379 \
--quota-backend-bytes 8000000000 \
--initial-advertise-peer-urls https://10.0.0.12:2380 \
--advertise-client-urls https://10.0.0.12:2379,http://127.0.0.1:2379 \
--initial-cluster etcd-server-7-12=https://10.0.0.12:2380,etcd-server-7-21=https://10.0.0.21:2380,etcd-server-7-22=https://10.0.0.22:2380 \
--ca-file ./certs/ca.pem \
--cert-file ./certs/etcd-peer.pem \
--key-file ./certs/etcd-peer-key.pem \
--client-cert-auth \
--trusted-ca-file ./certs/ca.pem \
--peer-ca-file ./certs/ca.pem \
--peer-cert-file ./certs/etcd-peer.pem \
--peer-key-file ./certs/etcd-peer-key.pem \
--peer-client-cert-auth \
--peer-trusted-ca-file ./certs/ca.pem \
--log-output stdout
[root@gcc-12 /opt/etcd]$ chmod +x etcd-server-startup.sh
[root@gcc-12 /opt/etcd]$ yum install -y supervisor
[root@gcc-12 /opt/etcd]$ systemctl start supervisord.service && systemctl enable supervisord.service
[root@gcc-12 /opt/etcd/certs]$ vim /etc/supervisord.d/etcd-server.ini
[program:etcd-server-7-12]
command=/opt/etcd/etcd-server-startup.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/etcd ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max
stdout_logfile_backups=4 ;
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@gcc-12 /opt/etcd]$ supervisorctl update
etcd-server-7-12: added process group
[root@gcc-12 /opt/etcd]$ supervisorctl status
etcd-server-7-12 RUNNING pid 9724, uptime 0:00:31
[root@gcc-12 /opt/etcd]$ ./etcdctl cluster-health
member 988139385f78284 is healthy: got healthy result from http://127.0.0.1:2379
member 5a0ef2a004fc4349 is healthy: got healthy result from http://127.0.0.1:2379
member f4a0cb0a765574a8 is healthy: got healthy result from http://127.0.0.1:2379
cluster is healthy
[root@gcc-12 /opt/etcd]$ ./etcdctl member list
988139385f78284: name=etcd-server-7-22 peerURLs=https://10.0.0.22:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.22:2379 isLeader=false
5a0ef2a004fc4349: name=etcd-server-7-21 peerURLs=https://10.0.0.21:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.21:2379 isLeader=false
f4a0cb0a765574a8: name=etcd-server-7-12 peerURLs=https://10.0.0.12:2380 clientURLs=http://127.0.0.1:2379,https://10.0.0.12:2379 isLeader=true
[root@gcc-12 /opt/etcd]$ netstat -lntp
tcp 0 0 10.0.0.12:2379 0.0.0.0:* LISTEN 9725/./etcd
tcp 0 0 127.0.0.1:2379 0.0.0.0:* LISTEN 9725/./etcd
tcp 0 0 10.0.0.12:2380 0.0.0.0:* LISTEN 9725/./etcd
5. 安装部署主节点Apiserver
wget https://dl.k8s.io/v1.15.12/kubernetes-server-linux-amd64.tar.gz
[root@gcc-22 /opt/etcd/certs]$ wget https://dl.k8s.io/v1.15.12/kubernetes-server-linux-amd64.tar.gz
[root@gcc-21 ~]$ tar xf kubernetes-server-linux-amd64.tar.gz -C /opt && mv /opt/kubernetes /opt/kubernetes-v1.15.12 && ln -s /opt/kubernetes-v1.15.12 /opt/kubernetes
[root@gcc-21 ~]$ cd /opt/kubernetes && rm -f kubernetes-src.tar.gz && cd server/bin/ && rm -f *_tag && rm -f *.tar
[root@gcc-200 /opt/certs]$ vi client-csr.json
{
"CN": "k8s-node",
"hosts": [
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "shanghai",
"L": "shangahi",
"O": "od",
"OU": "ops"
}
]
}
[root@gcc-200 /opt/certs]$ cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client-csr.json | cfssl-json -bare client
2020/06/05 12:08:47 [INFO] generate received request
2020/06/05 12:08:47 [INFO] received CSR
2020/06/05 12:08:47 [INFO] generating key: rsa-2048
2020/06/05 12:08:49 [INFO] encoded CSR
2020/06/05 12:08:49 [INFO] signed certificate with serial number 708081291377090734211766588824251935698712311788
2020/06/05 12:08:49 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@gcc-200 /opt/certs]$ ll | grep client
-rw-r--r-- 1 root root 282 Jun 5 12:06 client-csr.json
-rw------- 1 root root 1679 Jun 5 12:08 client-key.pem
-rw-r--r-- 1 root root 997 Jun 5 12:08 client.csr
-rw-r--r-- 1 root root 1371 Jun 5 12:08 client.pem
[root@gcc-200 /opt/certs]$ vim apiserver-csr.json
{
"CN": "k8s-apiserver",
"hosts": [
"127.0.0.1",
"192.168.0.1",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local",
"10.0.0.10",
"10.0.0.21",
"10.0.0.22",
"10.0.0.23"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "shanghai",
"L": "shangahi",
"O": "od",
"OU": "ops"
}
]
}
[root@gcc-200 /opt/certs]$ cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server apiserver-csr.json| cfssl-json -bare apiserver
2020/06/05 12:16:11 [INFO] generate received request
2020/06/05 12:16:11 [INFO] received CSR
2020/06/05 12:16:11 [INFO] generating key: rsa-2048
2020/06/05 12:16:12 [INFO] encoded CSR
2020/06/05 12:16:12 [INFO] signed certificate with serial number 277481596778287505893241360711745472937951359932
2020/06/05 12:16:12 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@gcc-200 /opt/certs]$ ll | grep apiserver
-rw-r--r-- 1 root root 568 Jun 5 12:12 apiserver-csr.json
-rw------- 1 root root 1679 Jun 5 12:16 apiserver-key.pem
-rw-r--r-- 1 root root 1253 Jun 5 12:16 apiserver.csr
-rw-r--r-- 1 root root 1602 Jun 5 12:16 apiserver.pem
[root@gcc-21 /opt/kubernetes/server/bin]$ mkdir -p certs conf
[root@gcc-21 /opt/kubernetes/server/bin]$ cd certs/
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/apiserver-key.pem ./
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/apiserver.pem ./
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/ca.pem ./
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/ca-key.pem ./
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/client-key.pem ./
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/client.pem ./
之后目录内文件如下
-rw------- 1 root root 1679 Jun 5 12:16 apiserver-key.pem
-rw-r--r-- 1 root root 1602 Jun 5 12:16 apiserver.pem
-rw------- 1 root root 1679 Jun 4 20:24 ca-key.pem
-rw-r--r-- 1 root root 1350 Jun 4 20:24 ca.pem
-rw------- 1 root root 1679 Jun 5 12:08 client-key.pem
-rw-r--r-- 1 root root 1371 Jun 5 12:08 client.pem
[root@gcc1-21 /opt/kubernetes/server/bin/conf]$ vi audit.yaml
apiVersion: audit.k8s.io/v1beta1
kind: Policy
# Don't generate audit events for the RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level.
  - level: RequestResponse
    resources:
    - group: ""
      resources: ["pods"]
  # Log "pods/log" and "pods/status" at Metadata level.
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]
  # Don't log requests to the configmap called "controller-leader".
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]
  # Don't log watch requests by "system:kube-proxy" on endpoints or services.
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: ""
      resources: ["endpoints", "services"]
  # Don't log authenticated requests to certain non-resource URL paths.
  - level: None
    userGroups: ["system:authenticated"]
    nonResourceURLs:
    - "/api*"
    - "/version"
  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: ""
      resources: ["configmaps"]
    namespaces: ["kube-system"]
  # Log configmap and secret changes in all other namespaces at Metadata level.
  - level: Metadata
    resources:
    - group: ""
      resources: ["secrets", "configmaps"]
  # Log all other core and extensions resources at the Request level.
  - level: Request
    resources:
    - group: ""
    - group: "extensions"
  # Catch-all rule: log everything else at the Metadata level.
  - level: Metadata
    omitStages:
      - "RequestReceived"
cd ../
[root@gcc-21 /opt/kubernetes/server/bin]$ vim kube-apiserver.sh
#!/bin/bash
# Start kube-apiserver on this master node (one of 2 — see --apiserver-count).
# Must be run from /opt/kubernetes/server/bin (relative ./kube-apiserver,
# ./certs and ./conf paths) — supervisord sets that as the working directory.
# Talks to the TLS-secured etcd cluster with the "client" cert, serves its own
# TLS endpoint with the apiserver cert, and enforces RBAC authorization.
# Audit events follow ./conf/audit.yaml and land in audit-log below.
# NOTE: flags cannot be commented inline because of the \ line continuations.
./kube-apiserver \
--apiserver-count 2 \
--audit-log-path /data/logs/kubernetes/kube-apiserver/audit-log \
--audit-policy-file ./conf/audit.yaml \
--authorization-mode RBAC \
--client-ca-file ./certs/ca.pem \
--requestheader-client-ca-file ./certs/ca.pem \
--enable-admission-plugins NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
--etcd-cafile ./certs/ca.pem \
--etcd-certfile ./certs/client.pem \
--etcd-keyfile ./certs/client-key.pem \
--etcd-servers https://10.0.0.12:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 \
--service-account-key-file ./certs/ca-key.pem \
--service-cluster-ip-range 192.168.0.0/16 \
--service-node-port-range 3000-29999 \
--target-ram-mb=1024 \
--kubelet-client-certificate ./certs/client.pem \
--kubelet-client-key ./certs/client-key.pem \
--log-dir /data/logs/kubernetes/kube-apiserver \
--tls-cert-file ./certs/apiserver.pem \
--tls-private-key-file ./certs/apiserver-key.pem \
--v 2
[root@gcc-21 /opt/kubernetes/server/bin]$ chmod +x kube-apiserver.sh
[root@gcc-21 /opt/kubernetes/server/bin]$ mkdir -p /data/logs/kubernetes/kube-apiserver
[root@gcc-21 /opt/kubernetes/server/bin]$ cat /etc/supervisord.d/kube-apiserver.ini
[program:kube-apiserver-7-21]
command=/opt/kubernetes/server/bin/kube-apiserver.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max
stdout_logfile_backups=4 ;
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl update
kube-apiserver-7-21: added process group
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl status
etcd-server-7-21 RUNNING pid 12144, uptime 0:53:24
kube-apiserver-7-21 STARTING
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl status
etcd-server-7-21 RUNNING pid 12144, uptime 0:53:26
kube-apiserver-7-21 RUNNING pid 12340, uptime 0:00:30
6. 安装部署4层反向代理及高可用
[root@gcc-11 ~]$ yum install -y nginx
[root@gcc-11 ~]$ vim /etc/nginx/nginx.conf
stream {
upstream kube-apiserver {
server 10.0.0.21:6443 max_fails=3 fail_timeout=30s;
server 10.0.0.22:6443 max_fails=3 fail_timeout=30s;
}
server {
listen 7443;
proxy_connect_timeout 2s;
proxy_timeout 900s;
proxy_pass kube-apiserver;
}
}
[root@gcc-11 ~]$ nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@gcc-11 ~]$ systemctl start nginx
[root@gcc-11 ~]$ systemctl enable nginx
[root@gcc-11 ~]$ yum install -y keepalived
[root@gcc-11 ~]$ cat /etc/keepalived/check_port.sh
#!/bin/bash
# keepalived vrrp_script health check: exit 0 when TCP port $1 is listening,
# non-zero otherwise (keepalived then applies the configured 'weight' penalty,
# letting the VIP fail over when the local nginx L4 proxy on 7443 dies).
CHK_PORT=$1
if [ -n "$CHK_PORT" ];then
    # Match ":PORT" as a whole word so e.g. checking 443 cannot be satisfied
    # by a listener on 7443/8443; grep -c replaces the old grep|wc -l pipe.
    PORT_PROCESS=$(ss -lnt | grep -c -w ":${CHK_PORT}")
    if [ "$PORT_PROCESS" -eq 0 ];then
        echo "Port $CHK_PORT Is Not Used,End."
        exit 1
    fi
else
    echo "Check Port Cant Be Empty!"
    # Fail the check explicitly: the original fell through to exit 0 here,
    # so a misconfigured (empty-argument) check always reported "healthy".
    exit 1
fi
[root@gcc-11 ~]$ chmod +x /etc/keepalived/check_port.sh
[root@gcc-11 ~]$ vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id 10.0.0.11
}
vrrp_script chk_nginx {
script "/etc/keepalived/check_port.sh 7443"
interval 2
weight -20
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 251
priority 100
advert_int 1
mcast_src_ip 10.0.0.11
nopreempt
authentication {
auth_type PASS
auth_pass 11111111
}
track_script {
chk_nginx
}
virtual_ipaddress {
10.0.0.10
}
}
[root@gcc-12 ~]$ cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id 10.0.0.12
}
vrrp_script chk_nginx {
script "/etc/keepalived/check_port.sh 7443"
interval 2
weight -20
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 251
mcast_src_ip 10.0.0.12
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass 11111111
}
track_script {
chk_nginx
}
virtual_ipaddress {
10.0.0.10
}
}
[root@gcc-11 ~]$ systemctl start keepalived.service
[root@gcc-11 ~]$ systemctl enable keepalived.service
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@gcc-11 ~]$ ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:15:5d:32:c5:01 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.11/24 brd 10.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet 10.0.0.10/32 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::215:5dff:fe32:c501/64 scope link
valid_lft forever preferred_lft forever
7. 安装部署Controller-manager
[root@gcc0-21 ~]$ vim /opt/kubernetes/server/bin/kube-controller-manager.sh
#!/bin/sh
# Start kube-controller-manager on this master node.
# Must be run from /opt/kubernetes/server/bin (relative ./kube-controller-manager
# and ./certs paths) — supervisord sets that as the working directory.
# Connects to the local apiserver over its insecure loopback port (8080),
# so no client TLS flags are needed; --leader-elect coordinates the two masters.
# ca-key.pem here signs service-account tokens and must pair with the
# apiserver's --service-account-key-file.
./kube-controller-manager \
--cluster-cidr 172.7.0.0/16 \
--leader-elect true \
--log-dir /data/logs/kubernetes/kube-controller-manager \
--master http://127.0.0.1:8080 \
--service-account-private-key-file ./certs/ca-key.pem \
--service-cluster-ip-range 192.168.0.0/16 \
--root-ca-file ./certs/ca.pem \
--v 2
[root@gcc0-21 ~]$ chmod +x /opt/kubernetes/server/bin/kube-controller-manager.sh
[root@gcc0-21 ~]$ mkdir -p /data/logs/kubernetes/kube-controller-manager
[root@gcc0-21 ~]$ vim /etc/supervisord.d/kube-conntroller-manager.ini
[program:kube-controller-manager-7-21]
command=/opt/kubernetes/server/bin/kube-controller-manager.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controll.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max
stdout_logfile_backups=4 ;
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-controller-manager/controll.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB ; max
stderr_logfile_backups=4 ;
stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false ; emit events on stderr writes (default false)
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl update
kube-controller-manager-7-21: added process group
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl status
etcd-server-7-21 RUNNING pid 12144, uptime 3:44:38
kube-apiserver-7-21 RUNNING pid 12340, uptime 2:51:42
kube-controller-manager-7-21 RUNNING pid 12685, uptime 0:00:45
[root@gcc-22 /opt/kubernetes/server/bin]$ supervisorctl update
kube-controller-manager-7-22: added process group
[root@gcc-22 /opt/kubernetes/server/bin]$ supervisorctl status
etcd-server-7-22 RUNNING pid 11812, uptime 3:38:10
kube-apiserver-7-22 RUNNING pid 11954, uptime 2:46:47
kube-controller-manager-7-22 RUNNING pid 12298, uptime 0:00:34
8. 安装部署Kube-scheduler
[root@gcc-21 ~]$ cat /opt/kubernetes/server/bin/kube-scheduler.sh
#!/bin/sh
# Start kube-scheduler on this master node.
# Must be run from /opt/kubernetes/server/bin — supervisord sets the cwd.
# Talks to the local apiserver over the insecure loopback port (8080);
# --leader-elect ensures only one of the two masters schedules at a time.
./kube-scheduler \
--leader-elect \
--log-dir /data/logs/kubernetes/kube-scheduler \
--master http://127.0.0.1:8080 \
--v 2
[root@gcc0-21 ~]$ chmod +x /opt/kubernetes/server/bin/kube-scheduler.sh
[root@gcc0-21 ~]$ mkdir -p /data/logs/kubernetes/kube-scheduler
[root@gcc0-21 ~]$ vim /etc/supervisord.d/kube-scheduler.ini
[program:kube-scheduler-7-21]
command=/opt/kubernetes/server/bin/kube-scheduler.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=22 ; number of secs prog must stay running (def. 1)
startretries=3 ; max
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=false ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max
stdout_logfile_backups=4 ;
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB ; max
stderr_logfile_backups=4 ;
stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false ; emit events on stderr writes (default false)
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl update
kube-scheduler-7-21: added process group
[root@gcc-21 /opt/kubernetes/server/bin]$ supervisorctl status
etcd-server-7-21 RUNNING pid 11812, uptime 4:17:43
kube-apiserver-7-21 RUNNING pid 11954, uptime 3:26:20
kube-controller-manager-7-21 RUNNING pid 12298, uptime 0:40:07
kube-scheduler-7-21 RUNNING pid 12393, uptime 0:01:23
[root@gcc-21 /opt/kubernetes/server/bin]$ ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl
[root@gcc-21 ~]$ kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-1 Healthy {"health": "true"}
etcd-2 Healthy {"health": "true"}
etcd-0 Healthy {"health": "true"}
9. 部署Node节点Kubelet
[root@gcc-200 ~]$ cd /opt/certs/
[root@gcc-200 /opt/certs]$ vi kubelet-csr.json
{
"CN": "kubelet-node",
"hosts": [
"127.0.0.1",
"10.0.0.10",
"10.0.0.21",
"10.0.0.22",
"10.0.0.23",
"10.0.0.24",
"10.0.0.25",
"10.0.0.26",
"10.0.0.27",
"10.0.0.28"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "shanghai",
"L": "shanghai",
"O": "od",
"OU": "ops"
}
]
}
[root@gcc-200 /opt/certs]$ cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json|cfssl-json -bare kubelet
2020/06/05 16:21:29 [INFO] generate received request
2020/06/05 16:21:29 [INFO] received CSR
2020/06/05 16:21:29 [INFO] generating key: rsa-2048
2020/06/05 16:21:30 [INFO] encoded CSR
2020/06/05 16:21:30 [INFO] signed certificate with serial number 623121733493774593674761612818452339217510672422
2020/06/05 16:21:30 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@gcc-200 /opt/certs]$ ll | grep kubelet
-rw-r--r-- 1 root root 455 Jun 5 16:19 kubelet-csr.json
-rw------- 1 root root 1675 Jun 5 16:21 kubelet-key.pem
-rw-r--r-- 1 root root 1119 Jun 5 16:21 kubelet.csr
-rw-r--r-- 1 root root 1472 Jun 5 16:21 kubelet.pem
[root@gcc-21 ~]$ cd /opt/kubernetes/server/bin/certs/
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/kubelet-key.pem ./
[root@gcc-21 /opt/kubernetes/server/bin/certs]$ scp -rp gcc-200:/opt/certs/kubelet.pem ./
[root@gcc0-21 ~]$ cd /opt/kubernetes/server/bin/conf
[root@gcc0-21 /opt/kubernetes/server/bin/conf]$ kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
--embed-certs=true \
--server=https://10.0.0.10:7443 \
--kubeconfig=kubelet.kubeconfig
Cluster "myk8s" set.
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl config set-credentials k8s-node --client-certificate=/opt/kubernetes/server/bin/certs/client.pem \
--client-key=/opt/kubernetes/server/bin/certs/client-key.pem \
--embed-certs=true \
--kubeconfig=kubelet.kubeconfig
User "k8s-node" set.
[root@gcc0-21 /opt/kubernetes/server/bin/conf]$ kubectl config set-context myk8s-context --cluster=myk8s \
--user=k8s-node \
--kubeconfig=kubelet.kubeconfig
Context "myk8s-context" created.
[root@gcc0-21 /opt/kubernetes/server/bin/conf]$ kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig
Switched to context "myk8s-context".
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ vim k8s-node.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: k8s-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: k8s-node
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl create -f k8s-node.yaml
clusterrolebinding.rbac.authorization.k8s.io/k8s-node created
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl get clusterrolebinding k8s-node
NAME AGE
k8s-node 2m8s
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl get clusterrolebinding k8s-node -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: "2020-06-05T08:36:40Z"
name: k8s-node
resourceVersion: "6210"
selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/k8s-node
uid: 77822258-faf5-4733-ad58-15c0efbdfb39
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: k8s-node
[root@gcc-22 /opt/kubernetes/server/bin/conf]$ scp -rp gcc-21:/opt/kubernetes/server/bin/conf/kubelet.kubeconfig ./
[root@gcc-22 /opt/kubernetes/server/bin/conf]$ scp -rp gcc-21:/opt/kubernetes/server/bin/conf/k8s-node.yaml ./
[root@gcc-22 /opt/kubernetes/server/bin/conf]$ ll
total 16
-rw-r--r-- 1 root root 2223 Jun 5 12:34 audit.yaml
-rw-r--r-- 1 root root 258 Jun 5 16:35 k8s-node.yaml
-rw------- 1 root root 6211 Jun 5 16:33 kubelet.kubeconfig
[root@gcc-200 ~]$ docker pull kubernetes/pause
[root@gcc-200 ~]$ docker images | grep pause
kubernetes/pause latest f9d5de079539 5 years ago 240kB
[root@gcc-200 ~]$ docker tag f9d5de079539 harbor.od.com/public/pause:latest
[root@gcc-200 ~]$ docker push harbor.od.com/public/pause:latest
The push refers to repository [harbor.od.com/public/pause]
5f70bf18a086: Pushed
e16a89738269: Pushed
latest: digest: sha256:b31bfb4d0213f254d361e0079deaaebefa4f82ba7aa76ef82e90b4935ad5b105 size: 938
[root@gcc-22 ~]$ vim /opt/kubernetes/server/bin/kubelet.sh
#!/bin/sh
# Start kubelet on this worker node (gcc-22 here — --hostname-override must be
# changed per node when copying this script to other hosts).
# Must be run from /opt/kubernetes/server/bin (relative ./kubelet, ./certs and
# ./conf paths) — supervisord sets that as the working directory.
# Auth against the apiserver comes from ./conf/kubelet.kubeconfig (k8s-node
# user, VIP https://10.0.0.10:7443); the kubelet's own TLS endpoint uses the
# kubelet cert. The pause image is pulled from the private Harbor registry.
# NOTE: flags cannot be commented inline because of the \ line continuations.
./kubelet \
--anonymous-auth=false \
--cgroup-driver systemd \
--cluster-dns 192.168.0.2 \
--cluster-domain cluster.local \
--runtime-cgroups=/systemd/system.slice \
--kubelet-cgroups=/systemd/system.slice \
--fail-swap-on="false" \
--client-ca-file ./certs/ca.pem \
--tls-cert-file ./certs/kubelet.pem \
--tls-private-key-file ./certs/kubelet-key.pem \
--hostname-override gcc-22.host.com \
--image-gc-high-threshold 20 \
--image-gc-low-threshold 10 \
--kubeconfig ./conf/kubelet.kubeconfig \
--log-dir /data/logs/kubernetes/kube-kubelet \
--pod-infra-container-image harbor.od.com/public/pause:latest \
--root-dir /data/kubelet
[root@gcc0-22 ~]$ chmod +x /opt/kubernetes/server/bin/kubelet.sh
[root@gcc0-22 ~]$ mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
[root@gcc0-22 ~]$ vim /etc/supervisord.d/kube-kubelet.ini
[program:kube-kubelet-7-22]
command=/opt/kubernetes/server/bin/kubelet.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max
stdout_logfile_backups=4 ;
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ supervisorctl update
kube-kubelet-7-21: added process group
[root@gcc-22 /opt/kubernetes/server/bin/certs]$ supervisorctl status
etcd-server-7-22 RUNNING pid 11812, uptime 5:12:24
kube-apiserver-7-22 RUNNING pid 11954, uptime 4:21:01
kube-controller-manager-7-22 RUNNING pid 12298, uptime 1:34:48
kube-kubelet-7-22 RUNNING pid 13149, uptime 0:00:36
kube-scheduler-7-22 RUNNING pid 12393, uptime 0:56:04
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl get node
NAME STATUS ROLES AGE VERSION
gcc-21.host.com Ready <none> 5m18s v1.15.12
gcc-22.host.com Ready <none> 78s v1.15.12
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl label node gcc-21.host.com node-role.kubernetes.io/master=
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl label node gcc-21.host.com node-role.kubernetes.io/node=
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl label node gcc-22.host.com node-role.kubernetes.io/node=
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl label node gcc-22.host.com node-role.kubernetes.io/master=
[root@gcc-21 /opt/kubernetes/server/bin/conf]$ kubectl get node
NAME STATUS ROLES AGE VERSION
gcc-21.host.com Ready master,node 16m v1.15.12
gcc-22.host.com Ready master,node 12m v1.15.12