k8s-day04
[TOC]
1. Core K8s resource management methods
Three approaches:
Imperative management --- relies mainly on the kubectl command-line (CLI) tool
Declarative management --- relies mainly on unified resource configuration manifests
GUI management --- relies mainly on a graphical interface
Imperative management --- relies mainly on the kubectl command-line (CLI) tool
# Check cluster component status
[root@gcc-22 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-1 Healthy {"health": "true"}
etcd-2 Healthy {"health": "true"}
etcd-0 Healthy {"health": "true"}
# List the cluster's nodes
[root@gcc-22 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
gcc-21.host.com Ready master,node 3d22h v1.15.12
gcc-22.host.com Ready master,node 3d22h v1.15.12
# List cluster namespaces
[root@gcc-22 ~]# kubectl get ns    # ns is short for namespace
NAME STATUS AGE
default Active 4d2h
kube-node-lease Active 4d2h
kube-public Active 4d2h
kube-system Active 4d2h
# List the resources in a specific namespace
[root@gcc-22 ~]# kubectl get all -n default
NAME READY STATUS RESTARTS AGE
pod/nginx-ds-9ss9d 1/1 Running 0 21h
pod/nginx-ds-knztg 1/1 Running 0 21h
# Create a namespace
[root@gcc-22 ~]# kubectl create ns app
namespace/app created
[root@gcc-22 ~]# kubectl get ns
NAME STATUS AGE
app Active 7s
default Active 4d2h
kube-node-lease Active 4d2h
kube-public Active 4d2h
kube-system Active 4d2h
# Delete a namespace
[root@gcc-22 ~]# kubectl delete ns app
namespace "app" deleted
# Create a Deployment resource
[root@gcc-22 ~]# kubectl create deployment nginx-da --image=harbor.od.com/public/nginx:v1.18.0-curl -n kube-public
deployment.apps/nginx-da created
# Brief view
[root@gcc-22 ~]# kubectl get deployment -n kube-public
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-da 1/1 1 1 2m27s
# Detailed view
[root@gcc-22 ~]# kubectl describe deployment nginx-da -n kube-public
# List Pod resources
[root@gcc-22 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-ds-9ss9d 1/1 Running 0 22h
nginx-ds-knztg 1/1 Running 0 22h
[root@gcc-22 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-da-75b8b84549-rtl8s 1/1 Running 0 6m16s
# View Pod details
[root@gcc-22 ~]# kubectl get pod nginx-da-75b8b84549-rtl8s -o wide -n kube-public
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-da-75b8b84549-rtl8s 1/1 Running 0 8m2s 172.7.21.3 gcc-21.host.com <none> <none>
[root@gcc-22 ~]# kubectl get pod nginx-da-75b8b84549-rtl8s -o yaml -n kube-public
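Besides -o wide and -o yaml, other output formats are handy for scripting; a brief sketch using the same Pod as above:
[root@gcc-22 ~]# kubectl get pod nginx-da-75b8b84549-rtl8s -o json -n kube-public
[root@gcc-22 ~]# kubectl get pod nginx-da-75b8b84549-rtl8s -o jsonpath='{.status.podIP}' -n kube-public
# prints 172.7.21.3, the Pod IP from the wide output above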
# Exec into a Pod
[root@gcc-22 ~]# kubectl exec -ti nginx-da-75b8b84549-rtl8s /bin/bash -n kube-public
root@nginx-da-75b8b84549-rtl8s:/#
# Delete a Pod (the Deployment controller immediately recreates it)
[root@gcc-22 ~]# kubectl delete pod nginx-da-75b8b84549-rtl8s -n kube-public
pod "nginx-da-75b8b84549-rtl8s" deleted
[root@gcc-22 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-da-75b8b84549-f85wq 1/1 Running 0 31s
# Force delete (skips graceful termination)
[root@gcc-22 ~]# kubectl delete pod nginx-da-75b8b84549-f85wq -n kube-public --force --grace-period=0
# Delete the Deployment resource
[root@gcc-22 ~]# kubectl delete deployment nginx-da -n kube-public
deployment.extensions "nginx-da" deleted
# Create a Service resource
[root@gcc-22 ~]# kubectl create deployment nginx-da --image=harbor.od.com/public/nginx:v1.18.0-curl -n kube-public
[root@gcc-22 ~]# kubectl expose deployment nginx-da --port=80 -n kube-public
service/nginx-da exposed
# Inspect the Service details
[root@gcc-22 ~]# kubectl describe svc nginx-da -n kube-public
Name: nginx-da
Namespace: kube-public
Labels: app=nginx-da
Annotations: <none>
Selector: app=nginx-da
Type: ClusterIP
IP: 192.168.35.220
Port: <unset> 80/TCP
TargetPort: 80/TCP
Endpoints: 172.7.22.3:80
Session Affinity: None
Events: <none>
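The Endpoints entry above is maintained automatically from the Service's label selector; it can also be inspected directly:
[root@gcc-22 ~]# kubectl get endpoints nginx-da -n kube-public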
# Scale the number of Pod replicas
[root@gcc-22 ~]# kubectl scale deploy nginx-da --replicas=2 -n kube-public
deployment.extensions/nginx-da scaled
Summary of imperative resource management:
Management always goes through the apiserver's API.
kubectl is the official CLI tool.
kubectl --help
http://docs.kubernetes.org.cn/683.html
2. Declarative resource management
Relies mainly on resource configuration manifests (YAML/JSON files).
# View a resource's configuration manifest
[root@gcc-22 ~]# kubectl get pod nginx-ds-9ss9d -o yaml -n default
[root@gcc-22 ~]# kubectl get svc nginx-da -o yaml -n kube-public    # svc is short for service
# View the field documentation
[root@gcc-22 ~]# kubectl explain service.metadata
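kubectl explain also drills into nested fields, which is the quickest way to learn a manifest's schema; for example:
[root@gcc-22 ~]# kubectl explain service.spec
[root@gcc-22 ~]# kubectl explain service.spec.ports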
# Write a resource configuration manifest
[root@gcc-21 ~]# cat nginx-ds-svc.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-ds
  name: nginx-ds
  namespace: default
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx-ds
  sessionAffinity: None
  type: ClusterIP
[root@gcc-22 ~]# kubectl create -f nginx-ds-svc.yaml
service/nginx-ds created
# Modify online (edit the live object the manifest refers to)
[root@gcc-22 ~]# kubectl edit -f nginx-ds-svc.yaml
service/nginx-ds edited
# Modify offline (edit the file, then apply it)
[root@gcc-22 ~]# kubectl apply -f nginx-ds-svc.yaml
service/nginx-ds created
# Delete resources via the manifest (or directly by name)
[root@gcc-22 ~]# kubectl delete -f nginx-ds-svc.yaml
service "nginx-ds" deleted
[root@gcc-22 ~]# kubectl delete svc nginx-ds
service "nginx-ds" deleted
How to learn resource configuration manifests:
Read the official examples until you can follow them.
Try simple modifications yourself.
When something is unclear, check the help (kubectl explain).
3. Installing and deploying a CNI plugin
Purpose of a network plugin: let Pods communicate across hosts.
Common network plugins:
Flannel
Calico
Canal
Contiv
OpenContrail
NSX-T
kube-router
https://github.com/coreos/flannel/tags
# Run on both host 21 and host 22
[root@gcc-21 ~]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
[root@gcc-21 ~]# mkdir /opt/flannel-v0.11.0 && tar xf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/flannel-v0.11.0 && ln -s /opt/flannel-v0.11.0/ /opt/flannel
[root@gcc-21 ~]# cd /opt/flannel && mkdir certs && cd certs
[root@gcc-21 /opt/flannel/certs]# scp -rp gcc-200:/opt/certs/ca.pem ./
[root@gcc-21 /opt/flannel/certs]# scp -rp gcc-200:/opt/certs/client.pem ./
[root@gcc-21 /opt/flannel/certs]# scp -rp gcc-200:/opt/certs/client-key.pem ./
# Define an environment variable file
[root@gcc-21 /opt/flannel/certs]# cd ../ && vi subnet.env
FLANNEL_NETWORK=172.0.0.0/16
FLANNEL_SUBNET=172.0.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
# Create the startup script
[root@gcc-21 /opt/flannel]# vi flanneld.sh
#!/bin/sh
./flanneld \
--public-ip=10.0.0.21 \
--etcd-endpoints=https://10.0.0.12:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 \
--etcd-keyfile=./certs/client-key.pem \
--etcd-certfile=./certs/client.pem \
--etcd-cafile=./certs/ca.pem \
--iface=eth0 \
--subnet-file=./subnet.env \
--healthz-port=2401
[root@gcc-21 /opt/flannel]# chmod +x flanneld.sh
[root@gcc-21 /opt/flannel]# mkdir -p /data/logs/flanneld/
# Add the host-gw network config to etcd
[root@gcc-21 /opt/flannel]# cd /opt/etcd
[root@gcc-21 /opt/etcd]# ./etcdctl member list
988139385f78284: name=etcd-server-7-22 peerURLs=https://10.4.7.22:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.22:2379 isLeader=false
5a0ef2a004fc4349: name=etcd-server-7-21 peerURLs=https://10.4.7.21:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.21:2379 isLeader=false
f4a0cb0a765574a8: name=etcd-server-7-12 peerURLs=https://10.4.7.12:2380 clientURLs=http://127.0.0.1:2379,https://10.4.7.12:2379 isLeader=true
[root@gcc-21 /opt/etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.0.0.0/16", "Backend": {"Type": "host-gw"}}'    # (the second octet of 172.0.0.0 follows the third octet of the host network)
{"Network": "172.0.0.0/16", "Backend": {"Type": "host-gw"}}
[root@gcc-21 /opt/etcd]# ./etcdctl get /coreos.com/network/config
{"Network": "172.0.0.0/16", "Backend": {"Type": "host-gw"}}
# Create the supervisor config
[root@gcc-21 /opt/etcd]# vi /etc/supervisord.d/flannel.ini
[program:flanneld-7-21]
command=/opt/flannel/flanneld.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/flannel ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
# Start and verify
[root@gcc-21 /opt/etcd]# supervisorctl update
flanneld-7-21: added process group
[root@gcc-21 /opt/etcd]# supervisorctl status
etcd-server-7-21 RUNNING pid 3883, uptime 2 days, 7:12:12
flanneld-7-21 RUNNING pid 573, uptime 0:00:33
kube-apiserver-7-21 RUNNING pid 3884, uptime 2 days, 7:12:12
kube-controller-manager-7-21 RUNNING pid 3880, uptime 2 days, 7:12:12
kube-kubelet-7-21 RUNNING pid 16162, uptime 1 day, 17:18:07
kube-proxy-7-21 RUNNING pid 14063, uptime 1 day, 0:31:04
kube-scheduler-7-21 RUNNING pid 3882, uptime 2 days, 7:12:12
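Because flanneld.sh passed --healthz-port=2401, the daemon also exposes a plain HTTP health endpoint; a quick check, as a sketch:
[root@gcc-21 ~]# curl -s http://127.0.0.1:2401/healthz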
[root@gcc-21 /opt/flannel]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ds-9ss9d 1/1 Running 0 23h 172.7.22.2 gcc-22.host.com <none> <none>
nginx-ds-knztg 1/1 Running 0 23h 172.7.21.2 gcc-21.host.com <none> <none>
[root@gcc-21 /opt/flannel]# ping 172.7.22.2
PING 172.7.22.2 (172.7.22.2) 56(84) bytes of data.
64 bytes from 172.7.22.2: icmp_seq=1 ttl=63 time=9.31 ms
64 bytes from 172.7.22.2: icmp_seq=2 ttl=63 time=1.93 ms
64 bytes from 172.7.22.2: icmp_seq=3 ttl=63 time=1.18 ms
^C
--- 172.7.22.2 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2102ms
rtt min/avg/max/mdev = 1.185/4.143/9.314/3.669 ms
[root@gcc-22 /opt/flannel]# ping 172.7.21.2
PING 172.7.21.2 (172.7.21.2) 56(84) bytes of data.
64 bytes from 172.7.21.2: icmp_seq=1 ttl=63 time=2.65 ms
64 bytes from 172.7.21.2: icmp_seq=2 ttl=63 time=0.968 ms
^C
--- 172.7.21.2 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1014ms
rtt min/avg/max/mdev = 0.968/1.809/2.651/0.842 ms
[root@gcc-21 /opt/flannel]# kubectl exec -ti nginx-ds-knztg /bin/bash
root@nginx-ds-knztg:/# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
6: eth0@if7: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:07:15:02 brd ff:ff:ff:ff:ff:ff
inet 172.7.21.2/24 brd 172.7.21.255 scope global eth0
valid_lft forever preferred_lft forever
root@nginx-ds-knztg:/# ping 172.7.22.2
PING 172.7.22.2 (172.7.22.2): 56 data bytes
64 bytes from 172.7.22.2: icmp_seq=0 ttl=62 time=6.213 ms
64 bytes from 172.7.22.2: icmp_seq=1 ttl=62 time=1.350 ms
^C--- 172.7.22.2 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 1.350/3.782/6.213/2.432 ms
root@nginx-ds-knztg:/#
[root@gcc-21 /opt/flannel]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.4.7.1 0.0.0.0 UG 0 0 0 eth0
10.4.7.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
169.254.0.0 0.0.0.0 255.255.0.0 U 1002 0 0 eth0
172.7.21.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
172.7.22.0 10.4.7.22 255.255.255.0 UG 0 0 0 eth0
Make sure kernel IP forwarding is enabled (host-gw relies on it):
echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf
sysctl -p
How Flannel works (the route table above is what the host-gw model installs):
host-gw model: adds a static route per remote Pod subnet via the owning host's IP; all hosts must sit on the same L2 network. A sketch of the equivalent manual routes follows.
VxLAN model: creates a dedicated network device and tunnels Pod traffic between hosts over its own overlay ("Type": "VxLAN" in the etcd config).
Direct routing model: a combination of host-gw and VxLAN - direct routes where hosts share a segment, VxLAN tunnels otherwise.
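A minimal sketch of what the host-gw backend effectively programs on each node (assuming the two hosts and Pod subnets from this lab; flanneld maintains these routes for you):
# On gcc-21 (10.4.7.21): reach gcc-22's Pod subnet via gcc-22's host IP
ip route add 172.7.22.0/24 via 10.4.7.22 dev eth0
# On gcc-22 (10.4.7.22): the mirror-image route
ip route add 172.7.21.0/24 via 10.4.7.21 dev eth0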
# SNAT rule optimization
[root@gcc-21 /opt/flannel]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ds-9ss9d 1/1 Running 0 24h 172.7.22.2 gcc-22.host.com <none> <none>
nginx-ds-knztg 1/1 Running 0 24h 172.7.21.2 gcc-21.host.com <none> <none>
[root@gcc-21 /opt/flannel]# kubectl exec -ti nginx-ds-knztg /bin/bash
root@nginx-ds-knztg:/# curl 172.7.22.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
[root@gcc-22 /opt/flannel]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ds-9ss9d 1/1 Running 0 24h 172.7.22.2 gcc-22.host.com <none> <none>
nginx-ds-knztg 1/1 Running 0 24h 172.7.21.2 gcc-21.host.com <none> <none>
[root@gcc-22 /opt/flannel]# kubectl logs -f nginx-ds-9ss9d
10.4.7.21 - - [09/Jun/2020:09:50:46 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.64.0" "-"
[root@gcc-21 /opt/flannel]# yum install -y iptables-services
[root@gcc-21 /opt/flannel]# systemctl start iptables.service
# The two MASQUERADE adjustments below: without them, cross-host requests keep being SNATed to the host IP (10.4.7.21 in the log above) instead of showing the real Pod IP
[root@gcc-22 ~]# iptables -t nat -D POSTROUTING -s 172.7.22.0/24 ! -o docker0 -j MASQUERADE
[root@gcc-22 ~]# iptables -t nat -I POSTROUTING -s 172.7.22.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
[root@gcc-22 ~]# iptables-save |grep REJECT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
[root@gcc-22 ~]# iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
[root@gcc-22 ~]# iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
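A quick check that the new NAT rule is in place (iptables-save just prints the live ruleset):
[root@gcc-22 ~]# iptables-save -t nat | grep MASQUERADE
-A POSTROUTING -s 172.7.22.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE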
# Final result
[root@gcc-22 ~]# kubectl logs -f nginx-ds-9ss9d
10.4.7.21 - - [09/Jun/2020:09:50:46 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.64.0" "-"
172.7.21.2 - - [09/Jun/2020:10:01:58 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.64.0" "-"
4. Service discovery in K8s
Service discovery is how services locate one another.
Cluster network (ClusterIP)
Service resources
CoreDNS automatically ties the cluster network and Service resources together.
When service discovery is needed:
services are highly dynamic
services are updated and released frequently
services scale automatically
K8s implements service discovery with DNS; the naming convention is sketched below.
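The records CoreDNS will serve follow a fixed naming scheme (cluster domain cluster.local, as configured later in this section):
# <service>.<namespace>.svc.cluster.local -> the Service's ClusterIP
# e.g. nginx-da.kube-public.svc.cluster.local -> 192.168.35.220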
# Deploy CoreDNS
# Serve the resource configuration manifests over HTTP on the internal network
[root@gcc-200 /opt/certs]# vi /etc/nginx/conf.d/k8s-yaml.od.com.conf
server {
    listen       80;
    server_name  k8s-yaml.od.com;
    location / {
        autoindex on;
        default_type text/plain;
        root /data/k8s-yaml;
    }
}
[root@gcc-200 /opt/certs]#
[root@gcc-200 /opt/certs]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@gcc-200 /opt/certs]# systemctl reload nginx
[root@gcc-200 /opt/certs]# mkdir -p /data/k8s-yaml
[root@gcc-200 /opt/certs]# mkdir -p /data/k8s-yaml/coredns
# Configure DNS resolution
[root@gcc-11 ~]# cat /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2019060403 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
harbor A 10.4.7.200
k8s-yaml A 10.4.7.200
[root@gcc-11 ~]# systemctl restart named
# Prepare the CoreDNS image
[root@gcc-200 ~]# docker pull coredns/coredns:1.6.1
[root@gcc-200 ~]# docker images | grep coredns
coredns/coredns 1.6.1 c0f6e815079e 10 months ago 42.2MB
[root@gcc-200 ~]# docker tag c0f6e815079e harbor.od.com/public/coredns:v1.6.1
[root@gcc-200 ~]# docker push harbor.od.com/public/coredns:v1.6.1
The push refers to repository [harbor.od.com/public/coredns]
da1ec456edc8: Pushed
225df95e717c: Pushed
v1.6.1: digest: sha256:c7bf0ce4123212c87db74050d4cbab77d8f7e0b49c041e894a35ef15827cf938 size: 739
[root@gcc-200 ~]#
# Prepare the resource configuration manifests
https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/coredns/coredns.yaml.base
[root@gcc-200 /data/k8s-yaml/coredns]# vi cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        ready
        kubernetes cluster.local 192.168.0.0/16
        forward . 10.0.0.11
        cache 30
        loop
        reload
        loadbalance
    }
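What each Corefile plugin above does (a brief, hedged summary of the upstream CoreDNS docs):
# errors      - write errors to stdout
# log         - log every query
# health      - liveness endpoint on :8080/health (this is what dp.yaml's livenessProbe hits)
# ready       - readiness endpoint on :8181/ready
# kubernetes  - answer the cluster.local zone from the apiserver; the 192.168.0.0/16 CIDR adds reverse (PTR) zones for Service IPs
# forward     - send everything else to the upstream DNS 10.0.0.11
# cache 30    - cache answers for 30 seconds
# loop        - detect forwarding loops and abort
# reload      - re-read the Corefile when the ConfigMap changes
# loadbalance - round-robin the order of returned A records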
[root@gcc-200 /data/k8s-yaml/coredns]# vi dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      containers:
      - name: coredns
        image: harbor.od.com/public/coredns:v1.6.1
        args:
        - -conf
        - /etc/coredns/Corefile
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
[root@gcc-200 /data/k8s-yaml/coredns]# vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
[root@gcc-200 /data/k8s-yaml/coredns]# vi svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 192.168.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
  - name: metrics
    port: 9153
    protocol: TCP
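The fixed clusterIP 192.168.0.2 is deliberate: it has to match the cluster DNS address kubelet hands to every Pod via its --cluster-dns flag (confirmed by the Pod's /etc/resolv.conf further below). A quick sanity check on node 21/22, as a sketch:
[root@gcc-21 ~]# ps -ef | grep -o -- '--cluster-dns[= ]*[0-9.]*'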
[root@gcc-200 /data/k8s-yaml/coredns]# ll
total 16
-rw-r--r-- 1 root root 319 Jun 9 18:36 cm.yaml
-rw-r--r-- 1 root root 1294 Jun 9 18:37 dp.yaml
-rw-r--r-- 1 root root 954 Jun 9 18:39 rbac.yaml
-rw-r--r-- 1 root root 387 Jun 9 18:39 svc.yaml
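From any node the manifests should now be browsable over HTTP (autoindex is on):
[root@gcc-21 ~]# curl -s http://k8s-yaml.od.com/coredns/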
# Apply the resource configuration manifests
# Run on host 21 or 22
[root@gcc-21 ~]# kubectl create -f http://k8s-yaml.od.com/coredns/rbac.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
[root@gcc-21 ~]# kubectl create -f http://k8s-yaml.od.com/coredns/cm.yaml
configmap/coredns created
[root@gcc-21 ~]# kubectl create -f http://k8s-yaml.od.com/coredns/dp.yaml
deployment.apps/coredns created
[root@gcc-21 ~]# kubectl create -f http://k8s-yaml.od.com/coredns/svc.yaml
service/coredns created
[root@gcc-21 ~]#
# Verify
[root@gcc-21 ~]# kubectl get all -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/coredns-6b6c4f9648-7t5tm 1/1 Running 0 51s 172.7.22.4 gcc-22.host.com <none> <none>
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/coredns ClusterIP 192.168.0.2 <none> 53/UDP,53/TCP,9153/TCP 41s k8s-app=coredns
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/coredns 1/1 1 1 51s coredns harbor.od.com/public/coredns:v1.6.1 k8s-app=coredns
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
replicaset.apps/coredns-6b6c4f9648 1 1 1 51s coredns harbor.od.com/public/coredns:v1.6.1 k8s-app=coredns,pod-template-hash=6b6c4f9648
[root@gcc-21 ~]# kubectl create deployment nginx-da --image=harbor.od.com/public/nginx:v1.18.0-curl -n kube-public
[root@gcc-22 ~]# kubectl expose deployment nginx-da --port=80 -n kube-public
[root@gcc-21 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-da-75b8b84549-6qg7c 1/1 Running 0 3h4m
[root@gcc-21 ~]# dig -t A nginx-da.kube-public.svc.cluster.local. @192.168.0.2 +short
192.168.35.220
[root@gcc-21 ~]# kubectl exec -ti nginx-da-75b8b84549-6qg7c /bin/bash -n kube-public
root@nginx-da-75b8b84549-6qg7c:/# cat /etc/resolv.conf
nameserver 192.168.0.2
search kube-public.svc.cluster.local svc.cluster.local cluster.local host.com
options ndots:5
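Because of the search list and ndots:5 above, the short name nginx-da is expanded to nginx-da.kube-public.svc.cluster.local before the query ever leaves the Pod - exactly the FQDN the ping below resolves.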
root@nginx-da-75b8b84549-6qg7c:/# ping nginx-da
PING nginx-da.kube-public.svc.cluster.local (192.168.35.220): 56 data bytes
64 bytes from 192.168.35.220: icmp_seq=0 ttl=64 time=0.238 ms
64 bytes from 192.168.35.220: icmp_seq=1 ttl=64 time=0.160 ms
64 bytes from 192.168.35.220: icmp_seq=2 ttl=64 time=0.111 ms
^C--- nginx-da.kube-public.svc.cluster.local ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.111/0.170/0.238/0.052 ms
root@nginx-da-75b8b84549-6qg7c:/# exit
[root@gcc-21 ~]#