Official documentation: https://kubernetes.io/

kuboard official site: https://kuboard.cn/

traefik: https://doc.traefik.io/traefik/

Istio: https://istio.io/latest/zh/docs/setup/getting-started/

helm3: https://helm.sh/zh/docs/ (video: https://www.bilibili.com/video/BV12D4y1Y7Z7/?p=5&vd_source=9cc13448e910ab1ffd162e81e473a56e)

Cleaning up a previously installed k8s cluster

# Clean up the cluster
kubeadm reset -f
modprobe -r ipip
lsmod
rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /etc/systemd/system/kubelet.service.d
rm -rf /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube*
rm -rf /etc/cni
rm -rf /opt/cni
rm -rf /var/lib/etcd
rm -rf /var/etcd
yum clean all
yum remove kube*

# Flush firewall rules
iptables -t nat -F
iptables -t nat -X
iptables -t nat -P PREROUTING ACCEPT
iptables -t nat -P POSTROUTING ACCEPT
iptables -t nat -P OUTPUT ACCEPT
iptables -t mangle -F
iptables -t mangle -X
iptables -t mangle -P PREROUTING ACCEPT
iptables -t mangle -P INPUT ACCEPT
iptables -t mangle -P FORWARD ACCEPT
iptables -t mangle -P OUTPUT ACCEPT
iptables -t mangle -P POSTROUTING ACCEPT
iptables -F
iptables -X
iptables -P FORWARD ACCEPT
iptables -P INPUT ACCEPT
iptables -P OUTPUT ACCEPT
iptables -t raw -F
iptables -t raw -X
iptables -t raw -P PREROUTING ACCEPT
iptables -t raw -P OUTPUT ACCEPT
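
# If kube-proxy had been running in IPVS mode, its virtual-server table and dummy
# interface can be cleaned up as well (optional addition; ipvsadm comes from the ipvsadm package):
ipvsadm --clear             # flush all IPVS virtual servers
ip link delete kube-ipvs0   # dummy interface; only exists if kube-proxy ran in IPVS mode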


Steps to remove the flannel network:
# Step 1: delete flannel on the master node
kubectl delete -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# Step 2: on each node, clean up the files flannel left behind
ifconfig cni0 down
ip link delete cni0
ifconfig flannel.1 down
ip link delete flannel.1
rm -rf /var/lib/cni/
rm -f /etc/cni/net.d/*
Note: after running the steps above, restart kubelet.
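
A minimal restart sequence, assuming kubelet and the container runtime are managed by systemd:

systemctl daemon-reload
systemctl restart kubelet
systemctl restart docker    # or: systemctl restart containerd, depending on the runtime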

Installation

Clusters installed by following the documents below generally work without issues.

Binary installation

Kubeadm installation

# Pre-flight checklist
# CentOS 7.5 or later, preferably 7.9
# CPU >= 2 cores && memory >= 4G
# hostname is set (not localhost) and the node has a fixed IP address
# Disable swap
swapoff -a && sed -i '/swap/s/^/#/' /etc/fstab
setenforce 0; sed -ri '/^SELINUX=/s/SELINUX=.+/SELINUX=disabled/' /etc/selinux/config
# Load the br_netfilter module
modprobe br_netfilter
# Configure kernel parameters
echo -e 'net.bridge.bridge-nf-call-ip6tables = 1\nnet.bridge.bridge-nf-call-iptables = 1' > /etc/sysctl.d/k8s.conf && sysctl --system
# Enable IPVS (on kernels >= 4.19 the module is nf_conntrack instead of nf_conntrack_ipv4)
grep -e ipvs -e nf_conntrack_ipv4 /lib/modules/$(uname -r)/modules.builtin
modprobe -- ip_vs;modprobe -- ip_vs_rr;modprobe -- ip_vs_wrr;modprobe -- ip_vs_sh;modprobe -- nf_conntrack_ipv4 && cut -f1 -d ' ' /proc/modules | grep -e ip_vs -e nf_conntrack_ipv4
yum install -y ipset ipvsadm

yum install -y containerd.io-1.3.9 docker-ce-19.03.14 docker-ce-cli-19.03.14

# Modify the containerd unit so that it loads the IPVS modules before starting; after testing several approaches this one proved the most reliable
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStartPre=-/sbin/modprobe ip_vs
ExecStartPre=-/sbin/modprobe ip_vs_rr
ExecStartPre=-/sbin/modprobe ip_vs_wrr
ExecStartPre=-/sbin/modprobe ip_vs_sh
ExecStartPre=-/sbin/modprobe nf_conntrack_ipv4
ExecStart=/usr/bin/containerd
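
One way to apply that snippet, assuming it is installed as a systemd drop-in (the file name 10-ipvs.conf is my own choice) rather than by editing the packaged unit; in a drop-in the ExecStartPre lines are appended, so the shipped ExecStart stays in place:

mkdir -p /etc/systemd/system/containerd.service.d
cat > /etc/systemd/system/containerd.service.d/10-ipvs.conf <<'EOF'
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStartPre=-/sbin/modprobe ip_vs
ExecStartPre=-/sbin/modprobe ip_vs_rr
ExecStartPre=-/sbin/modprobe ip_vs_wrr
ExecStartPre=-/sbin/modprobe ip_vs_sh
ExecStartPre=-/sbin/modprobe nf_conntrack_ipv4
EOF
systemctl daemon-reload && systemctl restart containerd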

# Initialize the cluster (run on the first master)
kubeadm init --control-plane-endpoint "192.168.50.116:6443" --pod-network-cidr 172.16.0.0/16 --service-cidr 10.96.0.0/16 --image-repository registry.aliyuncs.com/google_containers --upload-certs

scp -r /etc/kubernetes/pki/* 192.168.50.112:/etc/kubernetes/pki # copy the cluster PKI to the additional control-plane node

# On the new control-plane node, delete these files so kubeadm regenerates them; otherwise certificate validation fails
rm /etc/kubernetes/pki/{apiserver.crt,apiserver.key,apiserver-etcd-client.crt,apiserver-etcd-client.key,apiserver-kubelet-client.crt,apiserver-kubelet-client.key,etcd/peer.crt,etcd/peer.key,etcd/server.crt,etcd/server.key}
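
kubeadm init prints the exact join commands (token, CA hash, certificate key); the sketch below only shows their shape, with <...> placeholders to be replaced by the values from your own init output:

# additional control-plane node (after the pki files above are in place)
kubeadm join 192.168.50.116:6443 --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --control-plane --certificate-key <certificate-key>

# worker node
kubeadm join 192.168.50.116:6443 --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash>

# if the token has expired, print a fresh join command on the first master
kubeadm token create --print-join-command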

TRAEFIK

Traefik official documentation

"One article to fully understand Traefik 2.1" (Chinese tutorial)

traefik2.2.yaml

---
## CRD: IngressRoute (these definitions are explained in the official docs)
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ingressroutes.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: IngressRoute
    plural: ingressroutes
    singular: ingressroute
---
## CRD: IngressRouteTCP
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ingressroutetcps.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: IngressRouteTCP
    plural: ingressroutetcps
    singular: ingressroutetcp
---
## Middleware
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: middlewares.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: Middleware
    plural: middlewares
    singular: middleware
---
## TLSOption
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: tlsoptions.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: TLSOption
    plural: tlsoptions
    singular: tlsoption
---
## TraefikService
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: traefikservices.traefik.containo.us
spec:
  scope: Namespaced
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: TraefikService
    plural: traefikservices
    singular: traefikservice
---
## TLSStore
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: tlsstores.traefik.containo.us
spec:
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: TLSStore
    plural: tlsstores
    singular: tlsstore
  scope: Namespaced
---
## IngressRouteUDP
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: ingressrouteudps.traefik.containo.us
spec:
  group: traefik.containo.us
  version: v1alpha1
  names:
    kind: IngressRouteUDP
    plural: ingressrouteudps
    singular: ingressrouteudp
  scope: Namespaced

---
## ServiceAccount
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: kube-system
  name: traefik-ingress-controller
---
## ClusterRole
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups: [""]
    resources: ["services","endpoints","secrets"]
    verbs: ["get","list","watch"]
  - apiGroups: ["extensions"]
    resources: ["ingresses"]
    verbs: ["get","list","watch"]
  - apiGroups: ["extensions"]
    resources: ["ingresses/status"]
    verbs: ["update"]
  - apiGroups: ["traefik.containo.us"]
    resources: ["middlewares","ingressroutes","ingressroutetcps","tlsoptions","ingressrouteudps","traefikservices","tlsstores"]
    verbs: ["get","list","watch"]
---
## ClusterRoleBinding
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
  - kind: ServiceAccount
    name: traefik-ingress-controller
    namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: traefik-config
  namespace: kube-system
data:
  traefik.yaml: |-
    ping: ""                       ## Enable ping
    serversTransport:
      insecureSkipVerify: true     ## Skip TLS verification of the proxied backends
    api:
      insecure: true               ## Allow access to the API over plain HTTP
      dashboard: true              ## Enable the dashboard
      debug: false                 ## Enable debug mode
    metrics:
      prometheus: ""               ## Expose Prometheus metrics with the default settings
    entryPoints:
      web:
        address: ":80"             ## Port 80, entrypoint named web
      websecure:
        address: ":443"            ## Port 443, entrypoint named websecure
      scene_seed:
        #address: "192.168.50.116:8201/udp"  # pin port 8201 to a specific node's IP when the business requires it
        address: ":8201/udp"       # used by the seed-game Go project, added here in advance
      gate_seed:
        address: ":8101/tcp"       # format: [host]:port[/tcp|/udp]
    providers:
      kubernetesCRD: ""            ## Enable the Kubernetes CRD provider for routing rules
      kubernetesIngress: ""        ## Enable the Kubernetes Ingress provider for routing rules
    log:
      filePath: ""                 ## Log file path; empty means stdout
      level: error                 ## Log level
      format: json                 ## Log format
    accessLog:
      filePath: ""                 ## Access-log file path; empty means stdout
      format: json                 ## Access-log format
      bufferingSize: 0             ## Number of access-log lines to buffer
      filters:
        #statusCodes: ["200"]      ## Keep only access logs whose status code matches
        retryAttempts: true        ## Keep access logs for requests that were retried
        minDuration: 20            ## Keep access logs for requests longer than this duration
      fields:                      ## Which access-log fields to keep (keep) or drop (drop)
        defaultMode: keep          ## Keep fields by default
        names:                     ## Per-field overrides
          ClientUsername: drop
        headers:                   ## Which headers to keep
          defaultMode: keep        ## Keep headers by default
          names:                   ## Per-header overrides
            User-Agent: redact
            Authorization: drop
            Content-Type: keep
    #tracing:                      ## Tracing; supports zipkin, datadog, jaeger, instana, haystack, ...
    #  serviceName:                ## Service name shown in the tracing backend
    #  zipkin:                     ## Zipkin settings
    #    sameSpan: true            ## Use Zipkin SameSpan RPC-style tracing
    #    id128Bit: true            ## Use 128-bit trace IDs
    #    sampleRate: 0.1           ## Sampling rate (0.0 to 1.0)
    #    httpEndpoint: http://localhost:9411/api/v2/spans  ## Zipkin server endpoint
---
apiVersion: v1
kind: Service
metadata:
  name: traefik
  namespace: kube-system
spec:
  ports:
    - name: web
      port: 80
    - name: websecure
      port: 443
    - name: admin
      port: 8080
  selector:
    app: traefik
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    app: traefik
spec:
  selector:
    matchLabels:
      app: traefik
  template:
    metadata:
      name: traefik
      labels:
        app: traefik
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 1
      containers:
        - image: traefik:v2.2.8
          name: traefik-ingress-lb
          ports:
            - name: web
              containerPort: 80
              hostPort: 80        ## Bind the container port to port 80 on the host
            - name: websecure
              containerPort: 443
              hostPort: 443       ## Bind the container port to port 443 on the host
            - name: admin
              containerPort: 8080 ## Traefik dashboard port
            - containerPort: 8101 # the custom TCP and UDP ports defined earlier must be listed here, otherwise they cannot be reached through Traefik
              hostPort: 8101
              name: gate-seed
              protocol: TCP
            - containerPort: 8201
              hostPort: 8201
              name: scene-seed    # note: only lowercase letters, digits and '-' are allowed
              protocol: UDP
          resources:
            limits:
              cpu: 2000m
              memory: 1024Mi
            requests:
              cpu: 1000m
              memory: 1024Mi
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
          args:
            - --configfile=/config/traefik.yaml
          volumeMounts:
            - mountPath: "/config"
              name: "config"
          readinessProbe:
            httpGet:
              path: /ping
              port: 8080
            failureThreshold: 3
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          livenessProbe:
            httpGet:
              path: /ping
              port: 8080
            failureThreshold: 3
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
      volumes:
        - name: config
          configMap:
            name: traefik-config
      # tolerations:              ## Tolerate all taints so the pod also runs on tainted nodes
      #   - operator: "Exists"
      # nodeSelector:             ## Only schedule on nodes carrying this label
      #   IngressProxy: "true"
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: traefik-dashboard-ingress
  namespace: kube-system
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`traefik.seed.com`) && PathPrefix(`/`)
      kind: Rule
      services:
        - name: traefik
          port: 8080

Commands that may help with troubleshooting

kubectl get ingressroute/traefik-dashboard-ingress -o wide -n kube-system
kubectl describe ingressroute/traefik-dashboard-ingress -n kube-system
kubectl get service/traefik -o yaml -n kube-system
kubectl get daemonset.apps/traefik-ingress-controller -o yaml -n kube-system # view the DaemonSet yaml
kubectl logs --tail=10 pod/traefik-ingress-controller-gcgzs -n kube-system # tail the pod logs
kubectl exec -it pod/traefik-ingress-controller-gcgzs -n kube-system -- /bin/sh # open a shell inside the container
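
To confirm the dashboard route actually answers, a quick check from outside the cluster (assuming traefik.seed.com is pointed at a node running the DaemonSet, here 192.168.50.116 from the examples above):

# route through the web entrypoint; port 80 is bound on every node running the DaemonSet
curl -sI -H "Host: traefik.seed.com" http://192.168.50.116/dashboard/
# or reach the admin API through a temporary port-forward
kubectl -n kube-system port-forward svc/traefik 8080:8080 &
curl -s http://127.0.0.1:8080/ping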

If you have a graphical UI, you can simply open it and click around, which is more convenient, so let's set one up.

kuboard graphical UI

A Kubernetes learning site built by a Chinese developer, full of tutorials; the graphical UI is his work as well. Hats off to the author!

Reference: https://kuboard.cn/

If you want to access it over HTTPS, you need to create a certificate first.

openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=kuboard.seed.com" # self-signed cert for the domain; alternatively generate one with Let's Encrypt
kubectl create secret tls kuboard-tls --cert=tls.crt --key=tls.key -n kube-system # the secret name kuboard-tls must match the tls section of the IngressRoute

kuboard.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kuboard
  namespace: kube-system
  annotations:
    k8s.kuboard.cn/displayName: kuboard
    k8s.kuboard.cn/ingress: "true"
    k8s.kuboard.cn/workload: kuboard
  labels:
    k8s.kuboard.cn/layer: monitor
    k8s.kuboard.cn/name: kuboard
spec:
  replicas: 2
  selector:
    matchLabels:
      k8s.kuboard.cn/layer: monitor
      k8s.kuboard.cn/name: kuboard
  template:
    metadata:
      labels:
        k8s.kuboard.cn/layer: monitor
        k8s.kuboard.cn/name: kuboard
    spec:
      containers:
        - name: kuboard
          #image: harbor.od.com/public/kuboard:v2.0.0
          image:                  # fill in the kuboard image you use
          imagePullPolicy: Always
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
          operator: Exists

---
apiVersion: v1
kind: Service
metadata:
  name: kuboard
  namespace: kube-system
spec:
  ports:
    - name: http
      port: 80
      targetPort: 80
  selector:
    k8s.kuboard.cn/layer: monitor
    k8s.kuboard.cn/name: kuboard

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kuboard-user
  namespace: kube-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kuboard-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kuboard-user
    namespace: kube-system

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kuboard-viewer
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kuboard-viewer
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view
subjects:
  - kind: ServiceAccount
    name: kuboard-viewer
    namespace: kube-system

---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: kuboard
  namespace: kube-system
spec:
  entryPoints:
    - websecure
    - web
  routes:
    - match: Host(`kuboard.seed.com`) && PathPrefix(`/`)
      kind: Rule
      services:
        - name: kuboard
          port: 80
  tls:
    secretName: kuboard-tls
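
Once the image field is filled in, a minimal roll-out and check sequence (resource names taken from the manifest above, file assumed to be saved as kuboard.yaml):

kubectl apply -f kuboard.yaml
kubectl -n kube-system get pods -l k8s.kuboard.cn/name=kuboard
kubectl -n kube-system get ingressroute kuboard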

Related commands

# Official one-liner to fetch the token
echo $(kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep kuboard-user | awk '{print $1}') -o go-template='{{.data.token}}' | base64 -d)

# Alternative: look up the kuboard-user token secret and print its token
kubectl describe secret $(kubectl get secrets -n kube-system |awk '/kuboard-user/{print $1}') -n kube-system|awk '/token:/{print $2}'

Storage

NFS

Kubernetes supports dynamic provisioning of PVs through PVCs, but NFS itself does not provide this capability, even though NFS is a very common shared-storage backend. The NFS-Client Provisioner gives an existing NFS server the ability to serve dynamically provisioned PVs.

Project home: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner. Provisioned directories are named `${namespace}-${pvcName}-${pvName}`.

Before using NFS, set up an NFS server first:

$ yum install -y nfs-utils
$ echo '/data/nfs *(rw,sync,no_wdelay,no_root_squash)' > /etc/exports
$ systemctl start nfs
$ systemctl enable nfs
$ showmount -e
Export list for jenkins:
/data/nfs *
$ mount -t nfs 192.168.50.168:/data/nfs /home/data-nfs # test the mount from a client
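
If more export entries are added later, they can be published without restarting the NFS service (standard nfs-utils commands):

exportfs -rav                  # re-read /etc/exports and re-export everything
showmount -e 192.168.50.168    # confirm the share is visible from a client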

nfs-install.yaml

---
# Create the namespace first: kubectl create namespace storage
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: storage
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: storage
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: storage
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: storage
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: storage
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: storage
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.mirrors.ustc.edu.cn/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            # NFS server details
            - name: NFS_SERVER
              value: 192.168.50.168
            - name: NFS_PATH
              value: /data/nfs
      volumes:
        - name: nfs-client-root
          nfs:
            # NFS server details
            server: 192.168.50.168
            path: /data/nfs
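
A minimal sketch to deploy and verify the provisioner, assuming the manifest is saved as nfs-install.yaml:

kubectl create namespace storage
kubectl apply -f nfs-install.yaml
kubectl -n storage get pods -l app=nfs-client-provisioner
kubectl -n storage logs deploy/nfs-client-provisioner --tail=20   # should show it watching for claims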


Create a StorageClass named managed-nfs-storage that declares the NFS dynamic volume provisioner.

nfs-storageclass.yaml

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name, must match the deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"
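
To apply it, and optionally mark it as the cluster default StorageClass (the patch is optional):

kubectl apply -f nfs-storageclass.yaml
kubectl get storageclass
# optional: make it the default StorageClass
kubectl patch storageclass managed-nfs-storage \
  -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'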

Create a PVC and a Pod to test:

test-nfs.yaml

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-pvc
spec:
  storageClassName: managed-nfs-storage # must match the name of the StorageClass created above
  accessModes:
    - ReadWriteOnce # single-node read/write
  resources:
    requests:
      storage: 1Mi

---
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
    - name: test-pod
      image: busybox:latest
      command:
        - "/bin/sh"
      args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1" # create a file named SUCCESS
      volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-pvc
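
Apply it and watch the claim get bound and the pod finish (assuming the file is saved as test-nfs.yaml):

kubectl apply -f test-nfs.yaml
kubectl get pvc test-pvc   # should reach Bound once the provisioner has created the PV
kubectl get pod test-pod   # should end up Completed after touching /mnt/SUCCESS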

On the NFS server, verify that the corresponding directory and file were created:

$ cd /data/nfs
$ ls
kube-public-test-pvc-pvc-3dc54156-b81d-11e9-a8b8-000c29d98697

$ cd kube-public-test-pvc-pvc-3dc54156-b81d-11e9-a8b8-000c29d98697
$ ls
SUCCESS

Hands-on: deploying a Go project to K8s

For comparison, the startup configuration on a plain CentOS host (supervisord):

[program:game-seed_test_o]
command = /app/bin/SeedGame_d
process_name = %(program_name)s_%(process_num)d
numprocs = 1
directory = /app/bin/
autostart = false
startsecs = 3
startretries = 3
autorestart = unexpected
exitcodes = 0
redirect_stderr = false
stdout_logfile_maxbytes = 100MB
stderr_logfile = syslog
stderr_logfile_maxbytes = 100MB
environment = WEB_BOSS_RT_ENV=seed_test_o,WEB_BOSS_GAME_ID=Goblin,WEB_BOSS_SERV_ID=10001,K8S_HOST_IP=192.168.50.115,WEB_BOSS_PF_ID=1

seed.yaml

kind: Service
apiVersion: v1
metadata:
  name: seed
  namespace: seed-game
  labels:
    app.kubernetes.io/instance: seed
    app.kubernetes.io/name: seed
spec:
  type: ClusterIP
  ports:
    - name: http        # debug log output, only exposed in the dev environment
      port: 80
      targetPort: 8090
      protocol: TCP
    - name: gateport    # gateway port; clients connect to the server through it
      port: 8101
      targetPort: 8101
      protocol: TCP
    - name: clientport  # scene port used when a client enters a game instance; KCP protocol (UDP)
      port: 8201
      targetPort: 8201
      protocol: UDP
  selector:
    app.kubernetes.io/instance: seed
    app.kubernetes.io/name: seed

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: seed-game-deploy
  namespace: seed-game
  labels:
    app.kubernetes.io/instance: seed
    app.kubernetes.io/name: seed
    app.kubernetes.io/version: v1.0
spec:
  replicas: 2
  strategy:
    rollingUpdate:
      maxSurge: 50%
      maxUnavailable: 50%
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/instance: seed
      app.kubernetes.io/name: seed
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: seed
        app.kubernetes.io/name: seed
    spec:
      containers:
        - env:
            #- name: TZ
            #  value: Asia/Shanghai
            - name: WEB_BOSS_RT_ENV
              value: "seed_test_k8s"
            - name: WEB_BOSS_GAME_ID
              value: "Goblin"
            - name: WEB_BOSS_SERV_ID
              value: "1001"
            - name: K8S_HOST_IP
              value: "192.168.50.112"
            - name: WEB_BOSS_PF_ID
              value: "1"
          image: centos:7 # the busybox image fails with "./SeedGame not found"
          imagePullPolicy: IfNotPresent
          name: seed-game
          workingDir: /app/bin
          command:
            - /app/bin/SeedGame_d
          # readinessProbe:
          #   exec:
          #     command: ["grep","-q","SUCCESSFUL","/app/log/Goblin/panic/*.log"]
          # These ports must also be declared in traefik.yaml (see the IngressRouteTCP/UDP sketch after this manifest); otherwise they have to be exposed via NodePort
          ports:
            - containerPort: 8101
              name: gateport
              protocol: TCP
            - containerPort: 8090
              name: http
              protocol: TCP
            - containerPort: 8201
              name: clientport
              protocol: UDP
          volumeMounts:
            - mountPath: "/app"
              name: nfs-pvc
            - mountPath: "/etc/localtime"
              name: timezone
      volumes:
        - name: nfs-pvc
          persistentVolumeClaim:
            claimName: seed-claim
        - name: timezone
          hostPath:
            path: /etc/localtime
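
To route the game ports through the gate_seed and scene_seed entrypoints defined earlier in traefik.yaml, a sketch of the matching IngressRouteTCP/IngressRouteUDP objects (the resource names seed-gate-tcp and seed-scene-udp are my own choice; the entrypoints, service name and ports come from the manifests above):

cat <<'EOF' | kubectl apply -f -
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteTCP
metadata:
  name: seed-gate-tcp           # name chosen for this example
  namespace: seed-game
spec:
  entryPoints:
    - gate_seed                 # TCP entrypoint :8101 from traefik.yaml
  routes:
    - match: HostSNI(`*`)       # plain TCP, no TLS/SNI matching
      services:
        - name: seed
          port: 8101
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRouteUDP
metadata:
  name: seed-scene-udp          # name chosen for this example
  namespace: seed-game
spec:
  entryPoints:
    - scene_seed                # UDP entrypoint :8201/udp from traefik.yaml
  routes:
    - services:
        - name: seed
          port: 8201
EOF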