After deploying Kubernetes, a directory on one of the nodes can be used as the cluster's data storage directory.
Note: the rook-nfs project is deprecated, and this document may be replaced in the future. I am looking for an alternative; if you know of a good one, please let me know. Block-storage solutions such as Ceph are not under consideration for now.
1. Import the Rules
Before starting, import the RBAC rules and the initialization configuration. This step pulls the rook/nfs:v1.7.3 image and requires the nfs-common package to be installed on every node in the cluster.
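A minimal sketch of installing the client package on Debian/Ubuntu nodes (on RHEL-family systems the equivalent package is nfs-utils):

# run on every cluster node
sudo apt-get update && sudo apt-get install -y nfs-common

The complete manifest to import follows; save it to a file (e.g. operator.yaml, a filename chosen here for illustration) so it can be applied afterwards.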
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.1-0.20210420220833-f284e2e8098c
creationTimestamp: null
name: nfsservers.nfs.rook.io
spec:
group: nfs.rook.io
names:
kind: NFSServer
listKind: NFSServerList
plural: nfsservers
singular: nfsserver
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: AGE
type: date
- description: NFS Server instance state
jsonPath: .status.state
name: State
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: NFSServer is the Schema for the nfsservers API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: NFSServerSpec represents the spec of NFS daemon
properties:
annotations:
additionalProperties:
type: string
description: The annotations-related configuration to add/set on each Pod related object.
type: object
exports:
description: The parameters to configure the NFS export
items:
description: ExportsSpec represents the spec of NFS exports
properties:
name:
description: Name of the export
type: string
persistentVolumeClaim:
description: PVC from which the NFS daemon gets storage for sharing
properties:
claimName:
description: 'ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
type: string
readOnly:
description: Will force the ReadOnly setting in VolumeMounts. Default false.
type: boolean
required:
- claimName
type: object
server:
description: The NFS server configuration
properties:
accessMode:
description: Reading and Writing permissions on the export Valid values are "ReadOnly", "ReadWrite" and "none"
enum:
- ReadOnly
- ReadWrite
- none
type: string
allowedClients:
description: The clients allowed to access the NFS export
items:
description: AllowedClientsSpec represents the client specs for accessing the NFS export
properties:
accessMode:
description: Reading and Writing permissions for the client to access the NFS export Valid values are "ReadOnly", "ReadWrite" and "none" Gets overridden when ServerSpec.accessMode is specified
enum:
- ReadOnly
- ReadWrite
- none
type: string
clients:
description: The clients that can access the share Values can be hostname, ip address, netgroup, CIDR network address, or all
items:
type: string
type: array
name:
description: Name of the clients group
type: string
squash:
description: Squash options for clients Valid values are "none", "rootid", "root", and "all" Gets overridden when ServerSpec.squash is specified
enum:
- none
- rootid
- root
- all
type: string
type: object
type: array
squash:
description: This prevents the root users connected remotely from having root privileges Valid values are "none", "rootid", "root", and "all"
enum:
- none
- rootid
- root
- all
type: string
type: object
type: object
type: array
replicas:
description: Replicas of the NFS daemon
type: integer
type: object
status:
description: NFSServerStatus defines the observed state of NFSServer
properties:
message:
type: string
reason:
type: string
state:
type: string
type: object
type: object
served: true
storage: true
subresources:
status: { }
status:
acceptedNames:
kind: ""
plural: ""
conditions: [ ]
storedVersions: [ ]
---
apiVersion: v1
kind: Namespace
metadata:
name: rook-nfs-system # namespace:operator
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-nfs-operator
namespace: rook-nfs-system # namespace:operator
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-nfs-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: rook-nfs-operator
subjects:
- kind: ServiceAccount
name: rook-nfs-operator
namespace: rook-nfs-system # namespace:operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: rook-nfs-operator
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- get
- watch
- create
- apiGroups:
- ""
resources:
- services
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- nfs.rook.io
resources:
- nfsservers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- nfs.rook.io
resources:
- nfsservers/status
- nfsservers/finalizers
verbs:
- get
- patch
- update
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: rook-nfs-operator
namespace: rook-nfs-system # namespace:operator
labels:
app: rook-nfs-operator
spec:
replicas: 1
selector:
matchLabels:
app: rook-nfs-operator
template:
metadata:
labels:
app: rook-nfs-operator
spec:
serviceAccountName: rook-nfs-operator
containers:
- name: rook-nfs-operator
image: rook/nfs:v1.7.3
imagePullPolicy: IfNotPresent
args: [ "nfs", "operator" ]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
---
apiVersion: v1
kind: Namespace
metadata:
name: rook-nfs
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: rook-nfs-server
namespace: rook-nfs
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-nfs-provisioner-runner
rules:
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "list", "watch", "create", "delete" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumeclaims" ]
verbs: [ "get", "list", "watch", "update" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
resources: [ "events" ]
verbs: [ "create", "update", "patch" ]
- apiGroups: [ "" ]
resources: [ "services", "endpoints" ]
verbs: [ "get" ]
- apiGroups: [ "policy" ]
resources: [ "podsecuritypolicies" ]
resourceNames: [ "rook-nfs-policy" ]
verbs: [ "use" ]
- apiGroups: [ "" ]
resources: [ "endpoints" ]
verbs: [ "get", "list", "watch", "create", "update", "patch" ]
- apiGroups:
- nfs.rook.io
resources:
- "*"
verbs:
- "*"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rook-nfs-provisioner-runner
subjects:
- kind: ServiceAccount
name:
rook-nfs-server
# replace with namespace where provisioner is deployed
namespace: rook-nfs
roleRef:
kind: ClusterRole
name: rook-nfs-provisioner-runner
apiGroup: rbac.authorization.k8s.io
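Assuming the manifest above was saved as operator.yaml (an illustrative filename), apply it with:

kubectl apply -f operator.yaml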
Once the deployment finishes, run kubectl get pods -n rook-nfs-system to check the result.
2. Map a Node Directory
2.1. Create the Local PV
First, create a mount point on the cluster host; /share is used here, prepared as shown below.
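A minimal sketch of preparing the directory on the chosen node (a plain directory is assumed; mounting a dedicated disk at /share works the same way):

# run on the node that will hold the data (node-master below)
sudo mkdir -p /share
sudo chmod 0777 /share   # wide-open permissions for this demo; tighten for production

Then create the PersistentVolume with the following configuration: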
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-rook-nfs-local
labels:
app: rook-nfs
spec:
accessModes:
- ReadWriteOnce
capacity:
storage: 2000Gi
local:
path: /share (1)
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- 'node-master' (2)
persistentVolumeReclaimPolicy: Retain
storageClassName: sc-rook-nfs-local
volumeMode: Filesystem
Where:
(1) The path field points at the local directory on the node that backs this PV, i.e. /share.
(2) Pins this PV to the node-master node (the value must match the hostname used above); node names can be listed with kubectl get nodes.
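The nodeAffinity rule matches on the kubernetes.io/hostname label, which can differ from the node name in some setups; it can be checked directly with the label-column flag:

kubectl get nodes -L kubernetes.io/hostname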
After it is created, check the result with kubectl get pv.
2.2. Create the Host StorageClass
Create a StorageClass associated with the PV created above, using the following configuration:
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: sc-rook-nfs-local
labels:
app: rook-nfs
provisioner: kubernetes.io/no-provisioner (1)
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer (2)
Where:
(1) provisioner: no-provisioner is used because Local Persistent Volumes do not yet support dynamic provisioning, so a matching PV cannot be created automatically when a user creates a PVC.
(2) volumeBindingMode: WaitForFirstConsumer enables delayed binding: a Local Persistent Volume can only be bound once pod scheduling has taken place, so binding is deferred until the first consumer appears.
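After the class is applied, the binding mode can be confirmed (recent kubectl versions print a VOLUMEBINDINGMODE column):

kubectl get storageclass sc-rook-nfs-local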
2.3. Create the Host PersistentVolumeClaim
Create a PVC for the NFS Server that will be created later, using the following configuration:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-rook-nfs-local
namespace: rook-nfs
labels:
app: rook-nfs
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2000Gi
storageClassName: sc-rook-nfs-local
volumeMode: Filesystem
  volumeName: pv-rook-nfs-local # the PV to bind to
After importing the configuration, check the result with kubectl get pvc -n rook-nfs. Because the StorageClass uses WaitForFirstConsumer, the claim stays Pending until its first consumer, the NFS Server pod, is scheduled; this is expected.
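A quick check that the claim exists (it will report a Pending status at this point, which is normal under delayed binding):

kubectl get pvc -n rook-nfs pvc-rook-nfs-local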
3. Create the NFS Server
An NFS server instance is created by creating an instance of the nfsservers.nfs.rook.io resource:
apiVersion: nfs.rook.io/v1alpha1
kind: NFSServer
metadata:
name: rook-nfs
namespace: rook-nfs
labels:
app: rook-nfs
spec:
replicas: 1
exports:
- name: local-share
server:
accessMode: ReadWrite
squash: none
persistentVolumeClaim:
claimName: pvc-rook-nfs-local
annotations:
rook: nfs
After it is created, check the server status with kubectl get pods -n rook-nfs and kubectl get nfsservers.nfs.rook.io -n rook-nfs; if the pod status is Running, the server was created successfully.
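A consolidated sketch of applying the manifest and checking the status, assuming the manifest above was saved as nfs-server.yaml (an illustrative filename):

kubectl apply -f nfs-server.yaml
kubectl get pods -n rook-nfs
kubectl get nfsservers.nfs.rook.io -n rook-nfs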
4. Create the NFS StorageClass
After the Operator and the NFS Server instance are deployed, a StorageClass must be created so that Volumes can be provisioned dynamically.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
labels:
app: rook-nfs
name: 'sc-nfs-share'
annotations:
storageclass.kubernetes.io/is-default-class: "true"
parameters:
exportName: local-share
nfsServerName: rook-nfs
nfsServerNamespace: rook-nfs
provisioner: nfs.rook.io/rook-nfs-provisioner
reclaimPolicy: Retain
volumeBindingMode: Immediate
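The storageclass.kubernetes.io/is-default-class annotation makes this the cluster's default class, so PVCs that omit storageClassName will also be served by it. Verify with:

kubectl get storageclass sc-nfs-share
# the NAME column should read sc-nfs-share (default)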
5. Verify the Deployment
5.1. Import the Test Configuration
Import the following configuration to create the test Pods.
## test resources
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-test-rook-nfs
labels:
app: rook-nfs
mode: test
spec:
storageClassName: 'sc-nfs-share'
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rook-nfs
mode: test
role: web-frontend
name: deploy-test-rook-nfs-web
spec:
replicas: 2
selector:
matchLabels:
app: rook-nfs
mode: test
role: web-frontend
template:
metadata:
labels:
app: rook-nfs
mode: test
role: web-frontend
spec:
containers:
- name: web
image: nginx
ports:
- name: web
containerPort: 80
volumeMounts:
# name must match the volume name below
- name: rook-nfs-vol
mountPath: "/usr/share/nginx/html"
volumes:
- name: rook-nfs-vol
persistentVolumeClaim:
claimName: pvc-test-rook-nfs
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: rook-nfs
mode: test
role: busybox
name: deploy-test-rook-nfs-busybox
spec:
replicas: 2
selector:
matchLabels:
app: rook-nfs
mode: test
role: busybox
template:
metadata:
labels:
app: rook-nfs
mode: test
role: busybox
spec:
containers:
- image: busybox
command:
- sh
- -c
- "while true; do date > /mnt/index.html; hostname >> /mnt/index.html; sleep $(($RANDOM % 5 + 5)); done"
imagePullPolicy: IfNotPresent
name: busybox
volumeMounts:
# name must match the volume name below
- name: rook-nfs-vol
mountPath: "/mnt"
volumes:
- name: rook-nfs-vol
persistentVolumeClaim:
claimName: pvc-test-rook-nfs
---
kind: Service
apiVersion: v1
metadata:
name: svc-test-rook-nfs
labels:
app: rook-nfs
mode: test
spec:
ports:
- port: 80
selector:
role: web-frontend
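Assuming the test manifest above was saved as rook-nfs-test.yaml (an illustrative filename), apply it with:

kubectl apply -f rook-nfs-test.yaml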
5.2. Run the Verification
After everything is created, run the following commands; if data is returned, the deployment works:
# verify the created resources
kubectl get deploy,pvc,pods -l mode=test,app=rook-nfs
# fetch the test page
CLUSTER_IP=$(kubectl get services svc-test-rook-nfs -o jsonpath='{.spec.clusterIP}')
POD_NAME=$(kubectl get pod -l app=rook-nfs,role=busybox -o jsonpath='{.items[0].metadata.name}')
kubectl exec $POD_NAME -- wget -qO- http://$CLUSTER_IP
If a 403 error is returned, it is likely a filesystem permission problem on the share; it can be inspected manually with kubectl exec, as sketched below.
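Reusing the $POD_NAME variable from above, check what the busybox writers actually produced and the permissions on the shared mount:

kubectl exec $POD_NAME -- ls -l /mnt
kubectl exec $POD_NAME -- cat /mnt/index.html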