Background: Kubernetes cluster v1.18 with 3 nodes;
the master node has the taint node-role.kubernetes.io/master:NoSchedule.
My case:
Create directories on node1: /data/oss0 and /data/oss1.
Create directories on node2: /data/oss2 and /data/oss3.
Then create PVs and PVCs backed by local volumes:
# define local pv/pvc
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-oss-0
  labels:
    # Matched by the PVC's spec.selector so each claim binds to exactly
    # this volume.
    pvname: local-oss-0
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-oss
  local:
    # Directory pre-created on the node; must exist before the pod starts.
    path: /data/oss0
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                # NOTE(review): must exactly match the node name shown by
                # `kubectl get nodes` — kubelet lowercases the Linux
                # hostname, so a capitalized hostname here produces a
                # "volume node affinity conflict". Verify against the
                # cluster.
                - host_node1
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # Name must follow the StatefulSet pattern
  # <volumeClaimTemplate-name>-<statefulset-name>-<ordinal> so this
  # pre-created claim is used instead of dynamic provisioning.
  name: local-oss-minio-test-0
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
  storageClassName: local-oss
  selector:
    matchLabels:
      # Pins this claim to the PV labeled pvname=local-oss-0.
      pvname: local-oss-0
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-oss-1
  labels:
    # Matched by the PVC's spec.selector so each claim binds to exactly
    # this volume.
    pvname: local-oss-1
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-oss
  local:
    # Directory pre-created on the node; must exist before the pod starts.
    path: /data/oss1
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                # NOTE(review): must exactly match the node name shown by
                # `kubectl get nodes` — kubelet lowercases the Linux
                # hostname, so a capitalized hostname here produces a
                # "volume node affinity conflict". Verify against the
                # cluster.
                - host_node1
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # Name must follow the StatefulSet pattern
  # <volumeClaimTemplate-name>-<statefulset-name>-<ordinal> so this
  # pre-created claim is used instead of dynamic provisioning.
  name: local-oss-minio-test-1
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
  storageClassName: local-oss
  selector:
    matchLabels:
      # Pins this claim to the PV labeled pvname=local-oss-1.
      pvname: local-oss-1
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-oss-2
  labels:
    # Matched by the PVC's spec.selector so each claim binds to exactly
    # this volume.
    pvname: local-oss-2
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-oss
  local:
    # Directory pre-created on the node; must exist before the pod starts.
    path: /data/oss2
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                # NOTE(review): must exactly match the node name shown by
                # `kubectl get nodes` — kubelet lowercases the Linux
                # hostname, so a capitalized hostname here produces a
                # "volume node affinity conflict". Verify against the
                # cluster.
                - host_node2
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # Name must follow the StatefulSet pattern
  # <volumeClaimTemplate-name>-<statefulset-name>-<ordinal> so this
  # pre-created claim is used instead of dynamic provisioning.
  name: local-oss-minio-test-2
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
  storageClassName: local-oss
  selector:
    matchLabels:
      # Pins this claim to the PV labeled pvname=local-oss-2.
      pvname: local-oss-2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-oss-3
  labels:
    # Matched by the PVC's spec.selector so each claim binds to exactly
    # this volume.
    pvname: local-oss-3
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-oss
  local:
    # Directory pre-created on the node; must exist before the pod starts.
    path: /data/oss3
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                # NOTE(review): must exactly match the node name shown by
                # `kubectl get nodes` — kubelet lowercases the Linux
                # hostname, so a capitalized hostname here produces a
                # "volume node affinity conflict". Verify against the
                # cluster.
                - host_node2
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # Name must follow the StatefulSet pattern
  # <volumeClaimTemplate-name>-<statefulset-name>-<ordinal> so this
  # pre-created claim is used instead of dynamic provisioning.
  name: local-oss-minio-test-3
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
  storageClassName: local-oss
  selector:
    matchLabels:
      # Pins this claim to the PV labeled pvname=local-oss-3.
      pvname: local-oss-3
As you can see, I create 4 PVCs whose names match the PVC names a StatefulSet generates for its volumeClaimTemplates: {claim-template-name}-{statefulset-name}-{ordinal}.
When I apply a StatefulSet with 4 replicas, I get this error: Warning FailedScheduling 82s (x7 over 8m37s) default-scheduler 0/3 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate, 2 node(s) had volume node affinity conflict.
Can somebody help me understand why? Is it that 2 nodes cannot carry a 4-replica StatefulSet, or that local PVCs simply cannot be used this way?
I found the problem: a nodeAffinity configuration conflict. The hostname in Linux contained a capital letter, but Kubernetes node names are lowercase, so the kubernetes.io/hostname values in the PVs did not match any node.