Activity/Meetup Board / Re: December 2019 SA@Tainan, 12/8 (Sun): a tribute to "米家" (Mi Home) - my home is a Mi Home
« on: 2019-12-09 15:04 »
Using a Pi to do IoT is really a way of learning Linux... +1
mkdir descheduler-yaml
cd descheduler-yaml
cat > cluster_role.yaml << END
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: descheduler
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "watch", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
  resources: ["pods/eviction"]
  verbs: ["create"]
END
kubectl apply -f cluster_role.yaml
kubectl create sa descheduler -n kube-system
kubectl create clusterrolebinding descheduler \
-n kube-system \
--clusterrole=descheduler \
--serviceaccount=kube-system:descheduler
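Before wiring up the policy, it is worth confirming that the binding really gives the descheduler service account the rights it needs; kubectl can impersonate the service account for this check (this verification step is my addition, not part of the original walkthrough):
kubectl auth can-i list nodes --as=system:serviceaccount:kube-system:descheduler
kubectl auth can-i delete pods --as=system:serviceaccount:kube-system:descheduler
Both commands should answer yes.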
cat > config_map.yaml << END
apiVersion: v1
kind: ConfigMap
metadata:
  name: descheduler
  namespace: kube-system
data:
  policy.yaml: |-
    apiVersion: descheduler/v1alpha1
    kind: DeschedulerPolicy
    strategies:
      RemoveDuplicates:
        enabled: true
      LowNodeUtilization:
        enabled: true
        params:
          nodeResourceUtilizationThresholds:
            thresholds:
              cpu: 20
              memory: 20
              pods: 20
            targetThresholds:
              cpu: 50
              memory: 50
              pods: 50
      RemovePodsViolatingInterPodAntiAffinity:
        enabled: true
      RemovePodsViolatingNodeAffinity:
        enabled: true
        params:
          nodeAffinityType:
          - requiredDuringSchedulingIgnoredDuringExecution
END
kubectl apply -f config_map.yaml
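With this LowNodeUtilization policy, nodes whose usage is below all of the thresholds values are treated as under-utilized, and pods are evicted from nodes that exceed targetThresholds so the scheduler can move them onto the quieter nodes. A quick way to double-check that the policy landed in the ConfigMap as intended (an optional check of my own):
kubectl -n kube-system get configmap descheduler -o yaml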
cat > cron_job.yaml << END
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: descheduler
  namespace: kube-system
spec:
  schedule: "*/30 * * * *"
  jobTemplate:
    metadata:
      name: descheduler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: "true"
    spec:
      template:
        spec:
          serviceAccountName: descheduler
          containers:
          - name: descheduler
            image: komljen/descheduler:v0.6.0
            volumeMounts:
            - mountPath: /policy-dir
              name: policy-volume
            command:
            - /bin/descheduler
            - --v=4
            - --max-pods-to-evict-per-node=10
            - --policy-config-file=/policy-dir/policy.yaml
          restartPolicy: "OnFailure"
          volumes:
          - name: policy-volume
            configMap:
              name: descheduler
END
kubectl apply -f cron_job.yaml
kubectl get cronjobs -n kube-system
Confirm that the descheduler CronJob is listed, then wait for a run to complete:
kubectl get pods -n kube-system | grep Completed
You will see the completed descheduler job pods; check the log of one of them:
kubectl -n kube-system logs descheduler-1564671000-g69nc
If no action was triggered, the last lines of the log will simply show that nothing was evicted.
To give the descheduler something to work with, drain one of the worker nodes so that its pods get rescheduled onto the remaining nodes:
kubectl drain worker03.localdomain --ignore-daemonsets --delete-local-data --grace-period=0 --force
kubectl get nodes worker03.localdomain
Confirm that the node now shows SchedulingDisabled, then make it schedulable again:
kubectl uncordon worker03.localdomain
kubectl get nodes
Confirm that all nodes are back in the Ready state, wait for the next descheduler run, and check again:
kubectl get pods -n kube-system | grep Completed
kubectl -n kube-system logs descheduler-1564672200-sq5zw
This time you will find that quite a number of pods have been evicted.
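If you do not want to wait for the next scheduled run, kubectl can also create a one-off Job from the CronJob (the job name descheduler-manual below is arbitrary; this shortcut is my addition):
kubectl -n kube-system create job descheduler-manual --from=cronjob/descheduler
Then inspect that job's pod log in the same way as above.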
Next up is MetalLB. Once MetalLB itself is installed, the metallb-system namespace will contain its two components, the controller and the speaker. Give it an address pool for layer 2 mode with a ConfigMap like the following:
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: my-ip-space
      protocol: layer2
      addresses:
      - 192.168.100.240-192.168.100.249
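Assuming the manifest above is saved as metallb-config.yaml (the file name is mine), load it and check that MetalLB's pods are up:
kubectl apply -f metallb-config.yaml
kubectl get pods -n metallb-system
Both MetalLB pods should be Running. Next, deploy a small test workload to confirm that MetalLB really hands out an address from the pool: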
apiVersion: v1
kind: Namespace
metadata:
  name: metallb-test
  labels:
    app: metallb
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: metallb-test
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-deployment
  namespace: metallb-test
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
  sessionAffinity: None
  type: LoadBalancer
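Assuming the three manifests above are saved together as metallb-test.yaml (again, the file name is mine), apply them and watch the Service:
kubectl apply -f metallb-test.yaml
kubectl get svc -n metallb-test -o wide
The Service should be assigned an EXTERNAL-IP from the pool: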
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
nginx-deployment LoadBalancer 10.103.250.239 192.168.100.240 80:16656/TCP 2m51s app=nginx
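As a quick functional check (the address comes from the output above):
curl http://192.168.100.240
You should get the default nginx welcome page back.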
ceph osd pool create kube 32 32 # adjust the actual PG number to fit your cluster
ceph osd pool application enable 'kube' 'rbd'
ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube'
ceph auth get-key client.admin | base64 # the output becomes the ceph-secret-admin secret on the k8s side
ceph auth get-key client.kube | base64 # the output becomes the ceph-secret-kube secret on the k8s side
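Still on the Ceph admin node, you can optionally double-check the new pool and the caps of the client that was just created (these checks are my addition):
ceph osd pool ls detail
ceph auth get client.kube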
# On the Kubernetes nodes (so they can map RBD images), install the Ceph client tools and copy over the admin keyring from the Ceph node
yum install -y ceph-common
scp 192.168.100.21:/etc/ceph/ceph.client.admin.keyring /etc/ceph/
mkdir ~/kube-ceph
cd ~/kube-ceph
cat > kube-ceph-secret.yaml << END
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
  key: QVFEYzF0SmNMaVpkRmhBQWlKbUhNbndaR2tCdldFcThXWDhaaXc9PQ==
---
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-kube
type: "kubernetes.io/rbd"
data:
  key: QVFDSFdUaGROcC9LT2hBQUpkVG5XVUpQUOYrZGtvZ2k3S0Zwc0E9PQ==
END
cat > kube-ceph-sc.yaml << END
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: ceph-rbd
#provisioner: kubernetes.io/rbd   # the in-tree provisioner; we use the external rbd-provisioner instead
provisioner: ceph.com/rbd
parameters:
  monitors: 192.168.100.21:6789,192.168.100.22:6789,192.168.100.23:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: default
  pool: kube
  userId: kube
  userSecretName: ceph-secret-kube
  userSecretNamespace: default
  imageFormat: "2"
  imageFeatures: layering
END
cat > kube-ceph-pvc.yaml << END
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-k8s-claim
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: ceph-rbd
  resources:
    requests:
      storage: 1Gi
END
git clone https://github.com/kubernetes-incubator/external-storage
cd external-storage/ceph/rbd/deploy/rbac/
kubectl apply -f ./
kubectl get pods
# Confirm that the provisioner pod created above is in the Running state
# Then apply the Secret, StorageClass, and PVC manifests prepared earlier
cd ~/kube-ceph
kubectl apply -f ./
kubectl patch storageclass ceph-rbd -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
kubectl get StorageClass
# Confirm that ceph-rbd is the only default StorageClass
kubectl get pvc
# Confirm that the PVC is correctly Bound
rbd list -p kube
# You should see the RBD image that was provisioned for the claim
cat > kube-ceph-pod.yaml << END
apiVersion: v1
kind: Pod
metadata:
  name: kube-ceph-pod
spec:
  containers:
  - name: ceph-busybox
    image: busybox
    command: ["sleep", "60000"]
    volumeMounts:
    - name: ceph-volume
      mountPath: /usr/share/ceph-rbd
      readOnly: false
  volumes:
  - name: ceph-volume
    persistentVolumeClaim:
      claimName: ceph-k8s-claim
END
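Finally, apply the Pod and confirm that the claim really ends up mounted inside it (the df path matches the mountPath above; this last check is my addition):
kubectl apply -f kube-ceph-pod.yaml
kubectl get pod kube-ceph-pod
kubectl exec kube-ceph-pod -- df -h /usr/share/ceph-rbd
df should report an rbd device of roughly 1 GiB.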