kubectl config use-context k8s
kubectl create clusterrole deployment-clusterrole --verb=create --resource=deployments,statefulsets,daemonsets
kubectl create serviceaccount cicd-token --namespace=app-team1
kubectl create rolebinding deployment-clusterrole --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token --namespace=app-team1
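# Optional verification sketch: impersonate the ServiceAccount created above and confirm it can create the bound resources
kubectl auth can-i create deployments --as=system:serviceaccount:app-team1:cicd-token --namespace=app-team1
kubectl auth can-i create statefulsets --as=system:serviceaccount:app-team1:cicd-token --namespace=app-team1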
kubectl config use-context k8s
ssh k8s-node-1
sudo -i
vi /var/lib/kubelet/config.yaml
staticPodPath: /etc/kubernetes/manifests
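# Optional: the same setting can be confirmed without opening the editor, assuming the default kubeadm file location
sudo grep staticPodPath /var/lib/kubelet/config.yaml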
cd /etc/kubernetes/manifests
vi webtool.yaml
apiVersion: v1
kind: Pod
metadata:
  name: webtool
spec:
  containers:
  - image: httpd
    name: webtool
systemctl restart kubelet
systemctl enable kubelet
exit
kubectl get pods
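# A static pod appears as a mirror pod suffixed with the node name (e.g. webtool-k8s-node-1, assuming the node name above)
kubectl get pods -o wide | grep webtool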
kubectl config use-context k8s
kubectl label ns my-app project=my-app
kubectl describe ns my-app
vi policy.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-port-from-namespace
  namespace: echo
spec:
  podSelector:
    matchLabels: {}
  policyTypes:
  - Ingress
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          project: my-app
    ports:
    - protocol: TCP
      port: 9000
kubectl create -f policy.yaml
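# Optional checks: the policy should select all pods in echo and allow TCP 9000 only from the labelled namespace
kubectl describe networkpolicy allow-port-from-namespace -n echo
kubectl get ns my-app --show-labels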
kubectl config use-context k8s
kubectl get pvc
vi pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-volume
spec:
  storageClassName: csi-hostpath-sc
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Mi
kubectl create -f pvc.yaml
kubectl get pvc
kubectl get pods
vi pod-pvc.yaml
apiVersion: v1
kind: Pod
metadata:
  name: web-server
spec:
  volumes:
  - name: pv-storage
    persistentVolumeClaim:
      claimName: pv-volume
  containers:
  - image: nginx
    name: web-server
    volumeMounts:
    - mountPath: "/usr/share/nginx/html"
      name: pv-storage
kubectl create -f pod-pvc.yaml
kubectl get pods
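# Optional check that the claim is mounted at the expected path inside the pod
kubectl exec web-server -- df -h /usr/share/nginx/html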
kubectl edit pvc pv-volume --record
spec:
  resources:
    requests:
      storage: 70Mi
kubectl get pvc
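# The resize above only succeeds if the StorageClass permits expansion; a quick check of that field:
kubectl get storageclass csi-hostpath-sc -o jsonpath='{.allowVolumeExpansion}'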
kubectl config use-context k8s
ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/opt/KUIN00601/ca.crt --cert=/opt/KUIN00601/etcd-client.crt --key=/opt/KUIN00601/etcd-client.key snapshot save /var/lib/backup/etcd-snapshot.db
ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/opt/KUIN00601/ca.crt --cert=/opt/KUIN00601/etcd-client.crt --key=/opt/KUIN00601/etcd-client.key snapshot status /var/lib/backup/etcd-snapshot.db
sudo systemctl stop etcd.service
sudo ETCDCTL_API=3 etcdctl snapshot restore /var/lib/backup/etcd-snapshot-previous.db
sudo systemctl restart etcd.service
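# A common restore variant: restore into a fresh directory and point etcd's --data-dir at it before restarting
# (/var/lib/etcd-restore is a placeholder path, not given by the task)
sudo ETCDCTL_API=3 etcdctl snapshot restore /var/lib/backup/etcd-snapshot-previous.db --data-dir=/var/lib/etcd-restore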
kubectl config use-context k8s
kubectl get pods -o wide
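# Optional: inspect the pods' recent logs first to confirm these are the intended pods
kubectl logs nginx-dev --tail=20
kubectl logs nginx-prod --tail=20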
kubectl delete pods "nginx-dev"
kubectl delete pods "nginx-prod"
kubectl get pods
kubectl config use-context k8s
kubectl run nginx --image=nginx --restart=Never --labels=env=test --namespace=engineering --dry-run=client -o yaml > nginx-pod.yaml
vi nginx-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: engineering
  labels:
    env: test
spec:
  containers:
  - image: nginx
    name: nginx
    imagePullPolicy: IfNotPresent
  restartPolicy: Never
kubectl create -f nginx-pod.yaml
kubectl get pods
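# Optional verification: the pod should match the env=test selector in the engineering namespace
kubectl get pods -n engineering -l env=test --show-labels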
kubectl config use-context k8s
ssh k8s-node-1
kubectl get nodes
sudo -i
vi /var/lib/kubelet/config.yaml
staticPodPath: /etc/kubernetes/manifests
systemctl restart kubelet
systemctl enable kubelet
kubectl get nodes
exit
kubectl run nginx --image=nginx --restart=Never --port=80
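# Optional checks: the repaired node should report Ready and the new pod should get scheduled
kubectl get nodes
kubectl get pod nginx -o wide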
kubectl config use-context k8s
kubectl get pods --show-labels | grep "name=overloaded-cpu"
kubectl top pods --sort-by=cpu
echo "overloaded-cpu-xxx" > /opt/KUTR00401/KUTR00401.txt
cat /opt/KUTR00401/KUTR00401.txt
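# A non-interactive alternative, assuming the name=overloaded-cpu label used above identifies the candidate pods
kubectl top pods -l name=overloaded-cpu --sort-by=cpu --no-headers | head -n 1 | awk '{print $1}' > /opt/KUTR00401/KUTR00401.txt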
kubectl get pods --sort-by=.metadata.creationTimestamp