apiVersion: apps/v1
kind: Deployment
metadata:
name: website
namespace: default
# labels and annotations here are only for the deployment resource type
labels:
app: website
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
resource_type: deployment
spec:
# remove if HPA is used
replicas: 1
# the deployment must wait X seconds after a new Pod becomes ready before treating it as available and continuing the rollout
minReadySeconds: 10
progressDeadlineSeconds: 60
# keep the history of the last X revisions in case of rollback
revisionHistoryLimit: 5
# This is the default deployment strategy in Kubernetes.
# It replaces the existing version of pods with a new version, updating pods slowly one by one, without cluster downtime.
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
# The Recreate strategy terminates all existing Pods before new ones are created.
# It guarantees that two versions of the application never run at the same time,
# but it causes downtime while the new Pods start.
#
# strategy:
# type: Recreate
selector:
matchLabels:
# label for the deployment used by the Service to connect to the deployment
app: website
template:
metadata:
name: website
# labels and annotations here are only for the pods that are created by the deployment -> replicaset -> pods
labels:
app: website
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
resource_type: deployment-pod
spec:
restartPolicy: Always
# Optional
securityContext:
runAsUser: 33
runAsGroup: 33
runAsNonRoot: true
# configure entries in /etc/hosts file inside the pod.
# /etc/hosts file will be a shared volume between the containers of the pod
hostAliases:
- ip: "127.0.0.1"
hostnames:
- "foo.local"
- "bar.local"
- ip: "10.1.2.3"
hostnames:
- "foo.remote"
- "bar.remote"
# target specific nodes
nodeSelector:
type: application
# this can be added into the default service account and will be applied globally
imagePullSecrets:
- name: my-registry-secret
containers:
- name: website # container name in pod
image: nginx:latest # docker image used for this container
imagePullPolicy: Always # always get the docker image from registry
ports:
- containerPort: 80
env:
- name: APP_TYPE
value: application
- name: APP_SECRET
valueFrom:
secretKeyRef:
key: APP_SECRET
name: db-secrets
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: K8S_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: K8S_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: K8S_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: K8S_POD_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
envFrom:
# load environment variables from config map
- configMapRef:
name: site-configurations
# load encoded secret from Secrets manifest
- secretRef:
name: site-secrets
# set resources
resources:
requests:
memory: "64Mi"
cpu: "10m"
limits:
memory: "256Mi"
cpu: "100m"
livenessProbe:
httpGet:
path: /healthz
port: 8080
httpHeaders:
- name: Custom-Header
value: Awesome
initialDelaySeconds: 3
periodSeconds: 3
successThreshold: 1 # must be 1 for liveness probes
# livenessProbe:
# exec:
# command:
# - cat
# - /tmp/healthy
# initialDelaySeconds: 5
# periodSeconds: 5
readinessProbe:
exec:
command:
- cat
- /tmp/healthy
initialDelaySeconds: 5
periodSeconds: 30 # default 10, >= 1
successThreshold: 1 # default 1, can be a higher number for readiness probes
# TCP probes
# readinessProbe:
# tcpSocket:
# port: 8080
# initialDelaySeconds: 5
# periodSeconds: 10
# livenessProbe:
# tcpSocket:
# port: 8080
# initialDelaySeconds: 15
# periodSeconds: 20
# Protect slow starting containers with startup probes
startupProbe:
httpGet:
path: /healthz
port: 8080 # same port as the liveness probe
failureThreshold: 30
periodSeconds: 10
successThreshold: 1 # must be 1 for startup probes
# this is a custom application running inside the cluster to post a slack message when a pod is created or terminated
lifecycle:
postStart:
exec:
command:
- "/bin/bash"
- "-c"
- 'curl -s -X GET --max-time 60 http://${SERVICE_NAME}.notifications.svc.cluster.local/start/${HOSTNAME}/php >&1; exit 0'
preStop:
exec:
command:
- "/bin/bash"
- "-c"
- 'curl -s -X GET --max-time 60 http://${SERVICE_NAME}.notifications.svc.cluster.local/stop/${HOSTNAME}/php >&1; exit 0'
volumeMounts:
# thumbnails volume
- mountPath: /app/public/thumbs
name: thumbnails
# file uploads volume
- mountPath: /app/uploads
name: uploads
# from configmap
- name: config
mountPath: "/config"
readOnly: true
initContainers:
- name: update-database
image: php-container
envFrom:
- configMapRef:
name: db-credentials
command:
- "bin/console"
- "setup:install"
volumeMounts:
- mountPath: /opt/test
name: test
securityContext:
privileged: true
runAsUser: 0 # root user
runAsGroup: 0 # root group
# runAsNonRoot: true
# set volumes per deployment that will be used by containers using volumeMounts
volumes:
# define thumbnails directory as empty volume every time
- name: thumbnails
emptyDir: {}
# load uploads directory from PersistentVolumeClaim
- name: uploads
persistentVolumeClaim:
claimName: website-uploads
- name: test
persistentVolumeClaim:
claimName: my-test-volume
# load from AWS EFS
- name: efs-data
nfs:
server: 1a2b3c4d.efs.eu-central-1.amazonaws.com
path: /
# load from configmap
- name: config-volume
configMap:
name: special-config
# optional
items:
- key: SPECIAL_LEVEL
path: keys
- name: config
configMap:
name: my-app-config
items:
- key: "game.properties"
path: "game.properties"
- key: "user-interface.properties"
path: "user-interface.properties"
The apiVersion field tells Kubernetes which API version is used to create the object (apps/v1 for Deployment), and kind tells it what kind of object to create (Deployment in this case).
# create resource(s)
kubectl apply -f ./my-manifest.yaml
# create from multiple files
kubectl apply -f ./my1.yaml -f ./my2.yaml
# create resource(s) in all manifest files in dir
kubectl apply -f ./dir
# create resource(s) from url
kubectl apply -f https://git.io/vPieo
# create a running pod for quick tests
kubectl run nginx --image=nginx --restart=Never
# start a single instance of nginx as a deployment
kubectl create deployment nginx --image=nginx
# create a Job which prints "Hello World"
kubectl create job hello --image=busybox -- echo "Hello World"
# create a CronJob that prints "Hello World" every minute
kubectl create cronjob hello --image=busybox --schedule="*/1 * * * *" -- echo "Hello World"
# Rolling update "www" containers of "frontend" deployment, updating the image
kubectl set image deployment/frontend www=image:v2
# Check the history of deployments including the revision
kubectl rollout history deployment/frontend
# Rollback to the previous deployment
kubectl rollout undo deployment/frontend
# Rollback to a specific revision
kubectl rollout undo deployment/frontend --to-revision=2
# Watch rolling update status of "frontend" deployment until completion
kubectl rollout status -w deployment/frontend
# Rolling restart of the "frontend" deployment
kubectl rollout restart deployment/frontend
# Replace a pod based on the JSON passed into stdin
cat pod.json | kubectl replace -f -
# Force replace, delete and then re-create the resource. Will cause a service outage.
kubectl replace --force -f ./pod.json
# Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000
kubectl expose rc nginx --port=80 --target-port=8000
# Update a single-container pod's image version (tag) to v4
kubectl get pod mypod -o yaml | sed 's/\(image: myimage\):.*$/\1:v4/' | kubectl replace -f -
# Add a Label
kubectl label pods my-pod new-label=awesome
# Add an annotation
kubectl annotate pods my-pod icon-url=http://goo.gl/XXBTWq
# Auto scale a deployment "foo" with a minimum of 2 and a maximum of 10 replicas
kubectl autoscale deployment foo --min=2 --max=10
# Scale a replicaset named 'foo' to 3
kubectl scale --replicas=3 rs/foo
# Scale a resource specified in "foo.yaml" to 3
kubectl scale --replicas=3 -f foo.yaml
# If the deployment named mysql's current size is 2, scale mysql to 3
kubectl scale --current-replicas=2 --replicas=3 deployment/mysql
# Scale multiple replication controllers
kubectl scale --replicas=5 rc/foo rc/bar rc/baz
Assuming horizontal Pod autoscaling is enabled in your cluster, you can set up an autoscaler for your Deployment and choose the minimum and maximum number of Pods you want to run based on the CPU utilization of your existing Pods.
kubectl autoscale deployment/nginx-deployment --min=10 --max=15 --cpu-percent=80
# Delete a pod using the type and name specified in pod.json
kubectl delete -f ./pod.json
# Delete a pod with no grace period
kubectl delete pod unwanted --now
# Delete pods and services with same names "baz" and "foo"
kubectl delete pod,service baz foo
# Delete pods and services with label name=myLabel
kubectl delete pods,services -l name=myLabel
# Delete all pods and services in namespace my-ns
kubectl -n my-ns delete pod,svc --all
# Delete all pods matching the awk pattern1 or pattern2
kubectl get pods -n mynamespace --no-headers=true | awk '/pattern1|pattern2/{print $1}' | xargs kubectl delete -n mynamespace pod
# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the current namespace
kubectl cp /tmp/foo_dir my-pod:/tmp/bar_dir
# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container
kubectl cp /tmp/foo my-pod:/tmp/bar -c my-container
# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace my-namespace
kubectl cp /tmp/foo my-namespace/my-pod:/tmp/bar
# Copy /tmp/foo from a remote pod to /tmp/bar locally
kubectl cp my-namespace/my-pod:/tmp/foo /tmp/bar
Note: kubectl cp requires that the 'tar' binary is present in your container image. If 'tar' is not present, kubectl cp will fail. For advanced use cases, such as symlinks, wildcard expansion or file mode preservation, consider using kubectl exec.
# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace my-namespace
tar cf - /tmp/foo | kubectl exec -i -n my-namespace my-pod -- tar xf - -C /tmp/bar
# Copy /tmp/foo from a remote pod to /tmp/bar locally
kubectl exec -n my-namespace my-pod -- tar cf - /tmp/foo | tar xf - -C /tmp/bar
apiVersion: v1
kind: Service
metadata:
name: my-app-cluster-ip
namespace: default
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
spec:
type: ClusterIP
selector:
app: my_app
ports:
- port: 80
targetPort: 80
apiVersion: v1
kind: Service
metadata:
name: my-app-node-port
namespace: default
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
spec:
type: NodePort
selector:
app: my_app
ports:
- port: 80
targetPort: 80
apiVersion: v1
kind: Service
metadata:
name: my-app-load-balancer
namespace: default
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
spec:
type: LoadBalancer
selector:
app: my-app
ports:
- port: 80
targetPort: 80
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: default
labels:
app: nginx
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
apiVersion: v1
kind: Service
metadata:
name: classic-lb-k8s
namespace: demo
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:eu-central-1:000000000000:certificate/11aa22bb-a1s2-1q2w-1234-q1w2e3r4t5t6
service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
service.beta.kubernetes.io/aws-load-balancer-ssl-ports: https
# service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true"
# # Specifies whether access logs are enabled for the load balancer
# service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60"
# # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes).
# service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket"
# # The name of the Amazon S3 bucket where the access logs are stored
# service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod"
# # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod`
labels:
app: my-awesome-application
spec:
selector:
app: my-awesome-application
type: LoadBalancer
ports:
- name: http
port: 80
targetPort: 80
protocol: TCP
- name: https
port: 443
targetPort: 80
protocol: TCP
An ExternalName service is a special type of Kubernetes service that does not have selectors or endpoints. Instead, it serves as an alias for an external service. This can be useful for accessing external services from within your Kubernetes cluster without having to modify your application code.
apiVersion: v1
kind: Service
metadata:
name: database-host
namespace: default
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
spec:
type: ExternalName
externalName: "__RDS_HOST__"
ExternalName services are useful whenever workloads in the cluster need to talk to something that lives outside of it, such as a managed database. If you need to access an external service from within your Kubernetes cluster, an ExternalName service is a simple and effective way to provide a consistent interface to it.
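Inside the cluster, Pods can then reach the external service through the Service's cluster DNS name. A minimal sketch, assuming the database-host ExternalName Service above and a hypothetical application container:

# snippet from a hypothetical Pod/Deployment container spec
env:
- name: DATABASE_HOST
  # resolves via the ExternalName Service (a DNS CNAME) to the real RDS hostname
  value: database-host.default.svc.cluster.local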
apiVersion: v1
kind: Service
metadata:
name: my-service
namespace: default
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
spec:
ports:
- name: mysql
protocol: TCP
port: 3306
targetPort: 3306
externalIPs:
- 10.10.10.10
# Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000
kubectl expose rc nginx --port=80 --target-port=8000
# Create a service of type LoadBalancer
kubectl expose deployment hello-world --type=LoadBalancer --name=my-service
apiVersion: v1
kind: ConfigMap
metadata:
# config map name, will be used by deployments, cronjobs, etc
name: my-app-config
namespace: default
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
#immutable: true
data:
APP_ENV: prod
MYSQL_HOST: "127.0.0.1"
MYSQL_USER: mysql_username_here
UPLOAD_DIRECTORY: /data/uploads
# file-like keys
game.properties: |
enemy.types=aliens,machines,humans
player.maximum-lives=5
user-interface.properties: |
color.good=purple
color.bad=yellow
color.textmode=true
You can then use those values as environment variables in your Pods, either with the env field in the Pod spec or with envFrom. For example, the following Pod spec injects the DATABASE_HOST and DATABASE_USER environment variables into the container:
apiVersion: v1
kind: Pod
metadata:
name: my-pod
spec:
containers:
- name: my-container
image: my-image
env:
- name: DATABASE_HOST
valueFrom:
configMapKeyRef:
name: my-app-config
key: MYSQL_HOST
- name: DATABASE_USER
valueFrom:
configMapKeyRef:
name: my-app-config
key: MYSQL_USER
# or load all parameters defined in ConfigMap
envFrom:
- configMapRef:
name: my-app-config
Create a ConfigMap imperatively from literal values
kubectl create configmap my-config --from-literal=my-key=my-value
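You can also build a ConfigMap from files instead of literals; for example (file names below are placeholders, matching the file-like keys shown earlier):

# create a ConfigMap where each file becomes a file-like key
kubectl create configmap my-app-config --from-file=game.properties --from-file=user-interface.properties
# create it from an env-style file (one KEY=value per line)
kubectl create configmap my-app-config --from-env-file=app.env
# preview the generated manifest without creating anything
kubectl create configmap my-config --from-literal=my-key=my-value --dry-run=client -o yaml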
This is the most common type of Secret and can be used to store any kind of sensitive data, such as passwords, tokens, and certificates. Opaque Secrets are stored in base64-encoded form and are not encrypted at rest by default.
Here is an example of an Opaque Secret manifest:
apiVersion: v1
kind: Secret
metadata:
name: my-app-secrets
namespace: default
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
type: Opaque
data:
my_db_password: a2lzcGhw # base64 encoded
redis_password: a2lzcGhw # base64 encoded
The data field in the manifest contains the secret data, encoded in base64. In this example, it holds a database password and a Redis password.
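To inspect a stored value, read it with kubectl and decode it from base64; for example:

kubectl get secret my-app-secrets -n default -o jsonpath='{.data.my_db_password}' | base64 -d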
apiVersion: v1
kind: Secret
metadata:
name: my-docker-registry-secret
type: kubernetes.io/dockerconfigjson
stringData: # plain-text JSON; Kubernetes stores it base64-encoded under data
.dockerconfigjson: |
{
"auths": {
"https://registry.example.com": {
"username": "my-username",
"password": "my-password"
}
}
}
The stringData field in the manifest contains the Docker registry credentials in plain text; Kubernetes encodes them to base64 when it stores the Secret. In this example, the credentials are for the registry https://registry.example.com.
apiVersion: v1
kind: Secret
metadata:
name: secret-basic-auth
namespace: default
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
type: kubernetes.io/basic-auth
stringData:
username: admin
password: t0p-Secret
The kubernetes.io/ssh-auth Secret type is a built-in type that is specifically designed for storing SSH authentication credentials. It is recommended to use this type of Secret instead of the generic Opaque type when storing SSH keys, as it provides additional validation and convenience.
To create an SSH Auth Secret, you can use the following manifest:
apiVersion: v1
kind: Secret
metadata:
name: my-ssh-key-secret
namespace: default
type: kubernetes.io/ssh-auth
stringData: # plain-text PEM; use data: with base64-encoded values instead if preferred
ssh-privatekey: |
-----BEGIN RSA PRIVATE KEY-----
MIIEvQIBAAKCAQEA...
-----END RSA PRIVATE KEY-----
The ssh-privatekey key in the stringData field contains the SSH private key. Once you have created the Secret, you can reference it in Pods, Deployments, and other Kubernetes objects using the env: or volumeMounts: fields.
Here is an example of a Pod that mounts the SSH Auth Secret:
apiVersion: v1
kind: Pod
metadata:
name: my-pod
spec:
containers:
- name: my-container
image: my-image
volumeMounts:
- name: ssh-key
mountPath: /etc/ssh/
readOnly: true
volumes:
- name: ssh-key
secret:
secretName: my-ssh-key-secret
defaultMode: 0400
The Pod will mount the SSH Auth Secret to the /etc/ssh/ directory in the container, so the key is available as /etc/ssh/ssh-privatekey. The defaultMode of 0400 ensures that the private key is not readable by other users in the container. Once the Pod is running, you can use the SSH private key to connect to remote servers. For example, you could use the following command to connect to the server example.com:
ssh -i /etc/ssh/ssh-privatekey user@example.com
This type of Secret is used to store TLS certificates and private keys. TLS Secrets are stored in base64-encoded form and are not encrypted at rest by default.
Here is an example of a TLS Secret manifest:
apiVersion: v1
kind: Secret
metadata:
name: my-tls-secret
type: kubernetes.io/tls
stringData: # plain-text PEM; Kubernetes stores it base64-encoded under data
tls.crt: |
-----BEGIN CERTIFICATE-----
MIIC4zCCAjegAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBpjELMAkGA1UEBhMCVVMx
...
-----END CERTIFICATE-----
tls.key: |
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC...
-----END PRIVATE KEY-----
The stringData field in the manifest contains the TLS certificate and private key in PEM format; Kubernetes stores them base64-encoded under data when the Secret is created. In this example, the certificate and key are for the domain example.com.
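Instead of writing the manifest by hand, a TLS Secret can also be created directly from the certificate and key files (the paths below are placeholders):

kubectl create secret tls my-tls-secret --cert=path/to/tls.crt --key=path/to/tls.key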
Once you have created a Secret, you can reference it in Pods, Deployments, and other Kubernetes objects using the env: or volumeMounts: fields.
It is important to note that Secrets are not encrypted at rest by default. This means that if an attacker is able to gain access to the Kubernetes etcd database, they will be able to read the contents of all Secrets.
To mitigate this risk, you should configure Kubernetes to encrypt Secrets at rest. This is done by writing an EncryptionConfiguration file and passing it to the kube-apiserver with the --encryption-provider-config flag.
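A minimal sketch of such an EncryptionConfiguration, assuming you are able to edit the kube-apiserver flags (the key material below is a placeholder):

apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      # encrypt newly written Secrets with AES-CBC
      - aescbc:
          keys:
            - name: key1
              secret: <base64-encoded 32-byte key>
      # still allow reading Secrets written before encryption was enabled
      - identity: {}

The file is then referenced from the API server, for example with --encryption-provider-config=/etc/kubernetes/enc/enc.yaml.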
You should also consider using a Secrets management tool, such as HashiCorp Vault, to manage your Kubernetes Secrets. Secrets management tools can provide additional security features, such as encryption at rest, audit logging, and access control.
kubectl create secret generic secret-name \
--from-literal=MYSQL_PASSWORD=mypass123 \
--from-literal=APP_SECRET=qawsed1234 \
-o yaml \
--dry-run=client
kubectl create secret docker-registry secret-tiger-docker \
--docker-username=tiger \
--docker-password=pass113 \
--docker-email=tiger@acme.com
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
name: mysecret
namespace: mynamespace
type: Opaque
data:
password: $(echo -n "s33msi4" | base64 -w0)
username: $(echo -n "jane" | base64 -w0)
EOF
This is a simple example of an Ingress manifest. It creates an Ingress resource called my-ingress that listens for traffic on the example.com domain. When traffic arrives at example.com, it is routed to the my-service Service on port 80.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: my-ingress
namespace: default
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: my-service
port:
number: 80
This is an example of an Ingress manifest with multiple paths. It creates an Ingress resource called my-ingress that listens for traffic on the example.com domain. When traffic arrives at example.com, it is routed to the my-service Service on port 80. When traffic arrives at example.com/api, it is routed to the my-api-service Service on port 8080.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: my-ingress
namespace: default
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: my-service
port:
number: 80
- path: /api/
pathType: Prefix
backend:
service:
name: my-api-service
port:
number: 8080
This is an example of an Ingress manifest with TLS. It creates an Ingress resource called my-ingress that listens for traffic on the example.com domain and terminates TLS connections using the my-tls-secret Secret. When traffic arrives at example.com, it is routed to the my-service Service on port 80.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: my-ingress
namespace: default
annotations:
kubernetes.io/ingress.class: nginx
spec:
tls:
- hosts:
- example.com
secretName: my-tls-secret
rules:
- host: example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: my-service
port:
number: 80
This is an example of an Ingress that requires HTTP basic authentication. With the NGINX ingress controller, basic auth is configured through annotations that reference a Secret containing an htpasswd file (see the commands after the manifest).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: my-ingress
namespace: default
annotations:
kubernetes.io/ingress.class: nginx
# basic auth is handled by the NGINX ingress controller via annotations, not by a field in the Ingress spec
nginx.ingress.kubernetes.io/auth-type: basic
nginx.ingress.kubernetes.io/auth-secret: my-auth-secret
nginx.ingress.kubernetes.io/auth-realm: "Authentication Required"
spec:
rules:
- host: example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: my-service
port:
number: 80
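The my-auth-secret referenced by the annotations above can be generated from an htpasswd file, assuming the htpasswd tool is installed locally:

# create a file named "auth" containing one user (you will be prompted for the password)
htpasswd -c auth my-user
# the NGINX ingress controller expects the htpasswd content under the key "auth"
kubectl create secret generic my-auth-secret --from-file=auth -n default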
Here is an example of a CronJob manifest:
apiVersion: batch/v1
kind: CronJob
metadata:
name: my-cron-job
namespace: default
labels:
app: my-cron-job
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
spec:
# run every ten minutes
schedule: "*/10 * * * *"
# do not allow concurrent jobs
concurrencyPolicy: Forbid
# delete successful pods
successfulJobsHistoryLimit: 0
# keep last 2 failed pods for debug
failedJobsHistoryLimit: 2
startingDeadlineSeconds: 120
suspend: false
jobTemplate:
spec:
template:
metadata:
labels:
app: my-cron-job
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
spec:
restartPolicy: Never
nodeSelector:
type: cron
containers:
# name of the container in pod
- name: app-cron
# docker image to load for this container
image: my-php-container:tag_name
# always download image from registry
imagePullPolicy: Always # IfNotPresent
# define necessary resources for this container
resources:
requests:
cpu: "1"
memory: "256Mi"
limits:
cpu: "1"
memory: "1Gi"
# load all variables defined in config map
envFrom:
- configMapRef:
name: my-app-config
env:
# set inline variable for container
- name: CONTAINER_PURPOSE
value: cron
# register a secret (password) to env vars from secrets manifests
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: my-app-secrets
# reference to variable name in secrets manifest
key: my_db_password
command:
- vendor/bin/codecept
args:
- run
- "--dry-run"
volumeMounts:
- name: project-data
mountPath: /stored_data
volumes:
# using aws EFS (you must enable the EFS Driver Addon on EKS cluster)
- name: project-data
nfs:
server: 1a2b3c4d.efs.eu-central-1.amazonaws.com
path: /
# using PVC
- name: uploads
persistentVolumeClaim:
claimName: uploaded-files
# ┌───────────── minute (0 - 59)
# │ ┌───────────── hour (0 - 23)
# │ │ ┌───────────── day of the month (1 - 31)
# │ │ │ ┌───────────── month (1 - 12)
# │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
# │ │ │ │ │ 7 is also Sunday on some systems)
# │ │ │ │ │
# │ │ │ │ │
# * * * * *
Entry | Description | Equivalent to |
---|---|---|
@yearly (or @annually) | Run once a year at midnight of 1 January | 0 0 1 1 * |
@monthly | Run once a month at midnight of the first day of the month | 0 0 1 * * |
@weekly | Run once a week at midnight on Sunday morning | 0 0 * * 0 |
@daily (or @midnight) | Run once a day at midnight | 0 0 * * * |
@hourly | Run once an hour at the beginning of the hour | 0 * * * * |
kubectl create job --from=cronjob/<cronjob-name> <job-name>
You can also use kubectl to scale, suspend, and resume CronJobs.
Here are some examples of how to use these commands:
Create a new CronJob
kubectl create cronjob hello-world --schedule="*/15 * * * *" --image=busybox -- echo "Hello, world!"
List all CronJobs in the current namespace
kubectl get cronjobs
Display detailed information about a specific CronJob
kubectl describe cronjob hello-world
Delete a CronJob
kubectl delete cronjob hello-world
Watch for new Jobs created by CronJobs
kubectl get jobs --watch
Display the logs for a specific Pod created by a CronJob
kubectl logs hello-world-job-1234567890
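Suspend and resume a CronJob (mentioned above) by patching its spec.suspend field:

# suspend: no new Jobs will be scheduled
kubectl patch cronjob hello-world -p '{"spec":{"suspend":true}}'
# resume scheduling
kubectl patch cronjob hello-world -p '{"spec":{"suspend":false}}'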
Here is an example of a Job in Kubernetes:
apiVersion: batch/v1
kind: Job
metadata:
name: thumbs-cleanup
namespace: default
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
spec:
ttlSecondsAfterFinished: 120 # delete the finished Job (and its Pods) 2 minutes after it completes
backoffLimit: 6 # default value
parallelism: 2
completions: 3
template:
spec:
restartPolicy: Never
containers:
- name: php
image: my-custom-container
imagePullPolicy: Always
command:
- vendor/bin/console
- cleanup:thumbs
envFrom:
- configMapRef:
name: my-config
env:
- name: CONTAINER_PURPOSE
value: job
- name: MY_DB_PASSWORD
valueFrom:
secretKeyRef:
name: mysql-password
key: password
volumeMounts:
- name: my-volume
mountPath: /mnt/my-volume
volumes:
- name: my-volume
persistentVolumeClaim:
claimName: my-pvc
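A few commands to follow the Job above (the names match the manifest):

# watch the Job and the Pods it creates
kubectl get job thumbs-cleanup -n default
kubectl get pods -n default -l job-name=thumbs-cleanup
# block until the Job finishes (or the timeout expires)
kubectl wait --for=condition=complete job/thumbs-cleanup -n default --timeout=300s
# read the logs of all Pods created by the Job
kubectl logs -n default -l job-name=thumbs-cleanup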
Here is a simple PersistentVolume manifest:
apiVersion: v1
kind: PersistentVolume
metadata:
name: uploaded-files
# PVs don't have a namespace
labels:
type: local
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
spec:
storageClassName: manual
capacity:
storage: 10Gi
accessModes:
- ReadWriteMany
hostPath:
path: /mnt/data/uploaded_files
This manifest creates a PersistentVolume with a capacity of 10Gi, the manual storage class, the ReadWriteMany access mode (ReadWriteOnce can also be used), and a hostPath backing directory on the node.
You should use PersistentVolumes if your application needs to store data that persists across restarts. For example, you might use PersistentVolumes for a database, a web application that needs to store user files, or a caching layer.
PersistentVolumes are a powerful way to provision storage for your Kubernetes applications. By understanding how PVs work, you can choose the right storage provider for your needs and ensure that your applications have the storage they need to run reliably.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: my-app-uploads
namespace: default
annotations:
source_url: "git@gitlab.com:kisphp/example.git"
spec:
storageClassName: manual
accessModes:
- ReadWriteMany
- ReadWriteOnce
# Optional
selector:
matchLabels:
node-type: storage
resources:
requests:
storage: 10Gi
# if you define PVC for AWS EFS, just set the storage to 1Gi, no matter how big the EFS is
This PVC requests a 10Gi volume with the ReadWriteMany and ReadWriteOnce access modes. The Kubernetes controller will try to find a PersistentVolume (PV) that matches these criteria and bind it to the PVC. If no matching PV is found, the controller will provision a new PV based on the storage class specified in the PVC (or the default storage class if none is specified).
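To verify that the claim was bound to a matching PersistentVolume:

kubectl get pvc my-app-uploads -n default
kubectl get pv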
This HorizontalPodAutoscaler (HPA) will scale the Deployment my-deployment between a minimum of 1 and a maximum of 10 replicas based on the average CPU utilization of its Pods. When the average CPU utilization rises above 80%, the HPA adds replicas (in proportion to how far the metric is above the target); when it stays below 80%, it removes replicas, within the min/max bounds.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: my-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: my-deployment
minReplicas: 1
maxReplicas: 10
metrics:
- type: Resource
  resource:
    name: cpu
    target:
      type: Utilization
      averageUtilization: 80
This HPA will scale the Deployment my-deployment between a minimum of 1 and a maximum of 10 replicas based on a per-Pod custom metric (here, a hypothetical packets-per-second metric). When the average value of the metric across the Pods rises above the target, the HPA scales the deployment up; when it falls below, it scales it down.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: my-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: my-deployment
minReplicas: 1
maxReplicas: 10
metrics:
- type: Pods
  pods:
    metric:
      name: packets-per-second # example custom metric exposed by a metrics adapter
    target:
      type: AverageValue
      averageValue: "1k"
This HPA will scale the Deployment my-deployment between a minimum of 1 and a maximum of 10 replicas based on the value of the external metric my-metric with the label app: my-app. When the value of the metric is above the configured target, the HPA scales the deployment up; when it is below the target, it scales it down.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: my-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: my-deployment
minReplicas: 1
maxReplicas: 10
metrics:
- type: External
  external:
    metric:
      name: my-metric
      selector:
        matchLabels:
          app: my-app
    target:
      type: Value
      value: "100" # example threshold
kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
HorizontalPodAutoscalers are a powerful tool for managing the scale of your Kubernetes applications. By using HPAs, you can ensure that your applications have the resources they need to handle changes in demand.
This PDB ensures that at least two Pods of the frontend Deployment remain available during voluntary disruptions:
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: frontend-pdb
spec:
minAvailable: 2
selector:
matchLabels:
app: frontend
The minAvailable field specifies that at least two Pods must be available at all times. If Kubernetes needs to voluntarily disrupt a Pod (for example during a node drain), it will only do so while at least two Pods remain available.
This PDB ensures that at most 50% of the Pods of the database StatefulSet are disrupted at a time:
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: database-pdb
spec:
maxUnavailable: 50%
selector:
matchLabels:
app: database
The maxUnavailable field specifies that at most 50% of the Pods can be unavailable at any given time. This is useful for StatefulSets, where the order in which Pods are deployed and terminated is important.
This PDB ensures that at most 10% of the Pods of the logging DaemonSet are unavailable at a time:
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: logging-pdb
spec:
maxUnavailable: 10%
selector:
matchLabels:
app: logging
If you want Kubernetes to stop protecting the Pods of a Deployment, delete its PDB (or relax it, for example by setting maxUnavailable: 100%). Kubernetes will then no longer block voluntary disruptions, which can be useful while performing maintenance or when you are confident the application can tolerate disruptions.
This PDB ensures that at least one Pod of any Deployment with the label environment: production is available at all times:
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: production-pdb
spec:
minAvailable: 1
selector:
matchLabels:
environment: production
The selector field can be used to select Pods based on any label selector. This allows you to create PDBs for specific applications, environments, or even individual Pods.
kubectl get poddisruptionbudgets
PDBs are a powerful tool for ensuring the availability of your Kubernetes applications. By using PDBs, you can prevent Kubernetes from disrupting your Pods unexpectedly.
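A PDB can also be created imperatively; for example, the frontend PDB from above could be created with:

kubectl create poddisruptionbudget frontend-pdb --selector=app=frontend --min-available=2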
This StatefulSet will create three Pods, each running a ZooKeeper server container. The Pods will be named my-zookeeper-cluster-0, my-zookeeper-cluster-1, and my-zookeeper-cluster-2. The volumeMounts section of the spec tells the Pods to mount the PersistentVolumeClaim my-zookeeper-cluster-pvc at the /zookeeper/data directory. This ensures that the ZooKeeper data is persistent and preserved across restarts.
When you update a StatefulSet, the only fields you are allowed to change are replicas, ordinals, template, updateStrategy, persistentVolumeClaimRetentionPolicy and minReadySeconds; the rest of the spec is immutable.
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: my-zookeeper-cluster
spec:
serviceName: my-zookeeper-cluster # governing headless Service (required for a StatefulSet)
replicas: 3
selector:
matchLabels:
app: my-zookeeper-cluster
template:
metadata:
labels:
app: my-zookeeper-cluster
spec:
containers:
- name: my-zookeeper
image: bitnami/zookeeper:latest
ports:
- containerPort: 2181
volumeMounts:
- name: zookeeper-data
mountPath: /zookeeper/data
volumes:
- name: zookeeper-data
persistentVolumeClaim:
claimName: my-zookeeper-cluster-pvc
Include a volumeMounts section if the Pods need to access persistent storage.
StatefulSets are valuable for applications that require one or more of the following:
- Stable, unique network identifiers.
- Stable, persistent storage.
- Ordered, graceful deployment and scaling.
- Ordered, automated rolling updates.
In the above, stable is synonymous with persistence across Pod (re)scheduling. If an application doesn't require any stable identifiers or ordered deployment, deletion, or scaling, you should deploy your application using a workload object that provides a set of stateless replicas. Deployment or ReplicaSet may be better suited to your stateless needs.
StatefulSets are a good choice for running these applications because they provide exactly the guarantees listed above.
If you are running a stateful application in Kubernetes, I recommend using a StatefulSet to manage it. StatefulSets provide the features and functionality that you need to run stateful applications reliably and efficiently.
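A StatefulSet needs a governing headless Service (clusterIP: None) to give each Pod a stable DNS name; a minimal sketch matching the my-zookeeper-cluster example above:

apiVersion: v1
kind: Service
metadata:
  name: my-zookeeper-cluster
spec:
  clusterIP: None   # headless: no virtual IP, one DNS record per Pod
  selector:
    app: my-zookeeper-cluster
  ports:
  - name: client
    port: 2181

Each replica then gets a DNS name of the form my-zookeeper-cluster-0.my-zookeeper-cluster.default.svc.cluster.local.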
apiVersion: v1
kind: ServiceAccount
metadata:
name: default # this is the default service account
namespace: default # deployed on default namespace. Must be deployed on all namespaces where you deploy your apps
imagePullSecrets:
- name: gcp-registry-ro
---
# This manifest creates a service account named `my-service-account`
# along with a reference to a long-lived API token Secret used to authenticate to the API server.
apiVersion: v1
kind: ServiceAccount
metadata:
name: my-service-account
secrets:
# reference to a Secret of type kubernetes.io/service-account-token that holds the token
- name: token
To use a service account manifest, create it in a text editor, save it as a YAML file, and deploy it to your Kubernetes cluster using the kubectl apply command.
For example, to deploy the basic service account manifest from above, you would run the following command:
kubectl apply -f my-service-account.yaml
Once the service account has been deployed, you can use it to create Pods and other Kubernetes objects. To do this, you will need to specify the service account name in the Pod or other Kubernetes object manifest.
For example, to create a Pod that uses the my-service-account service account, you would include the following section in the Pod manifest:
spec:
serviceAccountName: my-service-account
When the Pod is created, it will be able to access the Kubernetes API server using the permissions granted to the my-service-account service account.
Kubernetes service accounts are a powerful way to manage permissions for Pods and other Kubernetes objects. By using service account manifests, you can easily create and manage service accounts with different permissions.
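Permissions are granted to a service account through RBAC. A minimal sketch (the Role rules here are only an example) that lets my-service-account list Pods in the default namespace:

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-reader
  namespace: default
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: read-pods
  namespace: default
subjects:
- kind: ServiceAccount
  name: my-service-account
  namespace: default
roleRef:
  kind: Role
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io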
This example shows how to deploy a Fluentd DaemonSet to collect logs on every node in the cluster:
# Special kind of workload that runs one Pod on every node.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd
namespace: demo
labels:
app: fluentd
spec:
selector:
matchLabels:
app: fluentd
template:
metadata:
labels:
app: fluentd
spec:
terminationGracePeriodSeconds: 30
containers:
- name: fluentd
image: fluent/fluentd:v0.14.10
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
This DaemonSet will create a Fluentd pod on every node in the cluster. Each Fluentd pod mounts the node's /var/log directory, so that it can collect logs from all applications running on that node.
This example shows how to deploy a Node Monitoring DaemonSet to monitor the health of every node in the cluster:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: node-monitoring-daemon
spec:
selector:
matchLabels:
app: node-monitoring
template:
metadata:
labels:
app: node-monitoring
spec:
containers:
- name: node-monitoring
image: prom/node-exporter:latest
ports:
- containerPort: 9100
This DaemonSet will create a Node Exporter pod on every node in the cluster. The Node Exporter pod will expose a Prometheus endpoint on port 9100, which can be scraped by a Prometheus server to monitor the health of the node.
This example shows how to deploy a Cluster Storage DaemonSet to provide distributed storage for applications in the cluster:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cluster-storage-daemon
spec:
selector:
matchLabels:
app: cluster-storage
template:
metadata:
labels:
app: cluster-storage
spec:
containers:
- name: cluster-storage
image: gluster/glusterd-amd64:latest
volumeMounts:
- name: gluster-data
mountPath: /var/lib/glusterfs
volumes:
- name: gluster-data
hostPath:
path: /var/lib/glusterfs
This DaemonSet will create a GlusterFS daemon pod on every node in the cluster. The GlusterFS daemon pods will work together to provide distributed storage for the applications running in the cluster.
All examples above use the same basic structure for the DaemonSet manifest:
The selector field tells the DaemonSet which Pods it manages and must match the labels in the Pod template; to target specific nodes, add a nodeSelector or affinity rules to the Pod spec. The template field specifies the pod that the DaemonSet should create on each node. The template contains the same information as a regular pod spec, such as the container image and any required volumes. Once you have created a DaemonSet manifest, you can deploy it to your Kubernetes cluster using the kubectl apply command. For example, to deploy the Fluentd DaemonSet from the first example, you would run the following command:
kubectl apply -f fluentd-daemon.yaml
Once the DaemonSet is deployed, it will ensure that there is always at least one Fluentd pod running on every node in the cluster.
DaemonSets are a powerful tool for deploying and managing distributed services in Kubernetes. They can be used to deploy a wide variety of services, such as logging agents, monitoring agents, cluster storage solutions, and more.
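Some useful commands for working with the DaemonSets above:

# list DaemonSets and check how many nodes are running the pod
kubectl get daemonsets -n demo
# watch a rolling update of the fluentd DaemonSet
kubectl rollout status daemonset/fluentd -n demo
# restart all DaemonSet pods
kubectl rollout restart daemonset/fluentd -n demo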
Create a StorageClass that lets EKS provision storage on EFS, then reference it from a PVC (a sketch follows the manifest below).
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: efs-sc
provisioner: efs.csi.aws.com
parameters:
provisioningMode: efs-ap
fileSystemId: <EFS-ID>
directoryPerms: "0755"
uid: "1000"
gid: "1000"
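A PVC that uses this StorageClass might look like the sketch below (efs-claim is a placeholder name; as noted earlier, the requested size is largely irrelevant for EFS):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: efs-claim
  namespace: default
spec:
  accessModes:
    - ReadWriteMany   # EFS supports concurrent access from many nodes
  storageClassName: efs-sc
  resources:
    requests:
      storage: 1Gi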