Kubernetes is an open-source platform for automating the deployment, scaling, and management of containerized applications. This article covers Kubernetes from the basics to practical operations.
Kubernetes Architecture
Cluster Structure
flowchart TB
subgraph ControlPlane["Control Plane"]
API["API Server"]
Sched["Scheduler"]
CM["Controller Manager"]
etcd["etcd<br/>(Data Store)"]
end
subgraph WorkerNodes["Worker Nodes"]
subgraph Node1["Node 1"]
kubelet1["kubelet"]
proxy1["kube-proxy"]
runtime1["Container Runtime<br/>(containerd)"]
Pod1["Pod"]
Pod2["Pod"]
end
subgraph Node2["Node 2"]
kubelet2["kubelet"]
proxy2["kube-proxy"]
runtime2["Container Runtime<br/>(containerd)"]
Pod3["Pod"]
Pod4["Pod"]
end
end
ControlPlane -->|kubelet communication| WorkerNodes
Main Components
| Component | Role |
|---|---|
| API Server | Processes all API requests to the cluster |
| etcd | Distributed key-value store that persists all cluster state |
| Scheduler | Places Pods on appropriate nodes |
| Controller Manager | Runs various controllers (ReplicaSet, Deployment, etc.) |
| kubelet | Manages Pods on the node |
| kube-proxy | Maintains network rules on each node and load-balances traffic to Services |
Local Environment Setup
Installing minikube
# macOS (Homebrew)
brew install minikube
# Linux
curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
sudo install minikube-linux-amd64 /usr/local/bin/minikube
# Windows (winget)
winget install Kubernetes.minikube
# Start the cluster
minikube start --driver=docker --cpus=4 --memory=8192
# Check status
minikube status
# Start Kubernetes dashboard
minikube dashboard
Installing kind (Alternative)
# Kubernetes IN Docker - A lighter alternative
# macOS/Linux
brew install kind
# Create cluster (multi-node)
cat <<EOF | kind create cluster --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
EOF
# List clusters
kind get clusters
# Delete cluster
kind delete cluster
kubectl Configuration
# Install
brew install kubectl
# Check contexts
kubectl config get-contexts
# Switch context
kubectl config use-context minikube
# Check cluster info
kubectl cluster-info
kubectl get nodes
Basic Resources
Pod
# pod.yaml - Minimum deployment unit
apiVersion: v1
kind: Pod
metadata:
name: nginx-pod
labels:
app: nginx
environment: development
spec:
containers:
- name: nginx
image: nginx:1.25
ports:
- containerPort: 80
resources:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"
livenessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 10
periodSeconds: 10
readinessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 5
periodSeconds: 5
# Pod operations
kubectl apply -f pod.yaml
kubectl get pods
kubectl describe pod nginx-pod
kubectl logs nginx-pod
kubectl exec -it nginx-pod -- /bin/bash
kubectl delete pod nginx-pod
Deployment
# deployment.yaml - Application deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: web-app
labels:
app: web-app
spec:
replicas: 3
selector:
matchLabels:
app: web-app
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
labels:
app: web-app
spec:
containers:
- name: web-app
image: myapp:1.0.0
ports:
- containerPort: 3000
env:
- name: NODE_ENV
value: "production"
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: app-secrets
key: database-url
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "1000m"
livenessProbe:
httpGet:
path: /health
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /ready
port: 3000
initialDelaySeconds: 10
periodSeconds: 5
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app: web-app
topologyKey: kubernetes.io/hostname
# Deployment operations
kubectl apply -f deployment.yaml
kubectl get deployments
kubectl get pods -l app=web-app
# Scaling
kubectl scale deployment web-app --replicas=5
# Rolling update
kubectl set image deployment/web-app web-app=myapp:2.0.0
# Rollback
kubectl rollout undo deployment/web-app
kubectl rollout history deployment/web-app
kubectl rollout status deployment/web-app
Service
# service.yaml - Service exposure
apiVersion: v1
kind: Service
metadata:
name: web-app-service
spec:
type: ClusterIP # ClusterIP, NodePort, LoadBalancer
selector:
app: web-app
ports:
- name: http
port: 80
targetPort: 3000
protocol: TCP
---
# NodePort service (for external access)
apiVersion: v1
kind: Service
metadata:
name: web-app-nodeport
spec:
type: NodePort
selector:
app: web-app
ports:
- port: 80
targetPort: 3000
nodePort: 30080
---
# LoadBalancer service (for cloud environments)
apiVersion: v1
kind: Service
metadata:
name: web-app-lb
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: nlb
spec:
type: LoadBalancer
selector:
app: web-app
ports:
- port: 80
targetPort: 3000
Ingress
# ingress.yaml - HTTP routing
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: web-app-ingress
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/ssl-redirect: "true"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: nginx
tls:
- hosts:
- app.example.com
secretName: app-tls-secret
rules:
- host: app.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: web-app-service
port:
number: 80
- path: /api
pathType: Prefix
backend:
service:
name: api-service
port:
number: 80
# Install Ingress Controller (minikube)
minikube addons enable ingress
# Check Ingress
kubectl get ingress
kubectl describe ingress web-app-ingress
Configuration Management
ConfigMap
# configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: app-config
data:
# Simple key-value
LOG_LEVEL: "info"
API_TIMEOUT: "30s"
# Mount as file
nginx.conf: |
server {
listen 80;
server_name localhost;
location / {
proxy_pass http://backend:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
# JSON configuration
config.json: |
{
"database": {
"host": "postgres",
"port": 5432
},
"cache": {
"enabled": true,
"ttl": 3600
}
}
# ConfigMap usage example
apiVersion: apps/v1
kind: Deployment
metadata:
name: app-with-config
spec:
template:
spec:
containers:
- name: app
image: myapp:1.0
# Inject as environment variables
envFrom:
- configMapRef:
name: app-config
# Individual environment variables
env:
- name: LOG_LEVEL
valueFrom:
configMapKeyRef:
name: app-config
key: LOG_LEVEL
# Mount as volume
volumeMounts:
- name: config-volume
mountPath: /etc/config
readOnly: true
volumes:
- name: config-volume
configMap:
name: app-config
items:
- key: nginx.conf
path: nginx.conf
- key: config.json
path: config.json
Secret
# secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: app-secrets
type: Opaque
data:
# Base64 encoded
database-url: cG9zdGdyZXNxbDovL3VzZXI6cGFzc0Bsb2NhbGhvc3Q6NTQzMi9teWRi
api-key: c3VwZXJzZWNyZXRhcGlrZXk=
stringData:
# Plain text (automatically Base64 encoded)
jwt-secret: my-super-secret-jwt-key
---
# Docker Registry authentication Secret
apiVersion: v1
kind: Secret
metadata:
name: docker-registry-secret
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: eyJhdXRocyI6ey...
# Create Secret (command line)
kubectl create secret generic app-secrets \
--from-literal=database-url='postgresql://user:pass@localhost:5432/mydb' \
--from-literal=api-key='supersecretapikey'
# Create Secret from file
kubectl create secret generic tls-secret \
--from-file=tls.crt=./server.crt \
--from-file=tls.key=./server.key
# Check Secret (values are masked)
kubectl get secrets
kubectl describe secret app-secrets
Persistent Storage
PersistentVolume and PersistentVolumeClaim
# storage.yaml
# PersistentVolume (created by cluster admin)
apiVersion: v1
kind: PersistentVolume
metadata:
name: postgres-pv
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: standard
hostPath: # For local development
path: /data/postgres
---
# PersistentVolumeClaim (requested by developer)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: standard
---
# StorageClass (dynamic provisioning)
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: fast-ssd
provisioner: ebs.csi.aws.com # AWS EBS CSI driver (the in-tree kubernetes.io/aws-ebs provisioner does not support gp3/iops/throughput)
parameters:
type: gp3
iops: "3000"
throughput: "125"
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
StatefulSet (for databases)
# statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgres
spec:
serviceName: postgres
replicas: 1
selector:
matchLabels:
app: postgres
template:
metadata:
labels:
app: postgres
spec:
containers:
- name: postgres
image: postgres:15
ports:
- containerPort: 5432
env:
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: postgres-secrets
key: username
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgres-secrets
key: password
- name: POSTGRES_DB
value: myapp
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
volumeMounts:
- name: postgres-storage
mountPath: /var/lib/postgresql/data
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "1Gi"
cpu: "1000m"
volumeClaimTemplates:
- metadata:
name: postgres-storage
spec:
accessModes: ["ReadWriteOnce"]
storageClassName: fast-ssd
resources:
requests:
storage: 20Gi
---
# Headless Service for StatefulSet
apiVersion: v1
kind: Service
metadata:
name: postgres
spec:
clusterIP: None
selector:
app: postgres
ports:
- port: 5432
Practical Configuration Example
Complete Web Application Structure
# namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: production
labels:
name: production
---
# resourcequota.yaml
apiVersion: v1
kind: ResourceQuota
metadata:
name: production-quota
namespace: production
spec:
hard:
requests.cpu: "10"
requests.memory: 20Gi
limits.cpu: "20"
limits.memory: 40Gi
pods: "50"
---
# limitrange.yaml
apiVersion: v1
kind: LimitRange
metadata:
name: default-limits
namespace: production
spec:
limits:
- default:
cpu: "500m"
memory: "512Mi"
defaultRequest:
cpu: "100m"
memory: "128Mi"
type: Container
# complete-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: frontend
namespace: production
spec:
replicas: 2
selector:
matchLabels:
app: frontend
template:
metadata:
labels:
app: frontend
spec:
containers:
- name: frontend
image: myapp/frontend:1.0
ports:
- containerPort: 80
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "256Mi"
cpu: "500m"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: backend
namespace: production
spec:
replicas: 3
selector:
matchLabels:
app: backend
template:
metadata:
labels:
app: backend
spec:
serviceAccountName: backend-sa
containers:
- name: backend
image: myapp/backend:1.0
ports:
- containerPort: 3000
env:
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: app-secrets
key: database-url
- name: REDIS_URL
value: "redis://redis:6379"
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "1000m"
livenessProbe:
httpGet:
path: /health
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /ready
port: 3000
initialDelaySeconds: 10
periodSeconds: 5
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis
namespace: production
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- name: redis
image: redis:7-alpine
ports:
- containerPort: 6379
resources:
requests:
memory: "64Mi"
cpu: "50m"
limits:
memory: "128Mi"
cpu: "100m"
---
# Services
apiVersion: v1
kind: Service
metadata:
name: frontend
namespace: production
spec:
selector:
app: frontend
ports:
- port: 80
---
apiVersion: v1
kind: Service
metadata:
name: backend
namespace: production
spec:
selector:
app: backend
ports:
- port: 3000
---
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: production
spec:
selector:
app: redis
ports:
- port: 6379
---
# Ingress
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: main-ingress
namespace: production
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: "10m"
spec:
ingressClassName: nginx
rules:
- host: myapp.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: frontend
port:
number: 80
- path: /api
pathType: Prefix
backend:
service:
name: backend
port:
number: 3000
Monitoring and Debugging
kubectl Debug Commands
# Pod list (detailed)
kubectl get pods -o wide -n production
# Resource usage
kubectl top nodes
kubectl top pods -n production
# Pod details
kubectl describe pod <pod-name> -n production
# Check logs
kubectl logs <pod-name> -n production
kubectl logs <pod-name> -c <container-name> # Multi-container
kubectl logs -f <pod-name> # Real-time
kubectl logs --previous <pod-name> # Previous container
# Execute command in Pod
kubectl exec -it <pod-name> -- /bin/sh
kubectl exec -it <pod-name> -c <container-name> -- /bin/sh
# Port forward
kubectl port-forward <pod-name> 8080:80
kubectl port-forward svc/<service-name> 8080:80
# Check events
kubectl get events -n production --sort-by='.lastTimestamp'
# Output resource as YAML
kubectl get deployment <name> -o yaml
Horizontal Pod Autoscaler
# hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: backend-hpa
namespace: production
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: backend
minReplicas: 2
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 70
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 80
behavior:
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Percent
value: 10
periodSeconds: 60
scaleUp:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 15
- type: Pods
value: 4
periodSeconds: 15
selectPolicy: Max
# Check HPA
kubectl get hpa -n production
kubectl describe hpa backend-hpa -n production
Package Management with Helm
Helm Basics
# Install Helm
brew install helm
# Add repository
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
# Search charts
helm search repo nginx
# Install chart
helm install my-nginx bitnami/nginx
# Install with custom values
helm install my-nginx bitnami/nginx \
--set service.type=ClusterIP \
--set replicaCount=3
# Use values.yaml
helm install my-nginx bitnami/nginx -f values.yaml
# Upgrade
helm upgrade my-nginx bitnami/nginx -f values.yaml
# Rollback
helm rollback my-nginx 1
# Uninstall
helm uninstall my-nginx
# List releases
helm list
Creating Custom Charts
# Create chart scaffold
helm create myapp
myapp/
├── Chart.yaml # Chart metadata
├── values.yaml # Default values
├── templates/
│ ├── deployment.yaml
│ ├── service.yaml
│ ├── ingress.yaml
│ ├── configmap.yaml
│ ├── secret.yaml
│ ├── hpa.yaml
│ ├── _helpers.tpl # Template helpers
│ └── NOTES.txt # Post-install message
└── charts/ # Dependent charts
# templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "myapp.fullname" . }}
labels:
{{- include "myapp.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "myapp.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "myapp.selectorLabels" . | nindent 8 }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: {{ .Values.service.port }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if .Values.env }}
env:
{{- range $key, $value := .Values.env }}
- name: {{ $key }}
value: {{ $value | quote }}
{{- end }}
{{- end }}
# values.yaml
replicaCount: 3
image:
repository: myapp/backend
tag: "1.0.0"
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 3000
ingress:
enabled: true
className: nginx
hosts:
- host: myapp.example.com
paths:
- path: /
pathType: Prefix
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "1000m"
env:
NODE_ENV: production
LOG_LEVEL: info
autoscaling:
enabled: true
minReplicas: 2
maxReplicas: 10
targetCPUUtilizationPercentage: 70
Summary
Kubernetes has become the standard for container orchestration.
Learning Steps
| Step | Content |
|---|---|
| 1. Basics | Pod, Deployment, Service |
| 2. Configuration | ConfigMap, Secret |
| 3. Storage | PV, PVC, StatefulSet |
| 4. Networking | Ingress, NetworkPolicy |
| 5. Operations | HPA, Helm, Monitoring |
Best Practices
- Set Resource Limits: Always configure requests/limits
- Health Checks: Implement liveness and readiness probes for every container
- Use Labels: Consistent labeling strategy
- Namespace Isolation: Separate by environment or team
- GitOps: Version control your manifests
Mastering Kubernetes enables scalable and reliable application operations.