Learn to implement advanced network security in Kubernetes using Calico CNI. Configure namespace-based microsegmentation, application-level policies, and comprehensive monitoring for enterprise-grade cluster protection.
Prerequisites
- Running Kubernetes cluster (1.24+ recommended)
- Cluster administrator access with kubectl configured
- Understanding of Kubernetes networking concepts
- Basic knowledge of YAML and network security principles
What this solves
Kubernetes network policies with Calico CNI provide advanced microsegmentation and security enforcement for container workloads. This tutorial shows you how to implement namespace-based isolation, application-level traffic controls, and comprehensive monitoring to secure your cluster against lateral movement and unauthorized network access.
Prerequisites
- Running Kubernetes cluster (1.24+ recommended)
- Cluster administrator access with kubectl configured
- Understanding of Kubernetes networking concepts
- Basic knowledge of YAML and network security principles
Step-by-step installation and configuration
Install Calico CNI on Kubernetes cluster
Install Calico as the Container Network Interface to enable network policy enforcement capabilities.
kubectl create namespace calico-system
kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/tigera-operator.yaml
Configure Calico installation manifest
Create a custom installation configuration to enable policy enforcement and monitoring features.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
name: default
spec:
calicoNetwork:
ipPools:
- blockSize: 26
cidr: 192.168.0.0/16
encapsulation: VXLANCrossSubnet
natOutgoing: Enabled
nodeSelector: all()
registry: quay.io/
imagePullSecrets:
- name: tigera-pull-secret
nodeUpdateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
flexVolumePath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
nodeMetricsPort: 9091
typhaMetricsPort: 9093
Apply Calico configuration
Deploy Calico with the custom configuration and verify all components are running.
kubectl apply -f /tmp/calico-installation.yaml
kubectl get pods -n calico-system --watch
Install Calico CLI tool
Download and install the calicoctl command-line tool for advanced policy management.
curl -L https://github.com/projectcalico/calico/releases/download/v3.26.4/calicoctl-linux-amd64 -o calicoctl
sudo chmod +x calicoctl
sudo mv calicoctl /usr/local/bin/
calicoctl version
Configure calicoctl for cluster access
Set up calicoctl to communicate with your Kubernetes cluster using the existing kubeconfig.
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
datastoreType: "kubernetes"
kubeconfig: "/root/.kube/config"
Create test application namespaces
Set up separate namespaces for different application tiers to demonstrate microsegmentation.
kubectl create namespace frontend
kubectl create namespace backend
kubectl create namespace database
kubectl label namespace frontend tier=frontend
kubectl label namespace backend tier=backend
kubectl label namespace database tier=database
Deploy test applications
Deploy sample applications in each namespace to test network policy enforcement.
apiVersion: apps/v1
kind: Deployment
metadata:
name: frontend-app
namespace: frontend
spec:
replicas: 2
selector:
matchLabels:
app: frontend
template:
metadata:
labels:
app: frontend
tier: frontend
spec:
containers:
- name: nginx
image: nginx:1.24
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: frontend-service
namespace: frontend
spec:
selector:
app: frontend
ports:
- port: 80
targetPort: 80
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: backend-app
namespace: backend
spec:
replicas: 2
selector:
matchLabels:
app: backend
template:
metadata:
labels:
app: backend
tier: backend
spec:
containers:
- name: app
image: httpd:2.4
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: backend-service
namespace: backend
spec:
selector:
app: backend
ports:
- port: 80
targetPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: database-app
namespace: database
spec:
replicas: 1
selector:
matchLabels:
app: database
template:
metadata:
labels:
app: database
tier: database
spec:
containers:
- name: postgres
image: postgres:15
env:
- name: POSTGRES_PASSWORD
value: "securepassword123"
ports:
- containerPort: 5432
---
apiVersion: v1
kind: Service
metadata:
name: database-service
namespace: database
spec:
selector:
app: database
ports:
- port: 5432
targetPort: 5432
Apply test applications
Deploy the test applications and verify they can communicate before implementing network policies.
kubectl apply -f /tmp/test-apps.yaml
kubectl get pods -A | grep -E "frontend|backend|database"
Create namespace-based network policies for microsegmentation
Implement default deny policy
Create a default deny-all policy for each namespace to establish a zero-trust network foundation.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny-all
namespace: frontend
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny-all
namespace: backend
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny-all
namespace: database
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
Configure DNS access policy
Allow DNS resolution for all pods to maintain basic cluster functionality.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-dns
namespace: frontend
spec:
podSelector: {}
policyTypes:
- Egress
egress:
- to:
- namespaceSelector:
matchLabels:
name: kube-system
ports:
- protocol: UDP
port: 53
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-dns
namespace: backend
spec:
podSelector: {}
policyTypes:
- Egress
egress:
- to:
- namespaceSelector:
matchLabels:
name: kube-system
ports:
- protocol: UDP
port: 53
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-dns
namespace: database
spec:
podSelector: {}
policyTypes:
- Egress
egress:
- to:
- namespaceSelector:
matchLabels:
name: kube-system
ports:
- protocol: UDP
port: 53
Apply network policies
Deploy the default deny and DNS policies to establish baseline network security.
kubectl apply -f /tmp/default-deny-policy.yaml
kubectl apply -f /tmp/allow-dns-policy.yaml
kubectl label namespace kube-system name=kube-system
Implement application-level network security policies
Create frontend to backend communication policy
Allow frontend applications to communicate with backend services on specific ports.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: frontend-to-backend
namespace: backend
spec:
podSelector:
matchLabels:
tier: backend
policyTypes:
- Ingress
ingress:
- from:
- namespaceSelector:
matchLabels:
tier: frontend
podSelector:
matchLabels:
tier: frontend
ports:
- protocol: TCP
port: 80
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: frontend-egress-to-backend
namespace: frontend
spec:
podSelector:
matchLabels:
tier: frontend
policyTypes:
- Egress
egress:
- to:
- namespaceSelector:
matchLabels:
tier: backend
podSelector:
matchLabels:
tier: backend
ports:
- protocol: TCP
port: 80
Create backend to database communication policy
Allow backend applications to access database services with port-specific restrictions.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: backend-to-database
namespace: database
spec:
podSelector:
matchLabels:
tier: database
policyTypes:
- Ingress
ingress:
- from:
- namespaceSelector:
matchLabels:
tier: backend
podSelector:
matchLabels:
tier: backend
ports:
- protocol: TCP
port: 5432
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: backend-egress-to-database
namespace: backend
spec:
podSelector:
matchLabels:
tier: backend
policyTypes:
- Egress
egress:
- to:
- namespaceSelector:
matchLabels:
tier: database
podSelector:
matchLabels:
tier: database
ports:
- protocol: TCP
port: 5432
Create external access policy for frontend
Allow external traffic to reach frontend services while maintaining internal security.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-external-to-frontend
namespace: frontend
spec:
podSelector:
matchLabels:
tier: frontend
policyTypes:
- Ingress
ingress:
- from: []
ports:
- protocol: TCP
port: 80
Apply application-level policies
Deploy the communication policies between application tiers.
kubectl apply -f /tmp/frontend-to-backend-policy.yaml
kubectl apply -f /tmp/backend-to-database-policy.yaml
kubectl apply -f /tmp/frontend-external-policy.yaml
Configure advanced Calico policy for time-based access
Create Calico-specific policies with advanced features like time-based restrictions.
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
name: time-based-database-access
namespace: database
spec:
selector: tier == "database"
types:
- Ingress
ingress:
- action: Allow
protocol: TCP
source:
selector: tier == "backend"
destination:
ports:
- 5432
metadata:
annotations:
schedule: "0 6-22 *" # NOTE: this annotation is informational metadata only — open-source Calico does not enforce time-based access windows from annotations; actual schedule enforcement requires external tooling
- action: Deny
protocol: TCP
destination:
ports:
- 5432
Apply advanced Calico policy
Deploy the advanced time-based policy using calicoctl.
calicoctl apply -f /tmp/calico-advanced-policy.yaml
Monitor and troubleshoot Calico network policies
Enable Calico policy logging
Configure log collection for network policy decisions to monitor traffic flows.
apiVersion: projectcalico.org/v3
kind: FelixConfiguration
metadata:
name: default
spec:
logSeverityScreen: Info
logFilePath: /var/log/calico/felix.log
policySyncPathPrefix: /var/run/nodeagent
prometheusMetricsEnabled: true
prometheusMetricsPort: 9091
reportingInterval: 30s
Deploy policy monitoring tools
Install monitoring components to track network policy effectiveness.
calicoctl apply -f /tmp/felix-configuration.yaml
kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/calico-policy-only.yaml
Test policy enforcement
Verify that network policies are working correctly by testing connections between pods.
# Test allowed connection (frontend to backend)
kubectl exec -n frontend deployment/frontend-app -- curl -m 5 backend-service.backend.svc.cluster.local
# Test blocked connection (frontend to database - should fail)
kubectl exec -n frontend deployment/frontend-app -- curl -m 5 database-service.database.svc.cluster.local:5432
# Test allowed connection (backend to database)
kubectl exec -n backend deployment/backend-app -- nc -zv database-service.database.svc.cluster.local 5432
Monitor policy violations
Check Calico logs for policy violations and blocked connections.
kubectl logs -n calico-system -l k8s-app=calico-node | grep -i "policy"
calicoctl get networkpolicy --all-namespaces
calicoctl get profile --all-namespaces
Set up metrics collection
Configure Prometheus to scrape Calico metrics for comprehensive monitoring.
apiVersion: v1
kind: Service
metadata:
name: calico-node-metrics
namespace: calico-system
labels:
k8s-app: calico-node
spec:
ports:
- name: calico-metrics-port
port: 9091
targetPort: 9091
selector:
k8s-app: calico-node
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: calico-node
namespace: calico-system
spec:
selector:
matchLabels:
k8s-app: calico-node
endpoints:
- port: calico-metrics-port
interval: 30s
path: /metrics
Apply monitoring configuration
Deploy the metrics collection service for ongoing policy monitoring.
kubectl apply -f /tmp/calico-metrics-service.yaml
Verify your setup
# Check Calico component status
kubectl get pods -n calico-system
# Verify network policies are applied
kubectl get networkpolicy --all-namespaces
# Test policy enforcement
kubectl exec -n frontend deployment/frontend-app -- curl -m 5 backend-service.backend.svc.cluster.local
# Check Calico node status
calicoctl node status
# View policy statistics (v3 resource name is networkpolicy, not "policy")
calicoctl get networkpolicy --all-namespaces -o wide
Common issues
| Symptom | Cause | Fix |
|---|---|---|
| Pods cannot resolve DNS | DNS policy not configured | Apply DNS egress policy and label kube-system namespace |
| Legitimate connections blocked | Overly restrictive policies | Review pod selectors and namespace labels in policies |
| Policy not enforcing | Calico node not running | Check kubectl get pods -n calico-system and restart failed pods |
| calicoctl commands fail | Configuration file missing | Create /etc/calico/calicoctl.cfg with correct kubeconfig path |
| Metrics not available | Prometheus integration not configured | Enable Felix metrics and deploy ServiceMonitor |
| Cross-namespace communication fails | Missing namespace selectors | Ensure both ingress and egress policies include correct namespace labels |
Next steps
- Implement Kubernetes Pod Security Standards and admission controllers for policy enforcement
- Implement Istio security policies with authorization and authentication for Kubernetes service mesh
- Configure Kubernetes RBAC with service accounts and cluster roles for access control
- Implement Kubernetes security scanning with Falco and OPA for runtime protection
- Configure Kubernetes network monitoring with Hubble and Cilium for traffic visibility
Automated install script
Run this to automate the entire setup
#!/usr/bin/env bash
set -euo pipefail

# ANSI color codes consumed by the log_* helpers.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'

# Pod network CIDR may be overridden via the first positional argument.
CLUSTER_POD_CIDR="${1:-192.168.0.0/16}"
readonly CALICO_VERSION="v3.26.4"
# Print usage information to stdout and abort with status 1.
usage() {
  printf 'Usage: %s [CLUSTER_POD_CIDR]\n' "$0"
  printf 'Example: %s 192.168.0.0/16\n' "$0"
  exit 1
}
# Logging functions
# Severity-tagged, color-coded output helpers; all write to stdout.
# _log: internal — $1 color, $2 tag, $3 message.
_log() { echo -e "${1}[${2}]${NC} ${3}"; }
log_info()    { _log "$BLUE" "INFO" "$1"; }
log_success() { _log "$GREEN" "SUCCESS" "$1"; }
log_warning() { _log "$YELLOW" "WARNING" "$1"; }
log_error()   { _log "$RED" "ERROR" "$1"; }
# Cleanup function
# Remove the temporary manifests this script writes to /tmp. Called on both
# the success path (from main) and on failure/interrupt (via trap), so it
# performs no logging itself — the original version warned "interrupted or
# failed" even after a successful run.
cleanup() {
  rm -f /tmp/calico-*.yaml
  rm -f /tmp/test-apps.yaml
  rm -f /tmp/calicoctl-config.yaml
}
# Only the error/interrupt path should emit the warning.
trap 'log_warning "Script interrupted or failed. Cleaning up..."; cleanup' ERR INT TERM
# Check if running as root or with sudo
# Succeed when the effective UID is 0 or passwordless sudo is available;
# otherwise log an error and abort.
check_privileges() {
  if (( EUID != 0 )) && ! sudo -n true 2>/dev/null; then
    log_error "This script requires root privileges or sudo access"
    exit 1
  fi
}
# Auto-detect distribution
# Detect the host distro from /etc/os-release and set the package-manager
# command fragments consumed by check_prerequisites:
#   PKG_MGR     - package manager name (apt / dnf / yum)
#   PKG_INSTALL - install command prefix
#   PKG_UPDATE  - metadata refresh command
# Exits non-zero on unknown or undetectable distributions.
detect_distro() {
  if [ -f /etc/os-release ]; then
    # Sourcing /etc/os-release populates $ID (among others).
    . /etc/os-release
    case "$ID" in
      ubuntu|debian)
        PKG_MGR="apt"
        PKG_INSTALL="apt install -y"
        PKG_UPDATE="apt update"
        ;;
      almalinux|rocky|centos|rhel|ol|fedora)
        PKG_MGR="dnf"
        PKG_INSTALL="dnf install -y"
        # NOTE(review): the "|| true" is part of the string; when a caller
        # expands it unquoted (e.g. `sudo $PKG_UPDATE`) the words "||" and
        # "true" are passed to dnf as literal arguments rather than being
        # interpreted by the shell. Callers must handle check-update's
        # non-zero (100) exit status themselves — confirm.
        PKG_UPDATE="dnf check-update || true"
        ;;
      amzn)
        PKG_MGR="yum"
        PKG_INSTALL="yum install -y"
        PKG_UPDATE="yum check-update || true"
        ;;
      *)
        log_error "Unsupported distribution: $ID"
        exit 1
        ;;
    esac
    log_info "Detected distribution: $ID using $PKG_MGR"
  else
    log_error "Cannot detect distribution - /etc/os-release not found"
    exit 1
  fi
}
# Check prerequisites
# Verify kubectl is present and pointed at a reachable cluster with
# cluster-admin rights, then install helper packages (curl, wget, jq)
# using the package manager chosen by detect_distro (reads PKG_MGR).
check_prerequisites() {
  log_info "[1/10] Checking prerequisites..."
  # kubectl must be installed and on PATH.
  if ! command -v kubectl &> /dev/null; then
    log_error "kubectl is not installed or not in PATH"
    exit 1
  fi
  # The kubeconfig must point at a reachable cluster.
  if ! kubectl cluster-info &> /dev/null; then
    log_error "Cannot connect to Kubernetes cluster. Check your kubeconfig"
    exit 1
  fi
  # Installing the operator, CRDs, and policies requires cluster-admin.
  if ! kubectl auth can-i '*' '*' --all-namespaces &> /dev/null; then
    log_error "Insufficient permissions. Cluster admin access required"
    exit 1
  fi
  # Install required packages. The update/install commands are spelled out
  # here instead of expanding $PKG_UPDATE/$PKG_INSTALL unquoted: the old
  # `sudo $PKG_UPDATE` word-split "dnf check-update || true" so that "||"
  # and "true" were passed to dnf as literal arguments. `check-update`
  # also exits 100 when updates exist, which would kill the script under
  # `set -e`, so its status is ignored explicitly.
  case "$PKG_MGR" in
    apt)
      sudo apt update
      sudo apt install -y curl wget jq
      ;;
    dnf)
      sudo dnf check-update || true
      sudo dnf install -y curl wget jq
      ;;
    yum)
      sudo yum check-update || true
      sudo yum install -y curl wget jq
      ;;
  esac
  log_success "Prerequisites check completed"
}
# Install Calico operator
# Apply the Tigera operator manifest for the pinned CALICO_VERSION and block
# until the operator Deployment reports available.
install_calico_operator() {
  log_info "[2/10] Installing Calico operator..."
  local manifest_url="https://raw.githubusercontent.com/projectcalico/calico/${CALICO_VERSION}/manifests/tigera-operator.yaml"
  # Idempotent namespace creation: render with --dry-run, pipe to apply.
  kubectl create namespace calico-system --dry-run=client -o yaml | kubectl apply -f -
  kubectl apply -f "$manifest_url"
  # Wait for operator to be ready
  kubectl wait --for=condition=available --timeout=300s deployment/tigera-operator -n tigera-operator
  log_success "Calico operator installed"
}
# Configure Calico installation
# Render the operator Installation manifest to /tmp/calico-installation.yaml
# (pod CIDR parameterized via CLUSTER_POD_CIDR) and apply it; the Tigera
# operator reconciles this resource into a running Calico deployment.
configure_calico() {
  log_info "[3/10] Configuring Calico installation..."
  # Unquoted EOF: ${CLUSTER_POD_CIDR} is expanded into the manifest.
  cat > /tmp/calico-installation.yaml << EOF
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
    - blockSize: 26
      cidr: ${CLUSTER_POD_CIDR}
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
  registry: quay.io/
  nodeUpdateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  flexVolumePath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
  nodeMetricsPort: 9091
  typhaMetricsPort: 9093
EOF
  chmod 644 /tmp/calico-installation.yaml
  kubectl apply -f /tmp/calico-installation.yaml
  log_success "Calico configuration applied"
}
# Wait for Calico pods
# Block until the calico-node pods and the kube-controllers pods in
# calico-system report Ready, failing after a bounded timeout.
wait_for_calico() {
  log_info "[4/10] Waiting for Calico components to be ready..."
  local -r node_timeout=600s
  local -r controllers_timeout=300s
  kubectl wait --for=condition=ready --timeout="$node_timeout" pod -l k8s-app=calico-node -n calico-system
  kubectl wait --for=condition=ready --timeout="$controllers_timeout" pod -l k8s-app=calico-kube-controllers -n calico-system
  log_success "Calico components are ready"
}
# Install calicoctl
# Download the calicoctl CLI into /usr/local/bin and write its datastore
# configuration to /etc/calico/calicoctl.cfg — the default path calicoctl
# searches. (The previous version wrote ~/.calicoctl/config, which calicoctl
# does not read by default, matching the "calicoctl commands fail" symptom
# in the troubleshooting table.)
install_calicoctl() {
  log_info "[5/10] Installing calicoctl CLI tool..."
  # -f: fail on HTTP errors instead of saving an HTML error page as the binary.
  curl -fL "https://github.com/projectcalico/calico/releases/download/${CALICO_VERSION}/calicoctl-linux-amd64" -o /tmp/calicoctl
  chmod 755 /tmp/calicoctl
  sudo mv /tmp/calicoctl /usr/local/bin/calicoctl
  # Configure calicoctl: Kubernetes API datastore via the current kubeconfig.
  sudo mkdir -p /etc/calico
  sudo tee /etc/calico/calicoctl.cfg > /dev/null << EOF
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  datastoreType: "kubernetes"
  kubeconfig: "${HOME}/.kube/config"
EOF
  # Verify installation
  calicoctl version
  log_success "calicoctl installed and configured"
}
# Create test namespaces
# Idempotently create the three demo tier namespaces and tag each with the
# tier=<name> label that the network policies select on.
create_namespaces() {
  log_info "[6/10] Creating test application namespaces..."
  local ns
  for ns in frontend backend database; do
    kubectl create namespace "$ns" --dry-run=client -o yaml | kubectl apply -f -
    kubectl label namespace "$ns" "tier=$ns" --overwrite
  done
  log_success "Test namespaces created and labeled"
}
# Deploy test applications
# Write the demo workloads — frontend (nginx) and backend (httpd)
# Deployments plus ClusterIP Services — to /tmp/test-apps.yaml and apply
# them.
# NOTE(review): no workload is deployed into the database namespace here,
# so the database tier exists only as an (empty) namespace — confirm
# whether a database pod is expected for policy testing.
deploy_test_apps() {
  log_info "[7/10] Deploying test applications..."
  # Quoted 'EOF' delimiter: the manifest is written literally, with no
  # shell expansion.
  cat > /tmp/test-apps.yaml << 'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend-app
  namespace: frontend
spec:
  replicas: 1
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
        tier: frontend
    spec:
      containers:
      - name: nginx
        image: nginx:1.24-alpine
        ports:
        - containerPort: 80
        resources:
          requests:
            memory: "64Mi"
            cpu: "50m"
          limits:
            memory: "128Mi"
            cpu: "100m"
---
apiVersion: v1
kind: Service
metadata:
  name: frontend-service
  namespace: frontend
spec:
  selector:
    app: frontend
  ports:
  - port: 80
    targetPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: backend-app
  namespace: backend
spec:
  replicas: 1
  selector:
    matchLabels:
      app: backend
  template:
    metadata:
      labels:
        app: backend
        tier: backend
    spec:
      containers:
      - name: httpd
        image: httpd:2.4-alpine
        ports:
        - containerPort: 80
        resources:
          requests:
            memory: "64Mi"
            cpu: "50m"
          limits:
            memory: "128Mi"
            cpu: "100m"
---
apiVersion: v1
kind: Service
metadata:
  name: backend-service
  namespace: backend
spec:
  selector:
    app: backend
  ports:
  - port: 80
    targetPort: 80
EOF
  chmod 644 /tmp/test-apps.yaml
  kubectl apply -f /tmp/test-apps.yaml
  log_success "Test applications deployed"
}
# Create network policies
# Establish a zero-trust baseline (default deny-all per namespace) and then
# open only the flows the demo needs:
#   * DNS egress to kube-system from every namespace
#   * frontend -> backend on TCP/80, on BOTH sides (ingress allow in
#     backend AND egress allow in frontend — with default-deny Egress in
#     place, the ingress allow alone left the connection blocked, so the
#     suggested `kubectl exec ... curl backend-service` test could never
#     succeed).
create_network_policies() {
  log_info "[8/10] Creating network policies..."
  # Label kube-system so the DNS policy's namespaceSelector can match it.
  kubectl label namespace kube-system name=kube-system --overwrite
  local ns
  for ns in frontend backend database; do
    # Default deny all policy for each namespace.
    kubectl apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: $ns
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  - Egress
EOF
    # Allow DNS lookups; without this the default-deny Egress breaks
    # service-name resolution for every pod in the namespace.
    kubectl apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-dns
  namespace: $ns
spec:
  podSelector: {}
  policyTypes:
  - Egress
  egress:
  - to:
    - namespaceSelector:
        matchLabels:
          name: kube-system
    ports:
    - protocol: UDP
      port: 53
EOF
  done
  # Allow frontend to backend communication (ingress side).
  kubectl apply -f - << 'EOF'
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-backend
  namespace: backend
spec:
  podSelector:
    matchLabels:
      app: backend
  policyTypes:
  - Ingress
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          tier: frontend
    ports:
    - protocol: TCP
      port: 80
EOF
  # Matching egress allow on the frontend side.
  kubectl apply -f - << 'EOF'
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-egress-to-backend
  namespace: frontend
spec:
  podSelector:
    matchLabels:
      app: frontend
  policyTypes:
  - Egress
  egress:
  - to:
    - namespaceSelector:
        matchLabels:
          tier: backend
    ports:
    - protocol: TCP
      port: 80
EOF
  log_success "Network policies created"
}
# Verify installation
# Show Calico node status, the applied NetworkPolicies, and the running
# Calico/Tigera pods as a final sanity check.
verify_installation() {
  log_info "[9/10] Verifying Calico installation..."
  # Check Calico node status
  calicoctl get nodes -o wide
  # Check network policies
  kubectl get networkpolicies --all-namespaces
  # Verify pods are running. An unmatched grep exits 1, which under
  # `set -e` killed the script with no message — warn instead.
  kubectl get pods --all-namespaces | grep -E "(calico|tigera)" \
    || log_warning "No calico/tigera pods found"
  log_success "Installation verification completed"
}
# Main installation function
# Print the completion banner and suggested follow-up actions, then remove
# the temporary manifests written to /tmp.
main() {
  log_info "[10/10] Calico CNI installation completed successfully!"
  log_info "Next steps:"
  local step
  for step in \
    "1. Test network policy enforcement with: kubectl exec -it <pod> -- curl <service>" \
    "2. Monitor network flows with: calicoctl get felixConfiguration" \
    "3. Create additional network policies as needed" \
    "4. Configure logging and monitoring for policy violations"
  do
    printf ' %s\n' "$step"
  done
  cleanup
}
# Argument validation
# At most one positional argument (the pod CIDR) is accepted.
if (( $# > 1 )); then
  usage
fi

# Execute installation steps, in dependency order.
check_privileges
detect_distro
check_prerequisites
install_calico_operator
configure_calico
wait_for_calico
install_calicoctl
create_namespaces
deploy_test_apps
create_network_policies
verify_installation
main
Review the script before running. Execute with: bash install.sh