Set up Consul for dynamic service discovery in Kubernetes clusters with automatic service registration, health checks, and configuration management for microservices orchestration.
Prerequisites
- Kubernetes cluster with kubectl access
- Root or sudo access
- At least 3 nodes for Consul cluster
- Basic understanding of Kubernetes concepts
What this solves
Integrating Consul with Kubernetes provides dynamic service discovery, health checking, and configuration management for containerized applications. This eliminates the need for hard-coded service endpoints and enables automatic service registration when pods are created or destroyed.
This tutorial covers installing a Consul server cluster, deploying Consul agents on Kubernetes nodes, configuring automatic service registration, and setting up health checks for seamless microservices communication.
Step-by-step installation
Update system packages
Start by updating your package manager to ensure you get the latest versions.
sudo apt update && sudo apt upgrade -y
Install Consul server
Download and install Consul on the server nodes that will form the Consul cluster.
cd /tmp
wget https://releases.hashicorp.com/consul/1.17.0/consul_1.17.0_linux_amd64.zip
sudo apt install -y unzip
unzip consul_1.17.0_linux_amd64.zip
sudo mv consul /usr/local/bin/
sudo chmod +x /usr/local/bin/consul
Create Consul user and directories
Create a dedicated user for Consul and set up the required directory structure with proper permissions.
sudo useradd --system --home /etc/consul.d --shell /bin/false consul
sudo mkdir -p /opt/consul /etc/consul.d
sudo chown consul:consul /opt/consul /etc/consul.d
sudo chmod 755 /opt/consul /etc/consul.d
Generate Consul encryption key
Create an encryption key for securing Consul cluster communication. Save this key as you'll need it on all cluster nodes.
consul keygen
Configure Consul server
Create the main Consul server configuration file with cluster settings and encryption.
datacenter = "k8s-dc1"
data_dir = "/opt/consul"
log_level = "INFO"
node_name = "consul-server-1"
server = true
bootstrap_expect = 3
bind_addr = "203.0.113.10"
client_addr = "0.0.0.0"
retry_join = ["203.0.113.11", "203.0.113.12"]
encrypt = "your-encryption-key-here"
ui_config {
enabled = true
}
connect {
enabled = true
}
ports {
grpc = 8502
}
Create Consul systemd service
Set up a systemd service file to manage the Consul server process.
[Unit]
Description=Consul
Documentation=https://www.consul.io/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=/etc/consul.d/consul.hcl
[Service]
Type=notify
User=consul
Group=consul
ExecStart=/usr/local/bin/consul agent -config-dir=/etc/consul.d/
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Start Consul server cluster
Enable and start the Consul service on all server nodes to form the cluster.
sudo systemctl daemon-reload
sudo systemctl enable consul
sudo systemctl start consul
sudo systemctl status consul
Install Helm for Kubernetes deployment
Install Helm package manager to deploy Consul agents on Kubernetes using the official Helm chart.
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
helm repo add hashicorp https://helm.releases.hashicorp.com
helm repo update
Create Kubernetes namespace for Consul
Create a dedicated namespace for Consul components in your Kubernetes cluster.
kubectl create namespace consul
Create Consul Kubernetes configuration
Configure Helm values to connect Kubernetes Consul agents to your external Consul cluster.
global:
name: consul
datacenter: k8s-dc1
gossipEncryption:
secretName: consul-gossip-encryption-key
secretKey: gossip-encryption-key
tls:
enabled: false
acls:
manageSystemACLs: false
server:
enabled: false
client:
enabled: true
join:
- "203.0.113.10"
- "203.0.113.11"
- "203.0.113.12"
grpc: true
connectInject:
enabled: true
default: false
syncCatalog:
enabled: true
toConsul: true
toK8S: true
k8sPrefix: "k8s-"
consulPrefix: "consul-"
dns:
enabled: true
enableRedirection: true
Create Consul encryption secret
Store the Consul encryption key as a Kubernetes secret for the agents to use.
kubectl create secret generic consul-gossip-encryption-key \
--from-literal="gossip-encryption-key=your-encryption-key-here" \
--namespace consul
Deploy Consul agents to Kubernetes
Install the Consul Helm chart with your configuration to deploy agents on all Kubernetes nodes.
helm install consul hashicorp/consul \
--namespace consul \
--values consul-values.yaml \
--version "1.3.0"
Configure automatic service registration
Create a sample application with Consul service annotations for automatic registration.
apiVersion: apps/v1
kind: Deployment
metadata:
name: web-app
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: web-app
template:
metadata:
labels:
app: web-app
annotations:
consul.hashicorp.com/connect-inject: "true"
consul.hashicorp.com/service-name: "web-app"
consul.hashicorp.com/service-port: "80"
consul.hashicorp.com/service-tags: "web,http"
spec:
containers:
- name: web-app
image: nginx:latest
ports:
- containerPort: 80
name: http
---
apiVersion: v1
kind: Service
metadata:
name: web-app-service
namespace: default
annotations:
consul.hashicorp.com/service-name: "web-app"
consul.hashicorp.com/service-tags: "web,loadbalancer"
spec:
selector:
app: web-app
ports:
- port: 80
targetPort: 80
type: ClusterIP
Deploy the sample application
Apply the sample application configuration to test automatic service registration.
kubectl apply -f sample-app.yaml
Configure health checks
Create a more advanced service configuration with custom health checks and metadata.
apiVersion: apps/v1
kind: Deployment
metadata:
name: api-service
namespace: default
spec:
replicas: 3
selector:
matchLabels:
app: api-service
template:
metadata:
labels:
app: api-service
annotations:
consul.hashicorp.com/connect-inject: "true"
consul.hashicorp.com/service-name: "api-service"
consul.hashicorp.com/service-port: "3000"
consul.hashicorp.com/service-tags: "api,backend,v1"
consul.hashicorp.com/service-meta-version: "1.0.0"
consul.hashicorp.com/service-meta-team: "platform"
spec:
containers:
- name: api-service
image: node:18-alpine
ports:
- containerPort: 3000
name: http
command: ["sh", "-c"]
args:
- |
cat > server.js << EOF
const http = require('http');
const server = http.createServer((req, res) => {
if (req.url === '/health') {
res.writeHead(200, {'Content-Type': 'application/json'});
res.end(JSON.stringify({status: 'healthy', timestamp: new Date()}));
} else {
res.writeHead(200, {'Content-Type': 'application/json'});
res.end(JSON.stringify({message: 'API Service Running', version: '1.0.0'}));
}
});
server.listen(3000, () => console.log('Server running on port 3000'));
EOF
node server.js
livenessProbe:
httpGet:
path: /health
port: 3000
initialDelaySeconds: 10
periodSeconds: 30
readinessProbe:
httpGet:
path: /health
port: 3000
initialDelaySeconds: 5
periodSeconds: 10
Apply the API service configuration
Deploy the API service with health check configuration to demonstrate advanced Consul integration.
kubectl apply -f api-service.yaml
Configure firewall rules
Open the necessary ports for Consul communication between nodes and Kubernetes integration.
sudo ufw allow 8300/tcp comment 'Consul server RPC'
sudo ufw allow 8301/tcp comment 'Consul Serf LAN'
sudo ufw allow 8301/udp comment 'Consul Serf LAN'
sudo ufw allow 8302/tcp comment 'Consul Serf WAN'
sudo ufw allow 8302/udp comment 'Consul Serf WAN'
sudo ufw allow 8500/tcp comment 'Consul HTTP API'
sudo ufw allow 8502/tcp comment 'Consul gRPC API'
sudo ufw allow 8600/tcp comment 'Consul DNS'
sudo ufw allow 8600/udp comment 'Consul DNS'
Verify your setup
Check that your Consul cluster is operational and Kubernetes services are being registered automatically.
# Check Consul cluster members
consul members
Verify Consul leader election
consul operator raft list-peers
Check Kubernetes pods are running
kubectl get pods -n consul
Verify services are registered in Consul
consul catalog services
Check specific service details
consul catalog nodes -service web-app
Test DNS resolution
dig @203.0.113.10 -p 8600 web-app.service.consul
Check service health
curl http://localhost:8500/v1/health/checks/web-app
Configure service mesh communication
Enable service-to-service communication using Consul Connect for secure microservices networking.
Create service intentions
Define which services can communicate with each other using Consul's intention system.
Kind = "service-intentions"
Name = "api-service"
Sources = [
{
Name = "web-app"
Action = "allow"
},
{
Name = "*"
Action = "deny"
}
]
Apply service intentions
Load the intention configuration into Consul to enforce service communication policies.
consul config write api-intentions.hcl
Configure service mesh for existing deployments
Update your existing services to use Consul Connect sidecar proxies for secure communication.
# Restart deployments to inject Connect sidecars
kubectl rollout restart deployment/web-app
kubectl rollout restart deployment/api-service
Check sidecar injection
kubectl get pods -o wide
Common issues
| Symptom | Cause | Fix |
|---|---|---|
| Consul agents can't join cluster | Incorrect encryption key or network connectivity | Verify encryption key matches and check firewall rules: consul members |
| Services not appearing in Consul catalog | Sync catalog not configured or annotations missing | Check syncCatalog.enabled and verify pod annotations: kubectl describe pod |
| Connect injection not working | Connect inject webhook not running | Check webhook pods: kubectl get pods -n consul -l component=connect-injector |
| DNS resolution failing | Consul DNS not configured in cluster | Verify DNS service: kubectl get svc -n consul consul-dns |
| Health checks always failing | Incorrect health check endpoint or timing | Check application logs and adjust probe settings: kubectl logs pod-name |
| Service mesh traffic blocked | Default deny intentions or missing intentions | Create allow intentions: consul intention create web-app api-service |
Next steps
- Implement Consul ACL security and encryption for production deployments
- Monitor Consul with Prometheus and Grafana for service discovery observability
- Configure Consul backup and disaster recovery with automated snapshots and restoration
- Set up Istio multi-cluster service mesh with cross-cluster communication
- Configure Kubernetes network policies with Calico CNI for microsegmentation and security enforcement
Automated install script
Run the following script to automate the entire server-side setup.
#!/usr/bin/env bash
set -euo pipefail  # -e: abort on error; -u: error on unset vars; pipefail: a pipeline fails if any stage fails
# Consul + Kubernetes Integration Install Script
# Installs Consul server and prepares Kubernetes integration
# Colors
# ANSI escape sequences for colored progress/error output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'  # reset (no color)
# Global variables
CONSUL_VERSION="1.17.0"  # Consul release to download from releases.hashicorp.com
CONSUL_USER="consul"  # dedicated system account the agent runs as
CONSUL_CONFIG_DIR="/etc/consul.d"  # HCL configuration directory
CONSUL_DATA_DIR="/opt/consul"  # raft/data directory
CONSUL_BINARY="/usr/local/bin/consul"  # install destination for the binary
CLUSTER_SIZE=3  # expected server count (feeds bootstrap_expect)
ENCRYPTION_KEY=""  # populated later by generate_encryption_key()
# Usage function
# Prints the CLI help text to stdout and terminates with status 1.
usage() {
  printf '%s\n' \
    "Usage: $0 [OPTIONS]" \
    "Install Consul server for Kubernetes integration" \
    "OPTIONS:" \
    "-s, --server-ip Primary server IP address (required)" \
    "-j, --join-ips Comma-separated list of cluster IPs (required)" \
    "-n, --node-name Consul node name (default: consul-server-1)" \
    "-d, --datacenter Datacenter name (default: k8s-dc1)" \
    "-h, --help Show this help" \
    "Example:" \
    "$0 -s 203.0.113.10 -j 203.0.113.10,203.0.113.11,203.0.113.12"
  exit 1
}
# Cleanup function
# Rolls back a partial install when the script exits with an error.
# Runs via the ERR trap below, so $? reflects the failing command.
# NOTE: variable expansions are quoted and rm uses `--` (ShellCheck
# SC2086) so an unexpected value can never be word-split or parsed
# as an option.
cleanup() {
  if [[ $? -ne 0 ]]; then
    echo -e "${RED}[ERROR] Installation failed. Cleaning up...${NC}"
    systemctl stop consul 2>/dev/null || true
    systemctl disable consul 2>/dev/null || true
    rm -f /etc/systemd/system/consul.service
    rm -rf -- "$CONSUL_CONFIG_DIR" "$CONSUL_DATA_DIR"
    userdel "$CONSUL_USER" 2>/dev/null || true
    rm -f -- "$CONSUL_BINARY"
  fi
}
trap cleanup ERR
# Detect distribution
# Reads /etc/os-release and selects package-manager commands for the
# distro family. Sets the globals PKG_MGR, PKG_UPDATE, PKG_INSTALL and
# UNZIP_PKG; exits with status 1 on unknown or undetectable systems.
detect_distro() {
  if [[ ! -f /etc/os-release ]]; then
    echo -e "${RED}Cannot detect distribution${NC}"
    exit 1
  fi
  . /etc/os-release
  case "$ID" in
    ubuntu|debian)
      PKG_MGR="apt"
      PKG_UPDATE="apt update && apt upgrade -y"
      PKG_INSTALL="apt install -y"
      ;;
    almalinux|rocky|centos|rhel|ol|fedora)
      PKG_MGR="dnf"
      PKG_UPDATE="dnf update -y"
      PKG_INSTALL="dnf install -y"
      ;;
    amzn)
      PKG_MGR="yum"
      PKG_UPDATE="yum update -y"
      PKG_INSTALL="yum install -y"
      ;;
    *)
      echo -e "${RED}Unsupported distribution: $ID${NC}"
      exit 1
      ;;
  esac
  # The unzip package name is identical across all supported families.
  UNZIP_PKG="unzip"
}
# Check prerequisites
# Verifies the script runs as root (it writes system paths) and makes
# sure the download tools wget and curl are available, installing any
# that are missing via $PKG_INSTALL.
check_prerequisites() {
  echo -e "${YELLOW}[1/10] Checking prerequisites...${NC}"
  if [[ $EUID -ne 0 ]]; then
    echo -e "${RED}This script must be run as root${NC}"
    exit 1
  fi
  local tool
  for tool in wget curl; do
    if ! command -v "$tool" &> /dev/null; then
      $PKG_INSTALL "$tool"
    fi
  done
}
# Parse arguments
# Parses CLI flags into the globals SERVER_IP, JOIN_IPS, NODE_NAME and
# DATACENTER. Exits via usage() on unknown flags, missing option values,
# or missing required arguments.
parse_args() {
  SERVER_IP=""
  JOIN_IPS=""
  NODE_NAME="consul-server-1"
  DATACENTER="k8s-dc1"
  while [[ $# -gt 0 ]]; do
    case $1 in
      -s|--server-ip)
        # ${2:-} avoids an "unbound variable" abort under set -u when a
        # flag is passed without its value; fail with a clear message.
        [[ -n "${2:-}" ]] || { echo -e "${RED}Option $1 requires a value${NC}"; usage; }
        SERVER_IP="$2"
        shift 2
        ;;
      -j|--join-ips)
        [[ -n "${2:-}" ]] || { echo -e "${RED}Option $1 requires a value${NC}"; usage; }
        JOIN_IPS="$2"
        shift 2
        ;;
      -n|--node-name)
        [[ -n "${2:-}" ]] || { echo -e "${RED}Option $1 requires a value${NC}"; usage; }
        NODE_NAME="$2"
        shift 2
        ;;
      -d|--datacenter)
        [[ -n "${2:-}" ]] || { echo -e "${RED}Option $1 requires a value${NC}"; usage; }
        DATACENTER="$2"
        shift 2
        ;;
      -h|--help)
        usage
        ;;
      *)
        echo -e "${RED}Unknown option: $1${NC}"
        usage
        ;;
    esac
  done
  # Both the bind address and the cluster join list are mandatory.
  if [[ -z "$SERVER_IP" ]] || [[ -z "$JOIN_IPS" ]]; then
    echo -e "${RED}Server IP and join IPs are required${NC}"
    usage
  fi
}
# Update system packages
# Refreshes package metadata and upgrades installed packages.
# PKG_UPDATE may contain shell operators (e.g. "apt update && apt
# upgrade -y"), so it must be run through eval: a bare unquoted
# $PKG_UPDATE expansion would pass "&&" as a literal argument to the
# package manager instead of chaining two commands.
update_system() {
  echo -e "${YELLOW}[2/10] Updating system packages...${NC}"
  eval "$PKG_UPDATE"
}
# Install Consul
# Downloads the pinned Consul release zip, extracts the binary and
# installs it to $CONSUL_BINARY. Under the script's `set -e`, a failed
# download or extraction aborts the run (and triggers cleanup).
install_consul() {
  echo -e "${YELLOW}[3/10] Installing Consul...${NC}"
  local zip_name="consul_${CONSUL_VERSION}_linux_amd64.zip"
  cd /tmp
  wget -q "https://releases.hashicorp.com/consul/${CONSUL_VERSION}/${zip_name}"
  $PKG_INSTALL "$UNZIP_PKG"
  # -o: overwrite a leftover ./consul from a previous run instead of
  # hanging on unzip's interactive overwrite prompt.
  unzip -oq "$zip_name"
  mv consul "$CONSUL_BINARY"
  chmod 755 "$CONSUL_BINARY"
  rm -f -- "$zip_name"
  echo -e "${GREEN}Consul installed successfully${NC}"
}
# Create Consul user and directories
# Creates the locked-down system account (no login shell, home at the
# config dir) if absent, then provisions the data and config
# directories owned by that account.
setup_consul_user() {
  echo -e "${YELLOW}[4/10] Creating Consul user and directories...${NC}"
  if ! id "$CONSUL_USER" &>/dev/null; then
    useradd --system --home "$CONSUL_CONFIG_DIR" --shell /bin/false "$CONSUL_USER"
  fi
  local dir
  for dir in "$CONSUL_DATA_DIR" "$CONSUL_CONFIG_DIR"; do
    mkdir -p "$dir"
    chown "$CONSUL_USER:$CONSUL_USER" "$dir"
    chmod 755 "$dir"
  done
}
# Generate encryption key
# Uses `consul keygen` to produce a random gossip-encryption key and
# stores it in the global ENCRYPTION_KEY. The same key must be reused
# on every node that joins this cluster.
generate_encryption_key() {
  echo -e "${YELLOW}[5/10] Generating encryption key...${NC}"
  ENCRYPTION_KEY="$("$CONSUL_BINARY" keygen)"
  echo -e "${GREEN}Encryption key generated: $ENCRYPTION_KEY${NC}"
  echo -e "${YELLOW}Save this key for other cluster nodes!${NC}"
}
# Configure Consul
# Renders $CONSUL_CONFIG_DIR/consul.hcl from the parsed CLI settings
# and the generated encryption key, then restricts ownership and
# permissions (the file contains the gossip key).
configure_consul() {
  echo -e "${YELLOW}[6/10] Configuring Consul server...${NC}"
  # Convert the comma-separated JOIN_IPS into an HCL string array.
  # Whitespace around each entry is stripped with parameter expansion
  # instead of the original `echo $ip | xargs`, which forked a pipeline
  # per address and mishandles values starting with '-'.
  local -a ip_array
  local raw entry retry_join=""
  IFS=',' read -ra ip_array <<< "$JOIN_IPS"
  for raw in "${ip_array[@]}"; do
    entry="${raw#"${raw%%[![:space:]]*}"}"      # strip leading whitespace
    entry="${entry%"${entry##*[![:space:]]}"}"  # strip trailing whitespace
    retry_join+="\"${entry}\","
  done
  retry_join="${retry_join%,}"                  # drop trailing comma
  cat > "$CONSUL_CONFIG_DIR/consul.hcl" << EOF
datacenter = "$DATACENTER"
data_dir = "$CONSUL_DATA_DIR"
log_level = "INFO"
node_name = "$NODE_NAME"
server = true
bootstrap_expect = $CLUSTER_SIZE
bind_addr = "$SERVER_IP"
client_addr = "0.0.0.0"
retry_join = [$retry_join]
encrypt = "$ENCRYPTION_KEY"
ui_config {
  enabled = true
}
connect {
  enabled = true
}
ports {
  grpc = 8502
}
EOF
  # 640: readable by the consul user/group only — it holds the key.
  chown "$CONSUL_USER:$CONSUL_USER" "$CONSUL_CONFIG_DIR/consul.hcl"
  chmod 640 "$CONSUL_CONFIG_DIR/consul.hcl"
}
# Create systemd service
# Writes the consul.service unit so systemd supervises the agent.
# Type=notify lets Consul signal readiness; ConditionFileNotEmpty
# skips startup when the config file is missing or empty.
create_systemd_service() {
  echo -e "${YELLOW}[7/10] Creating systemd service...${NC}"
  local unit_path="/etc/systemd/system/consul.service"
  # \$MAINPID stays literal so systemd (not this script) expands it.
  cat > "$unit_path" << UNIT
[Unit]
Description=Consul
Documentation=https://www.consul.io/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=$CONSUL_CONFIG_DIR/consul.hcl
[Service]
Type=notify
User=$CONSUL_USER
Group=$CONSUL_USER
ExecStart=$CONSUL_BINARY agent -config-dir=$CONSUL_CONFIG_DIR/
ExecReload=/bin/kill -HUP \$MAINPID
KillMode=process
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
UNIT
  chmod 644 "$unit_path"
}
# Configure firewall
# Opens Consul's cluster and client ports with whichever firewall tool
# is present (firewalld first, then ufw); silently does nothing when
# neither is installed.
# NOTE: port 8600 (DNS, tcp+udp) was previously missing here even
# though the manual steps in this guide open it — without it
# *.service.consul lookups from other hosts fail.
configure_firewall() {
  echo -e "${YELLOW}[8/10] Configuring firewall...${NC}"
  if command -v firewall-cmd &> /dev/null; then
    firewall-cmd --permanent --add-port=8300/tcp # Server RPC
    firewall-cmd --permanent --add-port=8301/tcp # Serf LAN
    firewall-cmd --permanent --add-port=8301/udp # Serf LAN
    firewall-cmd --permanent --add-port=8302/tcp # Serf WAN
    firewall-cmd --permanent --add-port=8302/udp # Serf WAN
    firewall-cmd --permanent --add-port=8500/tcp # HTTP API
    firewall-cmd --permanent --add-port=8502/tcp # gRPC
    firewall-cmd --permanent --add-port=8600/tcp # DNS
    firewall-cmd --permanent --add-port=8600/udp # DNS
    firewall-cmd --reload
  elif command -v ufw &> /dev/null; then
    ufw allow 8300/tcp
    ufw allow 8301 # Serf LAN (tcp+udp)
    ufw allow 8302 # Serf WAN (tcp+udp)
    ufw allow 8500/tcp
    ufw allow 8502/tcp
    ufw allow 8600 # DNS (tcp+udp)
  fi
}
# Start Consul service
# Enables Consul at boot, starts it now, and verifies it stayed up.
start_consul() {
  echo -e "${YELLOW}[9/10] Starting Consul service...${NC}"
  systemctl daemon-reload
  systemctl enable consul
  systemctl start consul
  # Give the agent a moment to bind ports before checking its state.
  sleep 5
  if ! systemctl is-active --quiet consul; then
    echo -e "${RED}Failed to start Consul service${NC}"
    systemctl status consul
    exit 1
  fi
  echo -e "${GREEN}Consul service started successfully${NC}"
}
# Verify installation
# Confirms the binary executes and the service is active (fatal if
# not), then checks cluster membership on a best-effort basis since
# peers may still be joining.
verify_installation() {
  echo -e "${YELLOW}[10/10] Verifying installation...${NC}"
  if ! "$CONSUL_BINARY" version &>/dev/null; then
    echo -e "${RED}✗ Consul binary test failed${NC}"
    exit 1
  fi
  echo -e "${GREEN}✓ Consul binary is working${NC}"
  if ! systemctl is-active --quiet consul; then
    echo -e "${RED}✗ Consul service is not running${NC}"
    exit 1
  fi
  echo -e "${GREEN}✓ Consul service is running${NC}"
  if "$CONSUL_BINARY" members &>/dev/null; then
    echo -e "${GREEN}✓ Consul cluster communication working${NC}"
    "$CONSUL_BINARY" members
  else
    echo -e "${YELLOW}⚠ Consul cluster not fully formed yet${NC}"
  fi
  echo -e "${GREEN}Installation completed successfully!${NC}"
  echo -e "${YELLOW}Consul UI available at: http://$SERVER_IP:8500${NC}"
  echo -e "${YELLOW}Encryption key: $ENCRYPTION_KEY${NC}"
}
# Main execution
# Runs the full install pipeline in order; each step prints its own
# [n/10] progress banner. Command-line arguments are forwarded to
# parse_args. Any failing step aborts the run (set -e) and triggers
# the cleanup ERR trap defined above.
main() {
parse_args "$@"
detect_distro
check_prerequisites
update_system
install_consul
setup_consul_user
generate_encryption_key
configure_consul
create_systemd_service
configure_firewall
start_consul
verify_installation
}
main "$@"
Review the script before running. Execute with: bash install.sh