Learn how to create production-ready Helm charts for Kubernetes deployments with proper templating, values management, security configurations, and environment-specific customizations for scalable application orchestration.
Prerequisites
- Running Kubernetes cluster with kubectl access
- Basic understanding of Kubernetes resources
- Docker registry access for container images
What this solves
Helm charts provide a templated approach to Kubernetes deployments, allowing you to package, version, and manage complex applications with consistent configurations across environments. This tutorial covers creating production-grade Helm charts with proper security practices, resource management, and deployment patterns that scale from development to production clusters.
Prerequisites and cluster preparation
Verify Kubernetes cluster access
Ensure you have a running Kubernetes cluster and kubectl access with appropriate permissions.
kubectl cluster-info
kubectl get nodes
kubectl auth can-i create deployments --namespace=default
Install Helm 3
Download and install the latest Helm client for managing Kubernetes applications.
curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt update
sudo apt install -y helm
Configure Helm repositories
Add essential Helm repositories for common dependencies and verify installation.
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm version
Create namespace and RBAC
Set up dedicated namespaces with proper service accounts for application deployments.
kubectl create namespace myapp-dev
kubectl create namespace myapp-prod
kubectl create serviceaccount myapp-sa --namespace=myapp-dev
kubectl create serviceaccount myapp-sa --namespace=myapp-prod
Helm chart creation and templating best practices
Create a new Helm chart
Generate a basic chart structure and examine the default template files.
helm create myapp
cd myapp
ls -la
tree .
Configure Chart.yaml with metadata
Define chart metadata, version, and dependencies for proper packaging and management.
apiVersion: v2
name: myapp
description: A production-ready web application Helm chart
type: application
version: 1.0.0
appVersion: "2.1.0"
home: https://example.com
sources:
- https://github.com/example/myapp
maintainers:
- name: Infrastructure Team
email: infra@example.com
keywords:
- web
- application
- microservice
annotations:
category: Application
dependencies:
- name: postgresql
version: 12.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: redis
version: 17.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled
Design production values.yaml
Create a comprehensive values file with environment-specific configurations and sensible defaults.
replicaCount: 3
image:
repository: myapp
pullPolicy: IfNotPresent
tag: "2.1.0"
imagePullSecrets:
- name: regcred
nameOverride: ""
fullnameOverride: ""
serviceAccount:
create: true
annotations: {}
name: ""
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"
prometheus.io/path: "/metrics"
podSecurityContext:
runAsNonRoot: true
runAsUser: 10001
runAsGroup: 10001
fsGroup: 10001
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
service:
type: ClusterIP
port: 80
targetPort: 8080
ingress:
enabled: true
className: nginx
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/rate-limit: "100"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
hosts:
- host: myapp.example.com
paths:
- path: /
pathType: Prefix
tls:
- secretName: myapp-tls
hosts:
- myapp.example.com
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 250m
memory: 256Mi
autoscaling:
enabled: true
minReplicas: 3
maxReplicas: 10
targetCPUUtilizationPercentage: 70
targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- myapp
topologyKey: kubernetes.io/hostname
postgresql:
enabled: true
auth:
database: myapp
username: myapp
existingSecret: myapp-postgresql
primary:
resources:
requests:
memory: 256Mi
cpu: 250m
limits:
memory: 512Mi
cpu: 500m
redis:
enabled: true
auth:
existingSecret: myapp-redis
master:
resources:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
config:
logLevel: info
maxConnections: 100
cacheTimeout: 300
Create production-ready deployment template
Build a deployment template with security best practices, health checks, and resource management.
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "myapp.fullname" . }}
labels:
{{- include "myapp.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "myapp.selectorLabels" . | nindent 6 }}
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "myapp.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "myapp.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.targetPort }}
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /ready
port: http
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 2
resources:
{{- toYaml .Values.resources | nindent 12 }}
env:
- name: LOG_LEVEL
value: {{ .Values.config.logLevel | quote }}
- name: MAX_CONNECTIONS
value: {{ .Values.config.maxConnections | quote }}
- name: CACHE_TIMEOUT
value: {{ .Values.config.cacheTimeout | quote }}
{{- if .Values.postgresql.enabled }}
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: {{ include "myapp.fullname" . }}-database
key: url
{{- end }}
{{- if .Values.redis.enabled }}
- name: REDIS_URL
valueFrom:
secretKeyRef:
name: {{ include "myapp.fullname" . }}-redis
key: url
{{- end }}
volumeMounts:
- name: tmp
mountPath: /tmp
- name: cache
mountPath: /app/cache
- name: config
mountPath: /app/config
readOnly: true
volumes:
- name: tmp
emptyDir: {}
- name: cache
emptyDir: {}
- name: config
configMap:
name: {{ include "myapp.fullname" . }}-config
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
Create ConfigMap and Secret templates
Separate configuration and sensitive data using ConfigMaps and Secrets with proper templating.
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "myapp.fullname" . }}-config
labels:
{{- include "myapp.labels" . | nindent 4 }}
data:
app.conf: |
log_level = {{ .Values.config.logLevel }}
max_connections = {{ .Values.config.maxConnections }}
cache_timeout = {{ .Values.config.cacheTimeout }}
[server]
port = {{ .Values.service.targetPort }}
{{- if .Values.postgresql.enabled }}
[database]
type = postgresql
{{- end }}
{{- if .Values.redis.enabled }}
[cache]
type = redis
{{- end }}
{{- if .Values.postgresql.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "myapp.fullname" . }}-database
labels:
{{- include "myapp.labels" . | nindent 4 }}
type: Opaque
data:
url: {{ printf "postgresql://%s:%s@%s-postgresql:5432/%s" .Values.postgresql.auth.username "$(POSTGRES_PASSWORD)" (include "myapp.fullname" .) .Values.postgresql.auth.database | b64enc | quote }}
# NOTE: "$(POSTGRES_PASSWORD)" is stored literally — Kubernetes does not expand variables inside Secret data. Substitute the real password at deploy time (e.g. via --set or an external secrets operator) before relying on this URL.
---
{{- end }}
{{- if .Values.redis.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "myapp.fullname" . }}-redis
labels:
{{- include "myapp.labels" . | nindent 4 }}
type: Opaque
data:
url: {{ printf "redis://:%s@%s-redis-master:6379/0" "$(REDIS_PASSWORD)" (include "myapp.fullname" .) | b64enc | quote }}
# NOTE: "$(REDIS_PASSWORD)" is stored literally and must be substituted at deploy time; Kubernetes does not expand variables inside Secret data.
{{- end }}
Add HorizontalPodAutoscaler template
Enable automatic scaling based on CPU and memory metrics for production workloads.
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "myapp.fullname" . }}
labels:
{{- include "myapp.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "myapp.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}
Create PodDisruptionBudget for availability
Ensure application availability during cluster maintenance and node failures.
{{- if gt (.Values.replicaCount | int) 1 }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ include "myapp.fullname" . }}
labels:
{{- include "myapp.labels" . | nindent 4 }}
spec:
minAvailable: {{ if .Values.autoscaling.enabled }}{{ .Values.autoscaling.minReplicas }}{{ else }}{{ sub .Values.replicaCount 1 }}{{ end }}
selector:
matchLabels:
{{- include "myapp.selectorLabels" . | nindent 6 }}
{{- end }}
Application deployment with values management
Create environment-specific values files
Define separate configuration files for different deployment environments with appropriate resource allocations.
mkdir environments
touch environments/values-dev.yaml
touch environments/values-staging.yaml
touch environments/values-prod.yaml
Configure development environment values
Create minimal resource configuration for development deployments with debugging enabled.
replicaCount: 1
image:
tag: "dev-latest"
pullPolicy: Always
resources:
limits:
cpu: 200m
memory: 256Mi
requests:
cpu: 100m
memory: 128Mi
autoscaling:
enabled: false
ingress:
hosts:
- host: myapp-dev.example.com
paths:
- path: /
pathType: Prefix
tls:
- secretName: myapp-dev-tls
hosts:
- myapp-dev.example.com
config:
logLevel: debug
maxConnections: 50
postgresql:
primary:
resources:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 256Mi
cpu: 200m
redis:
master:
resources:
requests:
memory: 64Mi
cpu: 50m
limits:
memory: 128Mi
cpu: 100m
Configure production environment values
Set production-grade resource limits, multiple replicas, and enhanced security settings.
replicaCount: 5
image:
tag: "2.1.0"
pullPolicy: IfNotPresent
resources:
limits:
cpu: 1000m
memory: 1Gi
requests:
cpu: 500m
memory: 512Mi
autoscaling:
enabled: true
minReplicas: 5
maxReplicas: 20
targetCPUUtilizationPercentage: 60
targetMemoryUtilizationPercentage: 70
ingress:
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/rate-limit: "200"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
hosts:
- host: myapp.example.com
paths:
- path: /
pathType: Prefix
tls:
- secretName: myapp-prod-tls
hosts:
- myapp.example.com
config:
logLevel: warn
maxConnections: 200
cacheTimeout: 600
nodeSelector:
node-type: application
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- myapp
topologyKey: kubernetes.io/hostname
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-type
operator: In
values:
- application
postgresql:
primary:
resources:
requests:
memory: 1Gi
cpu: 500m
limits:
memory: 2Gi
cpu: 1000m
persistence:
enabled: true
size: 20Gi
storageClass: fast-ssd
redis:
master:
resources:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 1Gi
cpu: 500m
persistence:
enabled: true
size: 10Gi
Update chart dependencies
Download and update all chart dependencies before deployment.
helm dependency update
helm dependency list
Deploy to development environment
Install the application in the development namespace with environment-specific values.
helm install myapp-dev . \
--namespace myapp-dev \
--values environments/values-dev.yaml \
--set image.tag=dev-$(date +%Y%m%d) \
--wait --timeout=300s
Deploy to production with validation
Perform a dry-run first, then deploy to production with comprehensive validation and monitoring.
# Validate the chart and templates
helm lint .
helm template myapp-prod . \
--namespace myapp-prod \
--values environments/values-prod.yaml \
--validate
Dry-run the deployment
helm install myapp-prod . \
--namespace myapp-prod \
--values environments/values-prod.yaml \
--dry-run --debug
Deploy to production
helm install myapp-prod . \
--namespace myapp-prod \
--values environments/values-prod.yaml \
--wait --timeout=600s \
--atomic
Production deployment patterns and security hardening
Implement NetworkPolicies for microsegmentation
Create network policies to restrict pod-to-pod communication and enhance security.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ include "myapp.fullname" . }}
labels:
{{- include "myapp.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "myapp.selectorLabels" . | nindent 6 }}
policyTypes:
- Ingress
- Egress
ingress:
- from:
- namespaceSelector:
matchLabels:
name: ingress-nginx
- podSelector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
ports:
- protocol: TCP
port: {{ .Values.service.targetPort }}
egress:
- to:
- podSelector:
matchLabels:
app.kubernetes.io/name: postgresql
ports:
- protocol: TCP
port: 5432
- to:
- podSelector:
matchLabels:
app.kubernetes.io/name: redis
ports:
- protocol: TCP
port: 6379
- to: []
ports:
- protocol: TCP
port: 53
- protocol: UDP
port: 53
- to: []
ports:
- protocol: TCP
port: 443
Add Pod Security Standards
Configure Pod Security Standards to enforce security policies at the namespace level. Note that the "restricted" profile additionally requires seccompProfile.type: RuntimeDefault in the pod or container securityContext — add it to the chart's values.yaml, or pods will be rejected in the myapp-prod namespace.
kubectl label namespace myapp-prod \
pod-security.kubernetes.io/enforce=restricted \
pod-security.kubernetes.io/audit=restricted \
pod-security.kubernetes.io/warn=restricted
kubectl label namespace myapp-dev \
pod-security.kubernetes.io/enforce=baseline \
pod-security.kubernetes.io/audit=restricted \
pod-security.kubernetes.io/warn=restricted
Create RBAC policies
Define minimal RBAC permissions for the application service account.
{{- if .Values.serviceAccount.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "myapp.serviceAccountName" . }}
labels:
{{- include "myapp.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
resourceNames:
- {{ include "myapp.fullname" . }}-database
- {{ include "myapp.fullname" . }}-redis
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "myapp.serviceAccountName" . }}
labels:
{{- include "myapp.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "myapp.serviceAccountName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "myapp.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}
Implement resource quotas
Set namespace-level resource quotas to prevent resource exhaustion and ensure fair allocation.
apiVersion: v1
kind: ResourceQuota
metadata:
name: myapp-quota
namespace: myapp-prod
spec:
hard:
requests.cpu: "10"
requests.memory: 20Gi
limits.cpu: "20"
limits.memory: 40Gi
persistentvolumeclaims: "10"
pods: "50"
services: "10"
secrets: "20"
configmaps: "20"
kubectl apply -f resource-quota-prod.yaml
Configure monitoring and observability
Add ServiceMonitor for Prometheus metrics collection and integrate with existing monitoring infrastructure.
{{- if .Values.monitoring.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "myapp.fullname" . }}
labels:
{{- include "myapp.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
{{- include "myapp.selectorLabels" . | nindent 6 }}
endpoints:
- port: http
path: /metrics
interval: 30s
scrapeTimeout: 10s
{{- end }}
Upgrade application with rolling deployment
Perform safe application upgrades with rollback capability and health checks.
# Upgrade with new image version
helm upgrade myapp-prod . \
--namespace myapp-prod \
--values environments/values-prod.yaml \
--set image.tag=2.1.1 \
--wait --timeout=600s \
--atomic
Monitor rollout status
kubectl rollout status deployment/myapp-prod --namespace=myapp-prod
Rollback if needed
helm rollback myapp-prod 1 --namespace=myapp-prod
Verify your setup
# Check Helm releases
helm list --all-namespaces
Verify application pods
kubectl get pods --namespace=myapp-prod -l app.kubernetes.io/name=myapp
Check service and ingress
kubectl get svc,ingress --namespace=myapp-prod
Verify autoscaling
kubectl get hpa --namespace=myapp-prod
Test application endpoint
curl -k https://myapp.example.com/health  # -k skips TLS certificate verification; drop it once the Let's Encrypt certificate has been issued
Check resource usage
kubectl top pods --namespace=myapp-prod
Verify security context
kubectl get pod -o jsonpath='{.items[0].spec.securityContext}' --namespace=myapp-prod
Check network policies
kubectl get networkpolicy --namespace=myapp-prod
Common issues
| Symptom | Cause | Fix |
|---|---|---|
| Pods stuck in Pending | Insufficient cluster resources | kubectl describe pod and adjust resource requests or add nodes |
| ImagePullBackOff errors | Invalid image tag or registry auth | Verify image exists and check imagePullSecrets configuration |
| CrashLoopBackOff status | Application startup failure | kubectl logs to check application logs and health check endpoints |
| Ingress not working | Missing ingress controller or DNS | Verify ingress controller is running and DNS points to cluster |
| Database connection failed | NetworkPolicy blocking traffic | Check NetworkPolicy rules allow database connections on port 5432 |
| Horizontal Pod Autoscaler not scaling | Missing metrics server | Install metrics-server: kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml |
| Helm upgrade fails | Resource conflicts or validation errors | Use helm diff plugin and --dry-run to identify issues |
| Secret not found | Dependency secrets not created | Ensure PostgreSQL and Redis secrets exist before application deployment |
Next steps
- Integrate GitLab CI/CD pipelines for automated Helm deployments
- Set up HashiCorp Vault integration for secure secrets management
- Configure automated SSL certificates with cert-manager and Let's Encrypt
- Implement cluster autoscaling for dynamic node management
- Set up comprehensive cluster monitoring with Prometheus and Grafana
Running this in production?
Automated install script
Run this to automate the entire setup
#!/usr/bin/env bash
# Automated setup: installs kubectl/Helm, configures chart repositories,
# creates dev/prod namespaces + service accounts, and scaffolds a
# production-ready Helm chart under /opt/helm-charts.
set -euo pipefail
# Colors for output (ANSI escape sequences; rendered via `echo -e`)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Global variables (overridable via environment or the CLI flags parsed below)
NAMESPACE_DEV="${NAMESPACE_DEV:-myapp-dev}"
NAMESPACE_PROD="${NAMESPACE_PROD:-myapp-prod}"
CHART_NAME="${CHART_NAME:-myapp}"
# Per-process scratch directory ($$ = PID); removed by cleanup() on exit
WORK_DIR="/tmp/helm-install-$$"
# Print CLI usage and exit non-zero. Called for --help and on any
# argument-parsing error, so the exit status is intentionally 1.
usage() {
  cat <<USAGE
Usage: $0 [OPTIONS]
Options:
 --namespace-dev NAME Development namespace (default: myapp-dev)
 --namespace-prod NAME Production namespace (default: myapp-prod)
 --chart-name NAME Chart name (default: myapp)
 --help Show this help message
USAGE
  exit 1
}
# Parse command-line flags; unknown options print an error and the usage text.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --namespace-dev)  NAMESPACE_DEV="$2";  shift 2 ;;
    --namespace-prod) NAMESPACE_PROD="$2"; shift 2 ;;
    --chart-name)     CHART_NAME="$2";     shift 2 ;;
    --help) usage ;;
    *)
      echo -e "${RED}Unknown option: $1${NC}"
      usage
      ;;
  esac
done
# Remove the scratch directory if it exists. Safe to call repeatedly;
# invoked from both the EXIT trap and error_exit().
cleanup() {
  local scratch="$WORK_DIR"
  if [[ -d "$scratch" ]]; then
    rm -rf -- "$scratch"
  fi
}
# Print a red error message to stderr, remove scratch files, and abort
# the whole script with status 1.
error_exit() {
  local message="$1"
  echo -e "${RED}Error: ${message}${NC}" >&2
  cleanup
  exit 1
}
# Trap errors
# `set -E` (errtrace) makes the ERR trap fire inside functions and command
# substitutions too; without it, a failure inside any function would still
# abort (set -e) but silently skip the diagnostic message below.
set -E
trap 'error_exit "Installation failed on line $LINENO"' ERR
# Always remove the scratch directory, whatever the exit path.
trap cleanup EXIT
# Determine whether privileged commands need a sudo prefix.
# Sets the global SUDO to "" (already root) or "sudo"; aborts when the
# user is unprivileged and sudo is unavailable.
check_privileges() {
  if (( EUID == 0 )); then
    SUDO=""
    return 0
  fi
  if command -v sudo >/dev/null 2>&1; then
    SUDO="sudo"
    return 0
  fi
  error_exit "This script requires root privileges or sudo"
}
# Identify the distribution from /etc/os-release and set the package-manager
# command fragments (PKG_MGR, PKG_INSTALL, PKG_UPDATE) used by later steps.
# NOTE: sourcing os-release imports its variables (ID, NAME, ...) globally.
detect_os() {
  local release_file=/etc/os-release
  if [[ ! -f "$release_file" ]]; then
    error_exit "Cannot detect OS - /etc/os-release not found"
  fi
  . "$release_file"
  case "$ID" in
    ubuntu|debian)
      PKG_MGR="apt"
      PKG_INSTALL="apt install -y"
      PKG_UPDATE="apt update"
      ;;
    almalinux|rocky|centos|rhel|ol|fedora)
      PKG_MGR="dnf"
      PKG_INSTALL="dnf install -y"
      PKG_UPDATE="dnf update -y"
      ;;
    amzn)
      PKG_MGR="yum"
      PKG_INSTALL="yum install -y"
      PKG_UPDATE="yum update -y"
      ;;
    *)
      error_exit "Unsupported distribution: $ID"
      ;;
  esac
}
# Install prerequisites
# Install base tooling (curl, gnupg, CA certificates) and kubectl if missing.
# Relies on globals set earlier: SUDO (check_privileges) and PKG_MGR /
# PKG_INSTALL / PKG_UPDATE (detect_os).
install_prerequisites() {
echo -e "${BLUE}[1/7] Installing prerequisites...${NC}"
# $PKG_UPDATE / $PKG_INSTALL are intentionally unquoted: they hold multi-word
# commands (e.g. "apt install -y") that must word-split into command + args.
$SUDO $PKG_UPDATE
case "$PKG_MGR" in
apt)
$SUDO $PKG_INSTALL curl wget gnupg apt-transport-https ca-certificates
;;
dnf|yum)
$SUDO $PKG_INSTALL curl wget gnupg2 ca-certificates
;;
esac
# Install kubectl if not present
if ! command -v kubectl >/dev/null 2>&1; then
echo -e "${YELLOW}kubectl not found, installing...${NC}"
case "$PKG_MGR" in
apt)
# NOTE(review): assumes /etc/apt/keyrings already exists; older releases may
# need `mkdir -p /etc/apt/keyrings` first — confirm on target images.
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | $SUDO gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | $SUDO tee /etc/apt/sources.list.d/kubernetes.list
$SUDO apt update
$SUDO $PKG_INSTALL kubectl
;;
dnf|yum)
# Heredoc below is the literal yum repo definition — no comments inside it.
cat <<EOF | $SUDO tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
$SUDO $PKG_INSTALL kubectl
;;
esac
fi
}
# Confirm the cluster is reachable and nodes are listable; warn (but
# continue) when deployment-creation permission cannot be verified.
verify_cluster_access() {
  echo -e "${BLUE}[2/7] Verifying Kubernetes cluster access...${NC}"
  kubectl cluster-info >/dev/null 2>&1 \
    || error_exit "Cannot connect to Kubernetes cluster. Please check your kubeconfig."
  kubectl get nodes >/dev/null 2>&1 \
    || error_exit "Cannot list cluster nodes. Check your permissions."
  if ! kubectl auth can-i create deployments --namespace=default >/dev/null 2>&1; then
    echo -e "${YELLOW}Warning: May not have sufficient permissions to create deployments${NC}"
  fi
  echo -e "${GREEN}Kubernetes cluster access verified${NC}"
}
# Install the Helm 3 CLI if it is not already on PATH.
# apt: installs from the official baltocdn repository.
# dnf/yum: uses the official get-helm-3 installer script, which downloads
# the correct binary and installs it into /usr/local/bin itself.
install_helm() {
  echo -e "${BLUE}[3/7] Installing Helm...${NC}"
  if command -v helm >/dev/null 2>&1; then
    echo -e "${YELLOW}Helm already installed, skipping...${NC}"
    return
  fi
  case "$PKG_MGR" in
    apt)
      # -fsSL makes curl fail on HTTP errors instead of piping an error page to gpg
      curl -fsSL https://baltocdn.com/helm/signing.asc | $SUDO gpg --dearmor --yes -o /usr/share/keyrings/helm.gpg
      echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | $SUDO tee /etc/apt/sources.list.d/helm-stable-debian.list
      $SUDO apt update
      $SUDO $PKG_INSTALL helm
      ;;
    dnf|yum)
      # BUG FIX: get-helm-3 installs the binary into /usr/local/bin by itself
      # (running sudo internally); the previous `$SUDO mv helm /usr/local/bin/`
      # always failed because no ./helm file is left in the working directory,
      # which aborted the whole script via the ERR trap.
      curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
      ;;
  esac
  # Verify Helm installation
  if ! helm version >/dev/null 2>&1; then
    error_exit "Helm installation failed"
  fi
  echo -e "${GREEN}Helm installed successfully${NC}"
}
# Register the Bitnami and ingress-nginx chart repositories, then refresh
# the local repository index.
configure_helm_repos() {
  echo -e "${BLUE}[4/7] Configuring Helm repositories...${NC}"
  local name url
  while read -r name url; do
    helm repo add "$name" "$url"
  done <<'REPOS'
bitnami https://charts.bitnami.com/bitnami
ingress-nginx https://kubernetes.github.io/ingress-nginx
REPOS
  helm repo update
  echo -e "${GREEN}Helm repositories configured${NC}"
}
# Idempotently create the dev/prod namespaces and a service account in each.
# The `create --dry-run=client -o yaml | apply` pattern keeps reruns from
# failing with "already exists".
create_namespaces() {
  echo -e "${BLUE}[5/7] Creating namespaces and service accounts...${NC}"
  local ns
  for ns in "$NAMESPACE_DEV" "$NAMESPACE_PROD"; do
    kubectl create namespace "$ns" --dry-run=client -o yaml | kubectl apply -f -
  done
  for ns in "$NAMESPACE_DEV" "$NAMESPACE_PROD"; do
    kubectl create serviceaccount "${CHART_NAME}-sa" --namespace="$ns" --dry-run=client -o yaml | kubectl apply -f -
  done
  echo -e "${GREEN}Namespaces and service accounts created${NC}"
}
# Create Helm chart
# Scaffold a chart in the scratch directory, overwrite Chart.yaml and
# values.yaml with production-oriented content, then copy the result to
# /opt/helm-charts. `cd` failures abort via set -e.
# NOTE(review): the heredoc YAML below appears to have lost its nesting
# indentation (e.g. the keys under image:/serviceAccount:); restore the
# two-space indents or `helm lint` will reject the generated files — confirm
# against the original script.
create_helm_chart() {
echo -e "${BLUE}[6/7] Creating Helm chart...${NC}"
mkdir -p "$WORK_DIR"
cd "$WORK_DIR"
# Create chart
helm create "$CHART_NAME"
cd "$CHART_NAME"
# Create production-ready Chart.yaml
# Unquoted EOF: $CHART_NAME is expanded inside the heredoc on purpose.
cat > Chart.yaml <<EOF
apiVersion: v2
name: $CHART_NAME
description: A production-ready web application Helm chart
type: application
version: 1.0.0
appVersion: "2.1.0"
home: https://example.com
sources:
- https://github.com/example/$CHART_NAME
maintainers:
- name: Infrastructure Team
email: infra@example.com
keywords:
- web
- application
- microservice
annotations:
category: Application
dependencies:
- name: postgresql
version: 12.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: redis
version: 17.x.x
repository: https://charts.bitnami.com/bitnami
condition: redis.enabled
EOF
# Create production-ready values.yaml
# Dependencies default to disabled here (unlike the tutorial values.yaml)
# so the chart installs without the Bitnami subcharts being fetched.
cat > values.yaml <<EOF
replicaCount: 3
image:
repository: $CHART_NAME
pullPolicy: IfNotPresent
tag: "2.1.0"
imagePullSecrets:
- name: regcred
serviceAccount:
create: true
annotations: {}
name: ""
podSecurityContext:
runAsNonRoot: true
runAsUser: 10001
runAsGroup: 10001
fsGroup: 10001
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
service:
type: ClusterIP
port: 80
targetPort: 8080
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 250m
memory: 256Mi
autoscaling:
enabled: true
minReplicas: 3
maxReplicas: 10
targetCPUUtilizationPercentage: 70
postgresql:
enabled: false
redis:
enabled: false
EOF
# Copy chart to a permanent location
$SUDO mkdir -p /opt/helm-charts
$SUDO cp -r "$WORK_DIR/$CHART_NAME" /opt/helm-charts/
$SUDO chown -R root:root /opt/helm-charts
$SUDO chmod -R 755 /opt/helm-charts
echo -e "${GREEN}Helm chart created at /opt/helm-charts/$CHART_NAME${NC}"
}
# Print a summary of everything that was installed and sanity-check the
# generated chart with `helm lint`.
verify_installation() {
  echo -e "${BLUE}[7/7] Verifying installation...${NC}"
  printf 'Helm version: '
  helm version --short
  printf '%s\n' "Configured repositories:"
  helm repo list
  printf '%s\n' "Created namespaces:"
  kubectl get namespace "$NAMESPACE_DEV" "$NAMESPACE_PROD" --no-headers | awk '{print $1 " - " $2}'
  printf '%s\n' "Created service accounts:"
  kubectl get serviceaccount "${CHART_NAME}-sa" --namespace="$NAMESPACE_DEV" --no-headers | awk '{print $1 " (dev)"}'
  kubectl get serviceaccount "${CHART_NAME}-sa" --namespace="$NAMESPACE_PROD" --no-headers | awk '{print $1 " (prod)"}'
  if helm lint "/opt/helm-charts/$CHART_NAME" >/dev/null 2>&1; then
    echo -e "${GREEN}Chart validation passed${NC}"
  else
    echo -e "${YELLOW}Warning: Chart validation failed${NC}"
  fi
  echo -e "${GREEN}Installation completed successfully!${NC}"
  echo -e "${BLUE}Chart location: /opt/helm-charts/$CHART_NAME${NC}"
  echo -e "${BLUE}To deploy: helm install my-release /opt/helm-charts/$CHART_NAME --namespace $NAMESPACE_DEV${NC}"
}
# Orchestrate the full setup, running each stage in order. Any stage may
# abort the whole script via error_exit / set -e.
main() {
  echo -e "${GREEN}Starting Helm and Kubernetes setup...${NC}"
  local stage
  for stage in \
    check_privileges \
    detect_os \
    install_prerequisites \
    verify_cluster_access \
    install_helm \
    configure_helm_repos \
    create_namespaces \
    create_helm_chart \
    verify_installation; do
    "$stage"
  done
  echo -e "${GREEN}Setup completed successfully!${NC}"
}
# Entry point: run the full setup, forwarding any CLI arguments.
main "$@"
Review the script before running. Execute with: bash install.sh