Set up Falco with eBPF monitoring to detect runtime security threats in Kubernetes clusters. Configure custom rules, integrate Prometheus metrics, and establish comprehensive threat detection for container workloads.
Prerequisites
- Kubernetes cluster with admin access
- Helm 3 installed
- Prometheus Operator or Prometheus server
- Basic understanding of eBPF concepts
What this solves
Falco provides runtime security monitoring for Kubernetes clusters by detecting anomalous activity and potential threats in real-time. It uses eBPF (extended Berkeley Packet Filter) to monitor system calls and kernel events without requiring kernel modules, making it ideal for modern containerized environments where you need to detect privilege escalations, suspicious file access, network connections, and malicious process executions.
Step-by-step installation
Update system packages
Start by updating your package manager to ensure compatibility with Falco installation dependencies.
sudo apt update && sudo apt upgrade -y
sudo apt install -y curl gnupg2 software-properties-common
Add Falco repository
Add the official Falco repository to access the latest stable packages and security updates.
curl -fsSL https://falco.org/repo/falcosecurity-packages.asc | sudo gpg --dearmor -o /usr/share/keyrings/falco-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/falco-archive-keyring.gpg] https://download.falco.org/packages/deb stable main" | sudo tee /etc/apt/sources.list.d/falcosecurity.list
sudo apt update
Install Falco with eBPF support
Install Falco with eBPF driver support, which provides better performance and compatibility than kernel modules.
sudo apt install -y falco
sudo apt install -y linux-headers-$(uname -r)
Configure Falco for eBPF
Configure Falco to use the eBPF driver instead of kernel modules for better security and compatibility.
engine:
kind: ebpf
ebpf:
host_root: /host
buf_size_preset: 4
drop_failed_exit: false
Enable JSON output for better parsing
json_output: true
json_include_output_property: true
json_include_tags_property: true
Configure log output
log_stderr: true
log_syslog: false
log_level: info
Enable metrics
metrics:
enabled: true
interval: 1h
output_rule: true
rules_counters_enabled: true
resource_utilization_enabled: true
state_counters_enabled: true
kernel_event_counters_enabled: true
libbpf_stats_enabled: true
convert_memory_to_mb: true
include_empty_values: false
Web server for health checks and metrics
webserver:
enabled: true
listen_port: 8765
k8s_healthz_endpoint: /healthz
ssl_enabled: false
prometheus_metrics_enabled: true
Install Falco on Kubernetes with Helm
Deploy Falco as a DaemonSet on your Kubernetes cluster to monitor all nodes for security events.
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update
Create namespace for Falco
kubectl create namespace falco
Install Falco with eBPF and Prometheus integration
helm install falco falcosecurity/falco \
--namespace falco \
--set driver.kind=ebpf \
--set collectors.enabled=true \
--set falco.webserver.enabled=true \
--set falco.metrics.enabled=true \
--set serviceMonitor.enabled=true \
--set falco.json_output=true \
--set falco.json_include_output_property=true \
--set falco.log_level=info
Configure custom Falco rules
Create custom security rules tailored to your Kubernetes environment and application requirements.
# Custom Kubernetes security rules
- rule: Detect privilege escalation
desc: Detect attempts to escalate privileges in containers
condition: >
spawned_process and container and
(proc.name in (sudo, su) or
proc.args contains "--privileged" or
proc.args contains "--cap-add")
output: >
Privilege escalation attempt detected (user=%user.name
container=%container.name image=%container.image.repository
command=%proc.cmdline)
priority: WARNING
tags: [kubernetes, privilege_escalation, mitre_privilege_escalation]
- rule: Suspicious file access in container
desc: Detect access to sensitive files in containers
condition: >
open_read and container and
(fd.name startswith "/etc/passwd" or
fd.name startswith "/etc/shadow" or
fd.name startswith "/root/.ssh" or
fd.name glob "/home/*/.ssh/*")
output: >
Suspicious file access (user=%user.name container=%container.name
image=%container.image.repository file=%fd.name command=%proc.cmdline)
priority: WARNING
tags: [kubernetes, file_access, credential_access]
- rule: Container escape attempt
desc: Detect potential container escape attempts
condition: >
spawned_process and container and
(proc.name in (docker, runc, ctr, crictl) or
proc.args contains "nsenter" or
proc.args contains "unshare" or
proc.args contains "chroot")
output: >
Container escape attempt detected (user=%user.name
container=%container.name command=%proc.cmdline)
priority: CRITICAL
tags: [kubernetes, container_escape, mitre_escape]
- rule: Unauthorized network connection
desc: Detect unexpected outbound network connections
condition: >
outbound and container and
not fd.sip in (cluster_ip_range, service_ip_range) and
not fd.sport in (53, 443, 80) and
not proc.name in (curl, wget, apt, yum, dnf)
output: >
Unauthorized network connection (user=%user.name
container=%container.name dest=%fd.sip:%fd.sport command=%proc.cmdline)
priority: NOTICE
tags: [kubernetes, network, exfiltration]
- rule: Crypto mining activity
desc: Detect potential cryptocurrency mining activity
condition: >
spawned_process and
(proc.name in (xmrig, cpuminer, cgminer, bfgminer) or
proc.args contains "stratum+tcp" or
proc.args contains "mining" or
proc.cmdline contains "--donate-level")
output: >
Crypto mining activity detected (user=%user.name
container=%container.name command=%proc.cmdline)
priority: CRITICAL
tags: [kubernetes, cryptomining, resource_abuse]
Configure Falco logging output
Set up structured logging to forward security events to your centralized logging system.
apiVersion: v1
kind: ConfigMap
metadata:
name: falco-config
namespace: falco
data:
falco.yaml: |
rules_file:
- /etc/falco/falco_rules.yaml
- /etc/falco/falco_rules.local.yaml
- /etc/falco/k8s_audit_rules.yaml
json_output: true
json_include_output_property: true
json_include_tags_property: true
file_output:
enabled: true
keep_alive: false
filename: "/var/log/falco/events.log"
stdout_output:
enabled: true
syslog_output:
enabled: false
program_output:
enabled: false
keep_alive: false
program: "jq '{timestamp: .time, rule: .rule, priority: .priority, container: .output_fields.container_name, image: .output_fields.container_image_repository}' | logger -t falco-json"
http_output:
enabled: false
url: "http://your-webhook-endpoint/falco"
user_agent: "falcosecurity/falco"
ca_cert: ""
ca_bundle: ""
ca_path: ""
insecure: false
grpc:
enabled: false
bind_address: "0.0.0.0:5060"
threadiness: 0
grpc_output:
enabled: false
Apply the logging configuration
Update your Falco deployment with the custom logging configuration.
kubectl apply -f falco-logging-config.yaml
Restart Falco pods to pick up new configuration
kubectl rollout restart daemonset/falco -n falco
Verify the restart completed
kubectl rollout status daemonset/falco -n falco
Set up Prometheus monitoring
Configure Prometheus to scrape Falco metrics for monitoring security event trends and system performance.
apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus-falco-config
namespace: monitoring
data:
falco-scrape-config.yaml: |
- job_name: 'falco-metrics'
kubernetes_sd_configs:
- role: pod
namespaces:
names:
- falco
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
action: keep
regex: falco
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
scrape_interval: 30s
scrape_timeout: 10s
metrics_path: /metrics
Create Falco ServiceMonitor for Prometheus
Set up automatic service discovery for Falco metrics if you're using the Prometheus Operator.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: falco-metrics
namespace: falco
labels:
app.kubernetes.io/name: falco
prometheus: kube-prometheus
spec:
selector:
matchLabels:
app.kubernetes.io/name: falco
endpoints:
- port: http-metrics
interval: 30s
path: /metrics
scheme: http
scrapeTimeout: 10s
namespaceSelector:
matchNames:
- falco
Apply Prometheus monitoring configuration
Deploy the ServiceMonitor to enable automatic metrics collection from Falco instances.
kubectl apply -f falco-servicemonitor.yaml
Verify ServiceMonitor is created
kubectl get servicemonitor -n falco
Check if Prometheus is discovering Falco targets
kubectl port-forward -n monitoring svc/prometheus-kube-prometheus-prometheus 9090:9090 &
Visit http://localhost:9090/targets to verify Falco endpoints
Configure alerting rules
Create Prometheus alerting rules to notify your team when Falco detects security threats.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: falco-security-alerts
namespace: falco
labels:
prometheus: kube-prometheus
role: alert-rules
spec:
groups:
- name: falco.security
rules:
- alert: FalcoSecurityThreatHigh
expr: increase(falco_events_total{priority="Critical"}[5m]) > 0
for: 0m
labels:
severity: critical
component: falco
annotations:
summary: "Critical security threat detected by Falco"
description: "Falco detected {{ $value }} critical security events in the last 5 minutes. Rule: {{ $labels.rule }}. Pod: {{ $labels.kubernetes_pod_name }}"
- alert: FalcoSecurityThreatMedium
expr: increase(falco_events_total{priority="Warning"}[10m]) > 3
for: 2m
labels:
severity: warning
component: falco
annotations:
summary: "Multiple high-priority security threats detected"
description: "Falco detected {{ $value }} high-priority security events in the last 10 minutes. This may indicate ongoing malicious activity."
- alert: FalcoDown
expr: up{job="falco-metrics"} == 0
for: 3m
labels:
severity: critical
component: falco
annotations:
summary: "Falco is down"
description: "Falco has been down for more than 3 minutes. Security monitoring is compromised on {{ $labels.kubernetes_pod_name }}."
- alert: FalcoHighEventRate
expr: rate(falco_events_total[5m]) > 10
for: 5m
labels:
severity: warning
component: falco
annotations:
summary: "High Falco event rate detected"
description: "Falco is generating events at a rate of {{ $value }} per second, which may indicate system issues or attacks."
- alert: FalcoDroppedEvents
expr: increase(falco_kernel_drops_total[5m]) > 0
for: 2m
labels:
severity: warning
component: falco
annotations:
summary: "Falco is dropping kernel events"
description: "Falco dropped {{ $value }} kernel events in the last 5 minutes, potentially missing security threats."
- alert: FalcoContainerEscape
expr: increase(falco_events_total{rule=~"Container escape.*"}[1m]) > 0
for: 0m
labels:
severity: critical
component: falco
annotations:
summary: "Container escape attempt detected"
description: "Potential container escape attempt detected on {{ $labels.kubernetes_pod_name }}. Immediate investigation required."
Apply alerting rules
Deploy the Prometheus alerting rules to monitor Falco security events.
kubectl apply -f falco-alerts.yaml
Verify the PrometheusRule is created
kubectl get prometheusrule -n falco
Check if Prometheus loaded the rules
kubectl logs -n monitoring -l app.kubernetes.io/name=prometheus -c prometheus | grep "falco"
Test Falco detection
Trigger test security events to verify that Falco is properly detecting and alerting on suspicious activities.
# Create a test pod with suspicious activity
kubectl run falco-test --image=ubuntu:22.04 --rm -it --restart=Never -- /bin/bash
Inside the test pod, run commands that should trigger Falco rules:
Try to read sensitive files
cat /etc/passwd
cat /etc/shadow
Attempt privilege escalation (the ubuntu:22.04 image does not ship sudo, so install it first; both the package install and the sudo invocation should be detected)
apt update && apt install -y sudo && sudo whoami
Create an outbound network connection (netcat is not preinstalled in the ubuntu image; bash's /dev/tcp works without extra packages)
timeout 3 bash -c 'exec 3<>/dev/tcp/google.com/80' || true
Exit the test pod
exit
View Falco security events
Check that Falco detected the test activities and generated appropriate security alerts.
# View recent Falco logs
kubectl logs -n falco -l app.kubernetes.io/name=falco --tail=50
Check Falco metrics in Prometheus
kubectl port-forward -n monitoring svc/prometheus-kube-prometheus-prometheus 9090:9090 &
Query for recent security events
curl -G 'http://localhost:9090/api/v1/query' --data-urlencode 'query=falco_events_total' | jq
View events by priority
curl -G 'http://localhost:9090/api/v1/query' --data-urlencode 'query=falco_events_total{priority="Warning"}' | jq
Configure advanced threat detection
Enable Kubernetes audit log integration
Configure Falco to analyze Kubernetes API audit logs for detecting suspicious administrative activities.
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata
namespaces: ["kube-system", "falco"]
verbs: ["create", "update", "patch", "delete"]
resources:
- group: ""
resources: ["secrets", "configmaps"]
- group: "rbac.authorization.k8s.io"
resources: ["clusterroles", "clusterrolebindings"]
omitStages:
- RequestReceived
- level: Request
verbs: ["create", "update", "patch"]
resources:
- group: ""
resources: ["pods/exec", "pods/attach", "pods/portforward"]
omitStages:
- RequestReceived
- level: Metadata
verbs: ["create", "delete"]
resources:
- group: ""
resources: ["pods", "services"]
- group: "apps"
resources: ["deployments", "daemonsets"]
omitStages:
- RequestReceived
Update kube-apiserver for audit logging
Configure the Kubernetes API server to send audit logs to Falco for analysis.
# Add these flags to kube-apiserver command:
- --audit-log-path=/var/log/audit.log
- --audit-policy-file=/etc/kubernetes/audit-policy.yaml
- --audit-log-maxage=30
- --audit-log-maxbackup=10
- --audit-log-maxsize=100
Add volume mounts for audit policy
volumeMounts:
- mountPath: /etc/kubernetes/audit-policy.yaml
name: audit-policy
readOnly: true
- mountPath: /var/log/audit.log
name: audit-log
readOnly: false
volumes:
- name: audit-policy
hostPath:
path: /etc/kubernetes/audit-policy.yaml
type: File
- name: audit-log
hostPath:
path: /var/log/audit.log
type: FileOrCreate
Create custom detection rules for your environment
Add application-specific security rules based on your workload patterns and security requirements.
# Application-specific security rules
apiVersion: v1
kind: ConfigMap
metadata:
name: custom-falco-rules
namespace: falco
data:
custom_rules.yaml: |
- rule: Suspicious database access
desc: Detect direct database file access outside normal operations
condition: >
open_read and container and
fd.name endswith ".db" and
not proc.name in (postgres, mysqld, mongodb, redis-server) and
not container.image.repository contains "backup"
output: >
Direct database file access detected (user=%user.name
container=%container.name file=%fd.name command=%proc.cmdline)
priority: WARNING
tags: [database, data_access]
- rule: Unexpected package manager usage
desc: Detect package installations in running containers
condition: >
spawned_process and container and
proc.name in (apt, apt-get, yum, dnf, apk, pip, npm, gem) and
not container.image.repository contains "build" and
not k8s.pod.label.stage = "build"
output: >
Package manager used in runtime container (user=%user.name
container=%container.name command=%proc.cmdline)
priority: NOTICE
tags: [package_management, container_modification]
- rule: Secrets access attempt
desc: Detect attempts to access Kubernetes secrets
condition: >
open_read and container and
(fd.name startswith "/var/run/secrets/kubernetes.io" or
fd.name contains "secret" or
fd.name contains "token")
output: >
Kubernetes secrets access detected (user=%user.name
container=%container.name file=%fd.name)
priority: WARNING
tags: [secrets, credential_access]
- rule: Compliance violation - SSH server
desc: Detect SSH server running in container (compliance violation)
condition: >
spawned_process and container and
(proc.name = sshd or proc.name = ssh)
output: >
SSH server detected in container (user=%user.name
container=%container.name command=%proc.cmdline)
priority: NOTICE
tags: [compliance, ssh, remote_access]
Verify your setup
# Check Falco DaemonSet status
kubectl get daemonset -n falco
Verify Falco pods are running
kubectl get pods -n falco -l app.kubernetes.io/name=falco
Check Falco metrics endpoint
kubectl port-forward -n falco service/falco-metrics 8765:8765 &
curl http://localhost:8765/metrics | grep falco_events_total
Test rule loading
kubectl logs -n falco -l app.kubernetes.io/name=falco | grep "Rules loaded"
Verify eBPF driver is loaded
kubectl logs -n falco -l app.kubernetes.io/name=falco | grep "eBPF probe"
Check Prometheus target discovery
kubectl get servicemonitor -n falco
Verify alerting rules are active
kubectl get prometheusrule -n falco
Monitor security events with Prometheus
Create Grafana dashboard for Falco
Set up comprehensive dashboards to visualize security events and system health metrics. This builds on the monitoring setup covered in our Kubernetes Prometheus monitoring tutorial.
{
"dashboard": {
"id": null,
"title": "Falco Security Monitoring",
"tags": ["falco", "security", "kubernetes"],
"timezone": "browser",
"panels": [
{
"title": "Security Events by Priority",
"type": "stat",
"targets": [
{
"expr": "sum by (priority) (rate(falco_events_total[5m]))",
"legendFormat": "{{ priority }}"
}
],
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"thresholds": {
"steps": [
{"color": "green", "value": null},
{"color": "yellow", "value": 0.1},
{"color": "red", "value": 1}
]
}
}
}
},
{
"title": "Top Security Rules Triggered",
"type": "table",
"targets": [
{
"expr": "topk(10, sum by (rule) (increase(falco_events_total[1h])))",
"format": "table",
"instant": true
}
]
},
{
"title": "Events by Container Image",
"type": "piechart",
"targets": [
{
"expr": "sum by (container_image) (increase(falco_events_total[24h]))"
}
]
},
{
"title": "Falco System Health",
"type": "timeseries",
"targets": [
{
"expr": "falco_kernel_drops_total",
"legendFormat": "Kernel Drops"
},
{
"expr": "rate(falco_events_total[5m])",
"legendFormat": "Event Rate"
}
]
}
],
"time": {
"from": "now-24h",
"to": "now"
},
"refresh": "30s"
}
}
Import dashboard to Grafana
Load the Falco security dashboard into your Grafana instance for real-time monitoring.
# Port forward to Grafana
kubectl port-forward -n monitoring service/grafana 3000:80 &
Import dashboard via API
curl -X POST \
http://admin:password@localhost:3000/api/dashboards/db \
-H "Content-Type: application/json" \
-d @falco-dashboard.json
Alternative: Import via Grafana UI
1. Navigate to http://localhost:3000
2. Go to Dashboards > Import
3. Paste the JSON content or upload the file
Common issues
| Symptom | Cause | Fix |
|---|---|---|
| Falco pods in CrashLoopBackOff | eBPF driver failed to load or kernel headers missing | kubectl logs -n falco -l app.kubernetes.io/name=falco and install kernel headers for your distribution |
| High false positive rate | Default rules too generic for your environment | Tune rules with condition exceptions and whitelist known-good processes |
| Missing security events | Falco dropping events due to high load | Increase buf_size_preset in falco.yaml and monitor falco_kernel_drops_total metric |
| Prometheus not scraping metrics | ServiceMonitor misconfigured or Falco webserver disabled | Verify webserver.enabled: true and check ServiceMonitor selector labels |
| Rules not loading | YAML syntax errors in custom rules | Validate rule syntax with falco --validate /etc/falco/rules.yaml |
| No audit events from Kubernetes API | API server audit logging not configured | Configure audit policy in kube-apiserver or use managed cluster audit features |
Production optimization
Configure resource limits
Set appropriate CPU and memory limits for Falco to prevent resource contention with application workloads.
# Resource limits for production deployment
resources:
limits:
cpu: "1"
memory: "1Gi"
requests:
cpu: "200m"
memory: "512Mi"
Node selector for dedicated security nodes (optional)
nodeSelector:
node-role.kubernetes.io/security: "true"
Tolerations for tainted nodes
tolerations:
- key: "security"
operator: "Equal"
value: "true"
effect: "NoSchedule"
Priority class for critical security workload
priorityClassName: "system-node-critical"
Enable log rotation and retention
Configure log rotation to manage disk space and ensure long-term security event retention.
/var/log/falco/*.log {
daily
rotate 30
compress
delaycompress
missingok
notifempty
create 644 falco falco
postrotate
/usr/bin/killall -USR1 falco 2>/dev/null || true
endscript
}
Next steps
- Integrate HashiCorp Vault with Kubernetes secrets management for enhanced security
- Configure Kubernetes network policies with Calico CNI for network microsegmentation
- Set up Falco incident response automation with webhooks
- Implement Falco custom rules for application-specific security
- Configure Falco log forwarding to SIEM systems
Running this in production?
Automated install script
Run this to automate the entire setup
#!/usr/bin/env bash
# Falco runtime-security installer.
# Adds the official Falco repo, installs the package, configures the eBPF
# engine with Prometheus metrics, and optionally deploys Falco to Kubernetes
# via Helm (-k/--kubernetes). Must be run as root.
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Default values
FALCO_CONFIG_DIR="/etc/falco"  # where falco.yaml and rule files live
INSTALL_K8S="false"            # flipped to "true" by -k/--kubernetes
NAMESPACE="falco"              # target namespace, overridable with -n
# Print command-line help to stdout and terminate with status 1.
usage() {
  printf '%s\n' \
    "Usage: $0 [OPTIONS]" \
    "Options:" \
    " -k, --kubernetes Install Falco on Kubernetes with Helm" \
    " -n, --namespace NAME Kubernetes namespace (default: falco)" \
    " -h, --help Show this help message"
  exit 1
}
# Parse command-line options; anything unrecognized prints an error and the
# usage text (which exits non-zero).
while (( $# > 0 )); do
  opt=$1
  case "$opt" in
    -k|--kubernetes)
      INSTALL_K8S="true"
      shift
      ;;
    -n|--namespace)
      NAMESPACE="$2"
      shift 2
      ;;
    -h|--help)
      usage
      ;;
    *)
      echo -e "${RED}Unknown option: ${opt}${NC}"
      usage
      ;;
  esac
done
# Error cleanup function
# Fired by the ERR trap below whenever a command fails under `set -e`:
# best-effort stop/disable of the falco unit so a half-finished install is
# not left running. The `|| true` guards keep the handler itself from
# failing (e.g. when the service was never installed).
cleanup() {
  echo -e "${RED}Installation failed. Cleaning up...${NC}"
  systemctl stop falco 2>/dev/null || true
  systemctl disable falco 2>/dev/null || true
}
trap cleanup ERR
# Abort unless the script is executing with root privileges (direct root
# login or sudo) — package installs and /etc writes require it.
check_privileges() {
  if (( EUID != 0 )); then
    echo -e "${RED}This script must be run as root or with sudo${NC}"
    exit 1
  fi
}
#######################################
# Detect the host distribution and select matching package-manager commands.
# Arguments:
#   $1 - optional path to an os-release file (default: /etc/os-release).
#        Parameterized so the detection logic is testable; callers passing
#        no argument get the original behavior.
# Globals (written):
#   PKG_MGR, PKG_INSTALL, PKG_UPDATE, PKG_UPGRADE, KERNEL_HEADERS
#   (plus ID/PRETTY_NAME etc. sourced from the os-release file)
# Outputs: status lines on stdout.
# Returns: 0 on success; exits 1 if the file is missing or the distro is
#   unsupported.
#######################################
detect_distro() {
  local os_release="${1:-/etc/os-release}"
  if [ ! -f "$os_release" ]; then
    echo -e "${RED}${os_release} not found. Cannot detect distribution.${NC}"
    exit 1
  fi
  # shellcheck disable=SC1090 — path is selected at runtime
  . "$os_release"
  case "$ID" in
    ubuntu|debian)
      PKG_MGR="apt"
      PKG_INSTALL="apt install -y"
      PKG_UPDATE="apt update"
      PKG_UPGRADE="apt upgrade -y"
      KERNEL_HEADERS="linux-headers-$(uname -r)"
      ;;
    almalinux|rocky|centos|rhel|ol)
      PKG_MGR="dnf"
      PKG_INSTALL="dnf install -y"
      PKG_UPDATE="dnf update -y"
      PKG_UPGRADE=""
      KERNEL_HEADERS="kernel-devel-$(uname -r) kernel-headers-$(uname -r)"
      ;;
    amzn|fedora)
      # Older Amazon Linux only ships yum; prefer dnf when available.
      if command -v dnf >/dev/null 2>&1; then
        PKG_MGR="dnf"
        PKG_INSTALL="dnf install -y"
        PKG_UPDATE="dnf update -y"
      else
        PKG_MGR="yum"
        PKG_INSTALL="yum install -y"
        PKG_UPDATE="yum update -y"
      fi
      PKG_UPGRADE=""
      KERNEL_HEADERS="kernel-devel-$(uname -r) kernel-headers-$(uname -r)"
      ;;
    *)
      echo -e "${RED}Unsupported distribution: $ID${NC}"
      exit 1
      ;;
  esac
  echo -e "${GREEN}Detected distribution: $PRETTY_NAME${NC}"
  echo -e "${BLUE}Package manager: $PKG_MGR${NC}"
}
# Refresh package metadata, apply pending upgrades where the distro defines
# an upgrade command (apt only), then install shared prerequisites plus the
# per-distro helper package.
update_system() {
  echo -e "${BLUE}[1/7] Updating system packages...${NC}"
  $PKG_UPDATE
  if [[ -n "${PKG_UPGRADE}" ]]; then
    $PKG_UPGRADE
  fi
  # Dependencies needed on every supported distribution.
  $PKG_INSTALL curl gnupg2
  # Distro-specific repo-management helpers.
  if [[ "$PKG_MGR" == "apt" ]]; then
    $PKG_INSTALL software-properties-common
  else
    $PKG_INSTALL yum-utils
  fi
}
# Add Falco repository
# Registers the official falcosecurity package repository for the detected
# package manager. Ordering matters: the signing key must be installed
# before the repo is added/refreshed so downloaded packages can be verified.
add_falco_repo() {
  echo -e "${BLUE}[2/7] Adding Falco repository...${NC}"
  case "$PKG_MGR" in
    apt)
      # Import the signing key into a dedicated keyring referenced by the
      # repo line's signed-by= option.
      curl -fsSL https://falco.org/repo/falcosecurity-packages.asc | gpg --dearmor -o /usr/share/keyrings/falco-archive-keyring.gpg
      echo "deb [signed-by=/usr/share/keyrings/falco-archive-keyring.gpg] https://download.falco.org/packages/deb stable main" > /etc/apt/sources.list.d/falcosecurity.list
      chmod 644 /etc/apt/sources.list.d/falcosecurity.list
      # Refresh indexes so the falco package becomes visible to apt.
      $PKG_UPDATE
      ;;
    dnf|yum)
      # RPM-based distros: import the key and drop the .repo file in place
      # (dnf/yum pick it up automatically on the next transaction).
      rpm --import https://falco.org/repo/falcosecurity-packages.asc
      curl -s -o /etc/yum.repos.d/falcosecurity.repo https://falco.org/repo/falcosecurity-rpm.repo
      chmod 644 /etc/yum.repos.d/falcosecurity.repo
      ;;
  esac
}
# Install the Falco package. Kernel headers are attempted first so the
# driver toolchain has what it needs, but their absence is non-fatal (the
# eBPF probe may still be available prebuilt).
install_falco() {
  echo -e "${BLUE}[3/7] Installing Falco with eBPF support...${NC}"
  if ! $PKG_INSTALL $KERNEL_HEADERS; then
    echo -e "${YELLOW}Warning: Could not install kernel headers${NC}"
  fi
  $PKG_INSTALL falco
}
#######################################
# Write the eBPF-enabled Falco configuration.
# Globals (read): FALCO_CONFIG_DIR, BLUE, NC
# Side effects: backs up the distribution falco.yaml (first run only) and
#   replaces it with our generated settings (eBPF engine, JSON output,
#   Prometheus metrics, embedded webserver).
#######################################
configure_falco() {
  echo -e "${BLUE}[4/7] Configuring Falco for eBPF...${NC}"
  # Only create the backup if one does not already exist: an unconditional
  # `cp` would, on a re-run, overwrite the backup with our own generated
  # file and permanently lose the pristine original.
  if [[ ! -f "$FALCO_CONFIG_DIR/falco.yaml.backup" ]]; then
    cp "$FALCO_CONFIG_DIR/falco.yaml" "$FALCO_CONFIG_DIR/falco.yaml.backup"
  fi
  # Generated config. rules.d is included so drop-in rule files (see
  # create_custom_rules) are loaded without editing this file again.
  cat > "$FALCO_CONFIG_DIR/falco.yaml" << 'EOF'
engine:
  kind: ebpf
  ebpf:
    host_root: /host
    buf_size_preset: 4
    drop_failed_exit: false
json_output: true
json_include_output_property: true
json_include_tags_property: true
log_stderr: true
log_syslog: false
log_level: info
metrics:
  enabled: true
  interval: 1h
  output_rule: true
  rules_counters_enabled: true
  resource_utilization_enabled: true
  state_counters_enabled: true
  kernel_event_counters_enabled: true
  libbpf_stats_enabled: true
  convert_memory_to_mb: true
  include_empty_values: false
webserver:
  enabled: true
  listen_port: 8765
  k8s_healthz_endpoint: /healthz
  ssl_enabled: false
  prometheus_metrics_enabled: true
rules_file:
  - /etc/falco/falco_rules.yaml
  - /etc/falco/falco_rules.local.yaml
  - /etc/falco/k8s_audit_rules.yaml
  - /etc/falco/rules.d
load_plugins: []
EOF
  chmod 644 "$FALCO_CONFIG_DIR/falco.yaml"
}
#######################################
# Install the bundled custom detection rules.
# Globals (read): FALCO_CONFIG_DIR, BLUE, NC
# Side effects: writes the rules into $FALCO_CONFIG_DIR/rules.d/, which the
#   generated falco.yaml already loads via its rules_file list.
# Note: the previous approach appended "  - /etc/falco/custom_rules.yaml"
#   to the END of falco.yaml. That line landed after `load_plugins: []`
#   rather than under the rules_file key, producing invalid YAML and a rule
#   file Falco never loaded. Dropping the file into rules.d avoids editing
#   falco.yaml entirely and is idempotent across re-runs.
#######################################
create_custom_rules() {
  echo -e "${BLUE}[5/7] Creating custom Falco rules...${NC}"
  mkdir -p "$FALCO_CONFIG_DIR/rules.d"
  cat > "$FALCO_CONFIG_DIR/rules.d/custom_rules.yaml" << 'EOF'
- rule: Detect Privilege Escalation
  desc: Detect attempts to escalate privileges in containers
  condition: >
    spawned_process and container and
    (proc.name in (sudo, su) or
    proc.args contains "--privileged" or
    proc.args contains "--cap-add")
  output: >
    Privilege escalation attempt detected (user=%user.name
    container=%container.name image=%container.image.repository
    proc=%proc.name args=%proc.args)
  priority: WARNING
  tags: [container, privilege_escalation]
- rule: Suspicious File Access
  desc: Detect suspicious file access patterns
  condition: >
    open_read and container and
    (fd.name startswith "/etc/shadow" or
    fd.name startswith "/etc/passwd" or
    fd.name startswith "/etc/sudoers")
  output: >
    Suspicious file access (user=%user.name container=%container.name
    file=%fd.name proc=%proc.name)
  priority: WARNING
  tags: [container, file_access]
EOF
  chmod 644 "$FALCO_CONFIG_DIR/rules.d/custom_rules.yaml"
}
# Enable and start the falco systemd unit, then confirm it is active.
# Failure path prints the unit status and aborts the install.
start_falco_service() {
  echo -e "${BLUE}[6/7] Starting and enabling Falco service...${NC}"
  systemctl daemon-reload
  systemctl enable falco
  systemctl start falco
  # Give the unit a moment to settle before probing its state.
  sleep 5
  if ! systemctl is-active --quiet falco; then
    echo -e "${RED}Failed to start Falco service${NC}"
    systemctl status falco
    exit 1
  fi
  echo -e "${GREEN}Falco service started successfully${NC}"
}
# Deploy Falco to the cluster as a Helm release: DaemonSet with the eBPF
# driver, JSON output, metrics endpoint, and a ServiceMonitor for the
# Prometheus Operator.
install_k8s_falco() {
  echo -e "${BLUE}[6/7] Installing Falco on Kubernetes...${NC}"
  # Both CLIs are required; fail fast with a clear message.
  command -v kubectl >/dev/null 2>&1 || {
    echo -e "${RED}kubectl not found. Please install kubectl first.${NC}"
    exit 1
  }
  command -v helm >/dev/null 2>&1 || {
    echo -e "${RED}Helm not found. Please install Helm first.${NC}"
    exit 1
  }
  helm repo add falcosecurity https://falcosecurity.github.io/charts
  helm repo update
  # Idempotent namespace creation (apply of a dry-run manifest).
  kubectl create namespace "$NAMESPACE" --dry-run=client -o yaml | kubectl apply -f -
  # upgrade --install makes re-runs safe.
  helm upgrade --install falco falcosecurity/falco \
    --namespace "$NAMESPACE" \
    --set driver.kind=ebpf \
    --set collectors.enabled=true \
    --set falco.webserver.enabled=true \
    --set falco.metrics.enabled=true \
    --set serviceMonitor.enabled=true \
    --set falco.json_output=true \
    --set falco.json_include_output_property=true \
    --set falco.log_level=info
  echo -e "${GREEN}Falco installed on Kubernetes in namespace: $NAMESPACE${NC}"
}
# Final verification: confirm Falco is running in the chosen mode and print
# follow-up commands for the operator.
verify_installation() {
  echo -e "${BLUE}[7/7] Verifying Falco installation...${NC}"
  if [[ "$INSTALL_K8S" == "true" ]]; then
    _verify_k8s_install
  else
    _verify_host_install
  fi
  echo -e "${GREEN}Falco installation completed successfully!${NC}"
  echo -e "${BLUE}Configuration location: $FALCO_CONFIG_DIR${NC}"
  if [[ "$INSTALL_K8S" == "false" ]]; then
    echo -e "${BLUE}Service status: systemctl status falco${NC}"
    echo -e "${BLUE}View logs: journalctl -u falco -f${NC}"
  else
    echo -e "${BLUE}View logs: kubectl logs -n $NAMESPACE -l app.kubernetes.io/name=falco -f${NC}"
  fi
}

# Show the Helm-deployed pods and service.
_verify_k8s_install() {
  echo "Checking Falco pods..."
  kubectl get pods -n "$NAMESPACE" -l app.kubernetes.io/name=falco
  echo "Checking Falco service..."
  kubectl get svc -n "$NAMESPACE" -l app.kubernetes.io/name=falco
}

# Check the systemd unit, the eBPF driver message, and the local web server.
_verify_host_install() {
  if systemctl is-active --quiet falco; then
    echo -e "${GREEN}✓ Falco service is running${NC}"
  else
    echo -e "${RED}✗ Falco service is not running${NC}"
    exit 1
  fi
  if journalctl -u falco --since "1 minute ago" | grep -q "eBPF probe loaded"; then
    echo -e "${GREEN}✓ Falco is using eBPF driver${NC}"
  else
    echo -e "${YELLOW}⚠ Falco may not be using eBPF driver${NC}"
  fi
  if curl -s http://localhost:8765/healthz >/dev/null 2>&1; then
    echo -e "${GREEN}✓ Falco web server is responding${NC}"
  else
    echo -e "${YELLOW}⚠ Falco web server is not responding${NC}"
  fi
}
# Main execution
# Orchestrates the install: privilege check -> distro detection -> host
# package install/config, then either a Helm deployment (-k) or the local
# systemd service, followed by verification.
main() {
  echo -e "${GREEN}=== Falco Runtime Security Installation ===${NC}"
  check_privileges
  detect_distro
  update_system
  add_falco_repo
  install_falco
  configure_falco
  create_custom_rules
  # NOTE(review): host packages/config are installed even in Kubernetes
  # mode (-k); presumably intentional so node-level tooling is present —
  # confirm whether the host steps should be skipped for -k.
  if [[ "$INSTALL_K8S" == "true" ]]; then
    install_k8s_falco
  else
    start_falco_service
  fi
  verify_installation
}
main "$@"
Review the script before running. Execute with: bash install.sh